Documentation ¶
Index ¶
- Variables
- func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)
- type AssistConfig
- func (*AssistConfig) Descriptor() ([]byte, []int) deprecated
- func (x *AssistConfig) GetAudioInConfig() *AudioInConfig
- func (x *AssistConfig) GetAudioOutConfig() *AudioOutConfig
- func (x *AssistConfig) GetDebugConfig() *DebugConfig
- func (x *AssistConfig) GetDeviceConfig() *DeviceConfig
- func (x *AssistConfig) GetDialogStateIn() *DialogStateIn
- func (x *AssistConfig) GetScreenOutConfig() *ScreenOutConfig
- func (x *AssistConfig) GetTextQuery() string
- func (m *AssistConfig) GetType() isAssistConfig_Type
- func (*AssistConfig) ProtoMessage()
- func (x *AssistConfig) ProtoReflect() protoreflect.Message
- func (x *AssistConfig) Reset()
- func (x *AssistConfig) String() string
- type AssistConfig_AudioInConfig
- type AssistConfig_TextQuery
- type AssistRequest
- func (*AssistRequest) Descriptor() ([]byte, []int) deprecated
- func (x *AssistRequest) GetAudioIn() []byte
- func (x *AssistRequest) GetConfig() *AssistConfig
- func (m *AssistRequest) GetType() isAssistRequest_Type
- func (*AssistRequest) ProtoMessage()
- func (x *AssistRequest) ProtoReflect() protoreflect.Message
- func (x *AssistRequest) Reset()
- func (x *AssistRequest) String() string
- type AssistRequest_AudioIn
- type AssistRequest_Config
- type AssistResponse
- func (*AssistResponse) Descriptor() ([]byte, []int) deprecated
- func (x *AssistResponse) GetAudioOut() *AudioOut
- func (x *AssistResponse) GetDebugInfo() *DebugInfo
- func (x *AssistResponse) GetDeviceAction() *DeviceAction
- func (x *AssistResponse) GetDialogStateOut() *DialogStateOut
- func (x *AssistResponse) GetEventType() AssistResponse_EventType
- func (x *AssistResponse) GetScreenOut() *ScreenOut
- func (x *AssistResponse) GetSpeechResults() []*SpeechRecognitionResult
- func (*AssistResponse) ProtoMessage()
- func (x *AssistResponse) ProtoReflect() protoreflect.Message
- func (x *AssistResponse) Reset()
- func (x *AssistResponse) String() string
- type AssistResponse_EventType
- func (AssistResponse_EventType) Descriptor() protoreflect.EnumDescriptor
- func (x AssistResponse_EventType) Enum() *AssistResponse_EventType
- func (AssistResponse_EventType) EnumDescriptor() ([]byte, []int) deprecated
- func (x AssistResponse_EventType) Number() protoreflect.EnumNumber
- func (x AssistResponse_EventType) String() string
- func (AssistResponse_EventType) Type() protoreflect.EnumType
- type AudioInConfig
- func (*AudioInConfig) Descriptor() ([]byte, []int) deprecated
- func (x *AudioInConfig) GetEncoding() AudioInConfig_Encoding
- func (x *AudioInConfig) GetSampleRateHertz() int32
- func (*AudioInConfig) ProtoMessage()
- func (x *AudioInConfig) ProtoReflect() protoreflect.Message
- func (x *AudioInConfig) Reset()
- func (x *AudioInConfig) String() string
- type AudioInConfig_Encoding
- func (AudioInConfig_Encoding) Descriptor() protoreflect.EnumDescriptor
- func (x AudioInConfig_Encoding) Enum() *AudioInConfig_Encoding
- func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int) deprecated
- func (x AudioInConfig_Encoding) Number() protoreflect.EnumNumber
- func (x AudioInConfig_Encoding) String() string
- func (AudioInConfig_Encoding) Type() protoreflect.EnumType
- type AudioOut
- type AudioOutConfig
- func (*AudioOutConfig) Descriptor() ([]byte, []int) deprecated
- func (x *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding
- func (x *AudioOutConfig) GetSampleRateHertz() int32
- func (x *AudioOutConfig) GetVolumePercentage() int32
- func (*AudioOutConfig) ProtoMessage()
- func (x *AudioOutConfig) ProtoReflect() protoreflect.Message
- func (x *AudioOutConfig) Reset()
- func (x *AudioOutConfig) String() string
- type AudioOutConfig_Encoding
- func (AudioOutConfig_Encoding) Descriptor() protoreflect.EnumDescriptor
- func (x AudioOutConfig_Encoding) Enum() *AudioOutConfig_Encoding
- func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int) deprecated
- func (x AudioOutConfig_Encoding) Number() protoreflect.EnumNumber
- func (x AudioOutConfig_Encoding) String() string
- func (AudioOutConfig_Encoding) Type() protoreflect.EnumType
- type DebugConfig
- type DebugInfo
- type DeviceAction
- type DeviceConfig
- func (*DeviceConfig) Descriptor() ([]byte, []int) deprecated
- func (x *DeviceConfig) GetDeviceId() string
- func (x *DeviceConfig) GetDeviceModelId() string
- func (*DeviceConfig) ProtoMessage()
- func (x *DeviceConfig) ProtoReflect() protoreflect.Message
- func (x *DeviceConfig) Reset()
- func (x *DeviceConfig) String() string
- type DeviceLocation
- func (*DeviceLocation) Descriptor() ([]byte, []int) deprecated
- func (x *DeviceLocation) GetCoordinates() *latlng.LatLng
- func (m *DeviceLocation) GetType() isDeviceLocation_Type
- func (*DeviceLocation) ProtoMessage()
- func (x *DeviceLocation) ProtoReflect() protoreflect.Message
- func (x *DeviceLocation) Reset()
- func (x *DeviceLocation) String() string
- type DeviceLocation_Coordinates
- type DialogStateIn
- func (*DialogStateIn) Descriptor() ([]byte, []int) deprecated
- func (x *DialogStateIn) GetConversationState() []byte
- func (x *DialogStateIn) GetDeviceLocation() *DeviceLocation
- func (x *DialogStateIn) GetIsNewConversation() bool
- func (x *DialogStateIn) GetLanguageCode() string
- func (*DialogStateIn) ProtoMessage()
- func (x *DialogStateIn) ProtoReflect() protoreflect.Message
- func (x *DialogStateIn) Reset()
- func (x *DialogStateIn) String() string
- type DialogStateOut
- func (*DialogStateOut) Descriptor() ([]byte, []int) deprecated
- func (x *DialogStateOut) GetConversationState() []byte
- func (x *DialogStateOut) GetMicrophoneMode() DialogStateOut_MicrophoneMode
- func (x *DialogStateOut) GetSupplementalDisplayText() string
- func (x *DialogStateOut) GetVolumePercentage() int32
- func (*DialogStateOut) ProtoMessage()
- func (x *DialogStateOut) ProtoReflect() protoreflect.Message
- func (x *DialogStateOut) Reset()
- func (x *DialogStateOut) String() string
- type DialogStateOut_MicrophoneMode
- func (DialogStateOut_MicrophoneMode) Descriptor() protoreflect.EnumDescriptor
- func (x DialogStateOut_MicrophoneMode) Enum() *DialogStateOut_MicrophoneMode
- func (DialogStateOut_MicrophoneMode) EnumDescriptor() ([]byte, []int) deprecated
- func (x DialogStateOut_MicrophoneMode) Number() protoreflect.EnumNumber
- func (x DialogStateOut_MicrophoneMode) String() string
- func (DialogStateOut_MicrophoneMode) Type() protoreflect.EnumType
- type EmbeddedAssistantClient
- type EmbeddedAssistantServer
- type EmbeddedAssistant_AssistClient
- type EmbeddedAssistant_AssistServer
- type ScreenOut
- type ScreenOutConfig
- func (*ScreenOutConfig) Descriptor() ([]byte, []int) deprecated
- func (x *ScreenOutConfig) GetScreenMode() ScreenOutConfig_ScreenMode
- func (*ScreenOutConfig) ProtoMessage()
- func (x *ScreenOutConfig) ProtoReflect() protoreflect.Message
- func (x *ScreenOutConfig) Reset()
- func (x *ScreenOutConfig) String() string
- type ScreenOutConfig_ScreenMode
- func (ScreenOutConfig_ScreenMode) Descriptor() protoreflect.EnumDescriptor
- func (x ScreenOutConfig_ScreenMode) Enum() *ScreenOutConfig_ScreenMode
- func (ScreenOutConfig_ScreenMode) EnumDescriptor() ([]byte, []int) deprecated
- func (x ScreenOutConfig_ScreenMode) Number() protoreflect.EnumNumber
- func (x ScreenOutConfig_ScreenMode) String() string
- func (ScreenOutConfig_ScreenMode) Type() protoreflect.EnumType
- type ScreenOut_Format
- func (ScreenOut_Format) Descriptor() protoreflect.EnumDescriptor
- func (x ScreenOut_Format) Enum() *ScreenOut_Format
- func (ScreenOut_Format) EnumDescriptor() ([]byte, []int) deprecated
- func (x ScreenOut_Format) Number() protoreflect.EnumNumber
- func (x ScreenOut_Format) String() string
- func (ScreenOut_Format) Type() protoreflect.EnumType
- type SpeechRecognitionResult
- func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) deprecated
- func (x *SpeechRecognitionResult) GetStability() float32
- func (x *SpeechRecognitionResult) GetTranscript() string
- func (*SpeechRecognitionResult) ProtoMessage()
- func (x *SpeechRecognitionResult) ProtoReflect() protoreflect.Message
- func (x *SpeechRecognitionResult) Reset()
- func (x *SpeechRecognitionResult) String() string
- type UnimplementedEmbeddedAssistantServer
Constants ¶
This section is empty.
Variables ¶
var (
    AssistResponse_EventType_name = map[int32]string{
        0: "EVENT_TYPE_UNSPECIFIED",
        1: "END_OF_UTTERANCE",
    }
    AssistResponse_EventType_value = map[string]int32{
        "EVENT_TYPE_UNSPECIFIED": 0,
        "END_OF_UTTERANCE":       1,
    }
)
Enum value maps for AssistResponse_EventType.
var (
    AudioInConfig_Encoding_name = map[int32]string{
        0: "ENCODING_UNSPECIFIED",
        1: "LINEAR16",
        2: "FLAC",
    }
    AudioInConfig_Encoding_value = map[string]int32{
        "ENCODING_UNSPECIFIED": 0,
        "LINEAR16":             1,
        "FLAC":                 2,
    }
)
Enum value maps for AudioInConfig_Encoding.
var (
    AudioOutConfig_Encoding_name = map[int32]string{
        0: "ENCODING_UNSPECIFIED",
        1: "LINEAR16",
        2: "MP3",
        3: "OPUS_IN_OGG",
    }
    AudioOutConfig_Encoding_value = map[string]int32{
        "ENCODING_UNSPECIFIED": 0,
        "LINEAR16":             1,
        "MP3":                  2,
        "OPUS_IN_OGG":          3,
    }
)
Enum value maps for AudioOutConfig_Encoding.
var (
    ScreenOutConfig_ScreenMode_name = map[int32]string{
        0: "SCREEN_MODE_UNSPECIFIED",
        1: "OFF",
        3: "PLAYING",
    }
    ScreenOutConfig_ScreenMode_value = map[string]int32{
        "SCREEN_MODE_UNSPECIFIED": 0,
        "OFF":                     1,
        "PLAYING":                 3,
    }
)
Enum value maps for ScreenOutConfig_ScreenMode.
var (
    ScreenOut_Format_name = map[int32]string{
        0: "FORMAT_UNSPECIFIED",
        1: "HTML",
    }
    ScreenOut_Format_value = map[string]int32{
        "FORMAT_UNSPECIFIED": 0,
        "HTML":               1,
    }
)
Enum value maps for ScreenOut_Format.
var (
    DialogStateOut_MicrophoneMode_name = map[int32]string{
        0: "MICROPHONE_MODE_UNSPECIFIED",
        1: "CLOSE_MICROPHONE",
        2: "DIALOG_FOLLOW_ON",
    }
    DialogStateOut_MicrophoneMode_value = map[string]int32{
        "MICROPHONE_MODE_UNSPECIFIED": 0,
        "CLOSE_MICROPHONE":            1,
        "DIALOG_FOLLOW_ON":            2,
    }
)
Enum value maps for DialogStateOut_MicrophoneMode.
var File_google_assistant_embedded_v1alpha2_embedded_assistant_proto protoreflect.FileDescriptor
Functions ¶
func RegisterEmbeddedAssistantServer ¶
func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)
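A minimal registration sketch is shown below. It assumes the package is imported as `embedded` from `google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2`; the listen address and the `assistantServer` type are illustrative placeholders, with `UnimplementedEmbeddedAssistantServer` embedded so the type satisfies `EmbeddedAssistantServer` and stays forward compatible.

package main

import (
    "log"
    "net"

    embedded "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2"
    "google.golang.org/grpc"
)

// assistantServer is a hypothetical implementation; embedding
// UnimplementedEmbeddedAssistantServer provides a default Assist method.
type assistantServer struct {
    embedded.UnimplementedEmbeddedAssistantServer
}

func main() {
    lis, err := net.Listen("tcp", ":10000") // illustrative listen address
    if err != nil {
        log.Fatalf("listen: %v", err)
    }
    s := grpc.NewServer()
    embedded.RegisterEmbeddedAssistantServer(s, &assistantServer{})
    if err := s.Serve(lis); err != nil {
        log.Fatalf("serve: %v", err)
    }
}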
Types ¶
type AssistConfig ¶
type AssistConfig struct {
    // Types that are assignable to Type:
    //	*AssistConfig_AudioInConfig
    //	*AssistConfig_TextQuery
    Type isAssistConfig_Type `protobuf_oneof:"type"`
    // *Required* Specifies how to format the audio that will be returned.
    AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig,proto3" json:"audio_out_config,omitempty"`
    // *Optional* Specifies the desired format to use when server returns a
    // visual screen response.
    ScreenOutConfig *ScreenOutConfig `protobuf:"bytes,8,opt,name=screen_out_config,json=screenOutConfig,proto3" json:"screen_out_config,omitempty"`
    // *Required* Represents the current dialog state.
    DialogStateIn *DialogStateIn `protobuf:"bytes,3,opt,name=dialog_state_in,json=dialogStateIn,proto3" json:"dialog_state_in,omitempty"`
    // Device configuration that uniquely identifies a specific device.
    DeviceConfig *DeviceConfig `protobuf:"bytes,4,opt,name=device_config,json=deviceConfig,proto3" json:"device_config,omitempty"`
    // *Optional* Debugging parameters for the whole `Assist` RPC.
    DebugConfig *DebugConfig `protobuf:"bytes,5,opt,name=debug_config,json=debugConfig,proto3" json:"debug_config,omitempty"`
    // contains filtered or unexported fields
}
Specifies how to process the `AssistRequest` messages.
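As an illustrative sketch (not part of the generated API), the helper below populates an `AssistConfig` for a text query, again assuming the package is imported as `embedded`; the device identifiers are placeholders for values obtained during device registration.

// buildTextConfig is a hypothetical helper for a text query.
func buildTextConfig(deviceID, deviceModelID, query string) *embedded.AssistConfig {
    return &embedded.AssistConfig{
        Type: &embedded.AssistConfig_TextQuery{TextQuery: query},
        AudioOutConfig: &embedded.AudioOutConfig{
            Encoding:         embedded.AudioOutConfig_LINEAR16,
            SampleRateHertz:  16000,
            VolumePercentage: 100,
        },
        DialogStateIn: &embedded.DialogStateIn{
            LanguageCode: "en-US", // illustrative language choice
        },
        DeviceConfig: &embedded.DeviceConfig{
            DeviceId:      deviceID,      // placeholder: registered device instance id
            DeviceModelId: deviceModelID, // placeholder: registered device model id
        },
    }
}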
func (*AssistConfig) Descriptor
deprecated
func (*AssistConfig) Descriptor() ([]byte, []int)
Deprecated: Use AssistConfig.ProtoReflect.Descriptor instead.
func (*AssistConfig) GetAudioInConfig ¶
func (x *AssistConfig) GetAudioInConfig() *AudioInConfig
func (*AssistConfig) GetAudioOutConfig ¶
func (x *AssistConfig) GetAudioOutConfig() *AudioOutConfig
func (*AssistConfig) GetDebugConfig ¶
func (x *AssistConfig) GetDebugConfig() *DebugConfig
func (*AssistConfig) GetDeviceConfig ¶
func (x *AssistConfig) GetDeviceConfig() *DeviceConfig
func (*AssistConfig) GetDialogStateIn ¶
func (x *AssistConfig) GetDialogStateIn() *DialogStateIn
func (*AssistConfig) GetScreenOutConfig ¶
func (x *AssistConfig) GetScreenOutConfig() *ScreenOutConfig
func (*AssistConfig) GetTextQuery ¶
func (x *AssistConfig) GetTextQuery() string
func (*AssistConfig) GetType ¶
func (m *AssistConfig) GetType() isAssistConfig_Type
func (*AssistConfig) ProtoMessage ¶
func (*AssistConfig) ProtoMessage()
func (*AssistConfig) ProtoReflect ¶
func (x *AssistConfig) ProtoReflect() protoreflect.Message
func (*AssistConfig) Reset ¶
func (x *AssistConfig) Reset()
func (*AssistConfig) String ¶
func (x *AssistConfig) String() string
type AssistConfig_AudioInConfig ¶
type AssistConfig_AudioInConfig struct {
    // Specifies how to process the subsequent incoming audio. Required if
    // [AssistRequest.audio_in][google.assistant.embedded.v1alpha2.AssistRequest.audio_in]
    // bytes will be provided in subsequent requests.
    AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig,proto3,oneof"`
}
type AssistConfig_TextQuery ¶
type AssistConfig_TextQuery struct {
    // The text input to be sent to the Assistant. This can be populated from a
    // text interface if audio input is not available.
    TextQuery string `protobuf:"bytes,6,opt,name=text_query,json=textQuery,proto3,oneof"`
}
type AssistRequest ¶
type AssistRequest struct {
    // Exactly one of these fields must be specified in each `AssistRequest`.
    //
    // Types that are assignable to Type:
    //	*AssistRequest_Config
    //	*AssistRequest_AudioIn
    Type isAssistRequest_Type `protobuf_oneof:"type"`
    // contains filtered or unexported fields
}
The top-level message sent by the client. Clients must send at least two, and typically numerous `AssistRequest` messages. The first message must contain a `config` message and must not contain `audio_in` data. All subsequent messages must contain `audio_in` data and must not contain a `config` message.
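The sketch below illustrates that sequence on an already-opened `Assist` stream: one `config` message followed by `audio_in` chunks. The helper name, the chunk size, and the omission of real-time pacing are assumptions for illustration.

// sendAudioQuery sends the config-then-audio sequence described above.
// Note: the service expects audio at roughly real-time rate; pacing is
// omitted from this sketch.
func sendAudioQuery(stream embedded.EmbeddedAssistant_AssistClient, cfg *embedded.AssistConfig, audio io.Reader) error {
    // The first AssistRequest carries only the config.
    if err := stream.Send(&embedded.AssistRequest{
        Type: &embedded.AssistRequest_Config{Config: cfg},
    }); err != nil {
        return err
    }
    // Every subsequent AssistRequest carries only audio_in bytes.
    buf := make([]byte, 16*1024) // illustrative chunk size
    for {
        n, err := audio.Read(buf)
        if n > 0 {
            if sendErr := stream.Send(&embedded.AssistRequest{
                Type: &embedded.AssistRequest_AudioIn{AudioIn: buf[:n]},
            }); sendErr != nil {
                return sendErr
            }
        }
        if err == io.EOF {
            // Half-close to signal that no more audio is coming.
            return stream.CloseSend()
        }
        if err != nil {
            return err
        }
    }
}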
func (*AssistRequest) Descriptor
deprecated
func (*AssistRequest) Descriptor() ([]byte, []int)
Deprecated: Use AssistRequest.ProtoReflect.Descriptor instead.
func (*AssistRequest) GetAudioIn ¶
func (x *AssistRequest) GetAudioIn() []byte
func (*AssistRequest) GetConfig ¶
func (x *AssistRequest) GetConfig() *AssistConfig
func (*AssistRequest) GetType ¶
func (m *AssistRequest) GetType() isAssistRequest_Type
func (*AssistRequest) ProtoMessage ¶
func (*AssistRequest) ProtoMessage()
func (*AssistRequest) ProtoReflect ¶
func (x *AssistRequest) ProtoReflect() protoreflect.Message
func (*AssistRequest) Reset ¶
func (x *AssistRequest) Reset()
func (*AssistRequest) String ¶
func (x *AssistRequest) String() string
type AssistRequest_AudioIn ¶
type AssistRequest_AudioIn struct {
    // The audio data to be recognized. Sequential chunks of audio data are sent
    // in sequential `AssistRequest` messages. The first `AssistRequest`
    // message must not contain `audio_in` data and all subsequent
    // `AssistRequest` messages must contain `audio_in` data. The audio bytes
    // must be encoded as specified in `AudioInConfig`.
    // Audio must be sent at approximately real-time (16000 samples per second).
    // An error will be returned if audio is sent significantly faster or
    // slower.
    AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"`
}
type AssistRequest_Config ¶
type AssistRequest_Config struct {
    // The `config` message provides information to the recognizer that
    // specifies how to process the request.
    // The first `AssistRequest` message must contain a `config` message.
    Config *AssistConfig `protobuf:"bytes,1,opt,name=config,proto3,oneof"`
}
type AssistResponse ¶
type AssistResponse struct {
    // *Output-only* Indicates the type of event.
    EventType AssistResponse_EventType `` /* 154-byte string literal not displayed */
    // *Output-only* The audio containing the Assistant's response to the query.
    AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut,proto3" json:"audio_out,omitempty"`
    // *Output-only* Contains the Assistant's visual response to the query.
    ScreenOut *ScreenOut `protobuf:"bytes,4,opt,name=screen_out,json=screenOut,proto3" json:"screen_out,omitempty"`
    // *Output-only* Contains the action triggered by the query with the
    // appropriate payloads and semantic parsing.
    DeviceAction *DeviceAction `protobuf:"bytes,6,opt,name=device_action,json=deviceAction,proto3" json:"device_action,omitempty"`
    // *Output-only* This repeated list contains zero or more speech recognition
    // results that correspond to consecutive portions of the audio currently
    // being processed, starting with the portion corresponding to the earliest
    // audio (and most stable portion) to the portion corresponding to the most
    // recent audio. The strings can be concatenated to view the full
    // in-progress response. When the speech recognition completes, this list
    // will contain one item with `stability` of `1.0`.
    SpeechResults []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=speech_results,json=speechResults,proto3" json:"speech_results,omitempty"`
    // *Output-only* Contains output related to the user's query.
    DialogStateOut *DialogStateOut `protobuf:"bytes,5,opt,name=dialog_state_out,json=dialogStateOut,proto3" json:"dialog_state_out,omitempty"`
    // *Output-only* Debugging info for developer. Only returned if request set
    // `return_debug_info` to true.
    DebugInfo *DebugInfo `protobuf:"bytes,8,opt,name=debug_info,json=debugInfo,proto3" json:"debug_info,omitempty"`
    // contains filtered or unexported fields
}
The top-level message received by the client. A series of one or more `AssistResponse` messages is streamed back to the client.
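A sketch of draining that response stream is shown below; the `audioSink` destination, the logging, and the helper itself are illustrative assumptions. The returned `DialogStateOut` (if any) carries state for the next request.

// receiveResponses reads AssistResponse messages until the server closes the
// stream, writing response audio to audioSink and returning the last
// DialogStateOut seen (nil if none was received).
func receiveResponses(stream embedded.EmbeddedAssistant_AssistClient, audioSink io.Writer) (*embedded.DialogStateOut, error) {
    var state *embedded.DialogStateOut
    for {
        resp, err := stream.Recv()
        if err == io.EOF {
            return state, nil // server has finished streaming responses
        }
        if err != nil {
            return nil, err
        }
        if resp.GetEventType() == embedded.AssistResponse_END_OF_UTTERANCE {
            log.Println("end of utterance: stop sending audio")
        }
        for _, r := range resp.GetSpeechResults() {
            log.Printf("transcript %q (stability %.2f)", r.GetTranscript(), r.GetStability())
        }
        if out := resp.GetAudioOut(); out != nil {
            if _, err := audioSink.Write(out.GetAudioData()); err != nil {
                return nil, err
            }
        }
        if ds := resp.GetDialogStateOut(); ds != nil {
            state = ds
        }
    }
}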
func (*AssistResponse) Descriptor
deprecated
func (*AssistResponse) Descriptor() ([]byte, []int)
Deprecated: Use AssistResponse.ProtoReflect.Descriptor instead.
func (*AssistResponse) GetAudioOut ¶
func (x *AssistResponse) GetAudioOut() *AudioOut
func (*AssistResponse) GetDebugInfo ¶
func (x *AssistResponse) GetDebugInfo() *DebugInfo
func (*AssistResponse) GetDeviceAction ¶
func (x *AssistResponse) GetDeviceAction() *DeviceAction
func (*AssistResponse) GetDialogStateOut ¶
func (x *AssistResponse) GetDialogStateOut() *DialogStateOut
func (*AssistResponse) GetEventType ¶
func (x *AssistResponse) GetEventType() AssistResponse_EventType
func (*AssistResponse) GetScreenOut ¶
func (x *AssistResponse) GetScreenOut() *ScreenOut
func (*AssistResponse) GetSpeechResults ¶
func (x *AssistResponse) GetSpeechResults() []*SpeechRecognitionResult
func (*AssistResponse) ProtoMessage ¶
func (*AssistResponse) ProtoMessage()
func (*AssistResponse) ProtoReflect ¶
func (x *AssistResponse) ProtoReflect() protoreflect.Message
func (*AssistResponse) Reset ¶
func (x *AssistResponse) Reset()
func (*AssistResponse) String ¶
func (x *AssistResponse) String() string
type AssistResponse_EventType ¶
type AssistResponse_EventType int32
Indicates the type of event.
const (
    // No event specified.
    AssistResponse_EVENT_TYPE_UNSPECIFIED AssistResponse_EventType = 0
    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection.
    AssistResponse_END_OF_UTTERANCE AssistResponse_EventType = 1
)
func (AssistResponse_EventType) Descriptor ¶
func (AssistResponse_EventType) Descriptor() protoreflect.EnumDescriptor
func (AssistResponse_EventType) Enum ¶
func (x AssistResponse_EventType) Enum() *AssistResponse_EventType
func (AssistResponse_EventType) EnumDescriptor
deprecated
func (AssistResponse_EventType) EnumDescriptor() ([]byte, []int)
Deprecated: Use AssistResponse_EventType.Descriptor instead.
func (AssistResponse_EventType) Number ¶
func (x AssistResponse_EventType) Number() protoreflect.EnumNumber
func (AssistResponse_EventType) String ¶
func (x AssistResponse_EventType) String() string
func (AssistResponse_EventType) Type ¶
func (AssistResponse_EventType) Type() protoreflect.EnumType
type AudioInConfig ¶
type AudioInConfig struct {
    // *Required* Encoding of audio data sent in all `audio_in` messages.
    Encoding AudioInConfig_Encoding `` /* 133-byte string literal not displayed */
    // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
    // messages. Valid values are from 16000-24000, but 16000 is optimal.
    // For best results, set the sampling rate of the audio source to 16000 Hz.
    // If that's not possible, use the native sample rate of the audio source
    // (instead of re-sampling).
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
    // contains filtered or unexported fields
}
Specifies how to process the `audio_in` data that will be provided in subsequent requests. For recommended settings, see the Google Assistant SDK [best practices](https://developers.google.com/assistant/sdk/guides/service/python/best-practices/audio).
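For an audio query, the `AssistConfig_AudioInConfig` oneof arm announces that `audio_in` chunks will follow. A small sketch using the recommended 16 kHz `LINEAR16` settings (helper name is illustrative):

// audioInConfigArm returns the oneof value to place in AssistConfig.Type when
// the query will be streamed as audio rather than text.
func audioInConfigArm() *embedded.AssistConfig_AudioInConfig {
    return &embedded.AssistConfig_AudioInConfig{
        AudioInConfig: &embedded.AudioInConfig{
            Encoding:        embedded.AudioInConfig_LINEAR16, // raw 16-bit mono PCM
            SampleRateHertz: 16000,                           // recommended rate
        },
    }
}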
func (*AudioInConfig) Descriptor
deprecated
func (*AudioInConfig) Descriptor() ([]byte, []int)
Deprecated: Use AudioInConfig.ProtoReflect.Descriptor instead.
func (*AudioInConfig) GetEncoding ¶
func (x *AudioInConfig) GetEncoding() AudioInConfig_Encoding
func (*AudioInConfig) GetSampleRateHertz ¶
func (x *AudioInConfig) GetSampleRateHertz() int32
func (*AudioInConfig) ProtoMessage ¶
func (*AudioInConfig) ProtoMessage()
func (*AudioInConfig) ProtoReflect ¶
func (x *AudioInConfig) ProtoReflect() protoreflect.Message
func (*AudioInConfig) Reset ¶
func (x *AudioInConfig) Reset()
func (*AudioInConfig) String ¶
func (x *AudioInConfig) String() string
type AudioInConfig_Encoding ¶
type AudioInConfig_Encoding int32
Audio encoding of the data sent in the audio message. Audio must be one-channel (mono).
const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    // This encoding includes no header, only the raw audio bytes.
    AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1
    // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    // Codec) is the recommended encoding because it is
    // lossless--therefore recognition is not compromised--and
    // requires only about half the bandwidth of `LINEAR16`. This encoding
    // includes the `FLAC` stream header followed by audio data. It supports
    // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
    // supported.
    AudioInConfig_FLAC AudioInConfig_Encoding = 2
)
func (AudioInConfig_Encoding) Descriptor ¶
func (AudioInConfig_Encoding) Descriptor() protoreflect.EnumDescriptor
func (AudioInConfig_Encoding) Enum ¶
func (x AudioInConfig_Encoding) Enum() *AudioInConfig_Encoding
func (AudioInConfig_Encoding) EnumDescriptor
deprecated
func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)
Deprecated: Use AudioInConfig_Encoding.Descriptor instead.
func (AudioInConfig_Encoding) Number ¶
func (x AudioInConfig_Encoding) Number() protoreflect.EnumNumber
func (AudioInConfig_Encoding) String ¶
func (x AudioInConfig_Encoding) String() string
func (AudioInConfig_Encoding) Type ¶
func (AudioInConfig_Encoding) Type() protoreflect.EnumType
type AudioOut ¶
type AudioOut struct {
    // *Output-only* The audio data containing the Assistant's response to the
    // query. Sequential chunks of audio data are received in sequential
    // `AssistResponse` messages.
    AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"`
    // contains filtered or unexported fields
}
The audio containing the Assistant's response to the query. Sequential chunks of audio data are received in sequential `AssistResponse` messages.
func (*AudioOut) Descriptor
deprecated
func (*AudioOut) GetAudioData ¶
func (*AudioOut) ProtoMessage ¶
func (*AudioOut) ProtoMessage()
func (*AudioOut) ProtoReflect ¶
func (x *AudioOut) ProtoReflect() protoreflect.Message
type AudioOutConfig ¶
type AudioOutConfig struct {
    // *Required* The encoding of audio data to be returned in all `audio_out`
    // messages.
    Encoding AudioOutConfig_Encoding `` /* 134-byte string literal not displayed */
    // *Required* The sample rate in Hertz of the audio data returned in
    // `audio_out` messages. Valid values are: 16000-24000.
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
    // *Required* Current volume setting of the device's audio output.
    // Valid values are 1 to 100 (corresponding to 1% to 100%).
    VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
    // contains filtered or unexported fields
}
Specifies the desired format for the server to use when it returns `audio_out` messages.
func (*AudioOutConfig) Descriptor
deprecated
func (*AudioOutConfig) Descriptor() ([]byte, []int)
Deprecated: Use AudioOutConfig.ProtoReflect.Descriptor instead.
func (*AudioOutConfig) GetEncoding ¶
func (x *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding
func (*AudioOutConfig) GetSampleRateHertz ¶
func (x *AudioOutConfig) GetSampleRateHertz() int32
func (*AudioOutConfig) GetVolumePercentage ¶
func (x *AudioOutConfig) GetVolumePercentage() int32
func (*AudioOutConfig) ProtoMessage ¶
func (*AudioOutConfig) ProtoMessage()
func (*AudioOutConfig) ProtoReflect ¶
func (x *AudioOutConfig) ProtoReflect() protoreflect.Message
func (*AudioOutConfig) Reset ¶
func (x *AudioOutConfig) Reset()
func (*AudioOutConfig) String ¶
func (x *AudioOutConfig) String() string
type AudioOutConfig_Encoding ¶
type AudioOutConfig_Encoding int32
Audio encoding of the data returned in the audio message. All encodings are raw audio bytes with no header, except as indicated below.
const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1
    // MP3 audio encoding. The sample rate is encoded in the payload.
    AudioOutConfig_MP3 AudioOutConfig_Encoding = 2
    // Opus-encoded audio wrapped in an ogg container. The result will be a
    // file which can be played natively on Android and in some browsers (such
    // as Chrome). The quality of the encoding is considerably higher than MP3
    // while using the same bitrate. The sample rate is encoded in the payload.
    AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3
)
func (AudioOutConfig_Encoding) Descriptor ¶
func (AudioOutConfig_Encoding) Descriptor() protoreflect.EnumDescriptor
func (AudioOutConfig_Encoding) Enum ¶
func (x AudioOutConfig_Encoding) Enum() *AudioOutConfig_Encoding
func (AudioOutConfig_Encoding) EnumDescriptor
deprecated
func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)
Deprecated: Use AudioOutConfig_Encoding.Descriptor instead.
func (AudioOutConfig_Encoding) Number ¶
func (x AudioOutConfig_Encoding) Number() protoreflect.EnumNumber
func (AudioOutConfig_Encoding) String ¶
func (x AudioOutConfig_Encoding) String() string
func (AudioOutConfig_Encoding) Type ¶
func (AudioOutConfig_Encoding) Type() protoreflect.EnumType
type DebugConfig ¶
type DebugConfig struct {
    // When this field is set to true, the `debug_info` field in `AssistResponse`
    // may be populated. However it will significantly increase latency of
    // responses. Do not set this field true in production code.
    ReturnDebugInfo bool `protobuf:"varint,6,opt,name=return_debug_info,json=returnDebugInfo,proto3" json:"return_debug_info,omitempty"`
    // contains filtered or unexported fields
}
Debugging parameters for the current request.
func (*DebugConfig) Descriptor
deprecated
func (*DebugConfig) Descriptor() ([]byte, []int)
Deprecated: Use DebugConfig.ProtoReflect.Descriptor instead.
func (*DebugConfig) GetReturnDebugInfo ¶
func (x *DebugConfig) GetReturnDebugInfo() bool
func (*DebugConfig) ProtoMessage ¶
func (*DebugConfig) ProtoMessage()
func (*DebugConfig) ProtoReflect ¶
func (x *DebugConfig) ProtoReflect() protoreflect.Message
func (*DebugConfig) Reset ¶
func (x *DebugConfig) Reset()
func (*DebugConfig) String ¶
func (x *DebugConfig) String() string
type DebugInfo ¶
type DebugInfo struct {
    // The original JSON response from an Action-on-Google agent to Google server.
    // See
    // https://developers.google.com/actions/reference/rest/Shared.Types/AppResponse.
    // It will only be populated if the request maker owns the AoG project and the
    // AoG project is in preview mode.
    AogAgentToAssistantJson string `` /* 136-byte string literal not displayed */
    // contains filtered or unexported fields
}
Debugging info for the developer. Only returned if the request set `return_debug_info` to true.
func (*DebugInfo) Descriptor
deprecated
func (*DebugInfo) GetAogAgentToAssistantJson ¶
func (*DebugInfo) ProtoMessage ¶
func (*DebugInfo) ProtoMessage()
func (*DebugInfo) ProtoReflect ¶
func (x *DebugInfo) ProtoReflect() protoreflect.Message
type DeviceAction ¶
type DeviceAction struct {
    // JSON containing the device command response generated from the triggered
    // Device Action grammar. The format is given by the
    // `action.devices.EXECUTE` intent for a given
    // [trait](https://developers.google.com/assistant/sdk/reference/traits/).
    DeviceRequestJson string `protobuf:"bytes,1,opt,name=device_request_json,json=deviceRequestJson,proto3" json:"device_request_json,omitempty"`
    // contains filtered or unexported fields
}
The response returned to the device if the user has triggered a Device Action. For example, a device which supports the query *Turn on the light* would receive a `DeviceAction` with a JSON payload containing the semantics of the request.
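Because the payload is plain JSON whose shape depends on the traits the device model declares, a generic decode with the standard library's encoding/json is often enough for inspection. The helper below is an illustrative sketch, not part of the generated API.

// handleDeviceAction decodes the EXECUTE-style JSON payload, if any.
func handleDeviceAction(a *embedded.DeviceAction) error {
    if a.GetDeviceRequestJson() == "" {
        return nil // this query did not trigger a device action
    }
    var payload map[string]interface{}
    if err := json.Unmarshal([]byte(a.GetDeviceRequestJson()), &payload); err != nil {
        return err
    }
    log.Printf("device action payload: %v", payload)
    return nil
}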
func (*DeviceAction) Descriptor
deprecated
func (*DeviceAction) Descriptor() ([]byte, []int)
Deprecated: Use DeviceAction.ProtoReflect.Descriptor instead.
func (*DeviceAction) GetDeviceRequestJson ¶
func (x *DeviceAction) GetDeviceRequestJson() string
func (*DeviceAction) ProtoMessage ¶
func (*DeviceAction) ProtoMessage()
func (*DeviceAction) ProtoReflect ¶
func (x *DeviceAction) ProtoReflect() protoreflect.Message
func (*DeviceAction) Reset ¶
func (x *DeviceAction) Reset()
func (*DeviceAction) String ¶
func (x *DeviceAction) String() string
type DeviceConfig ¶
type DeviceConfig struct {
    // *Required* Unique identifier for the device. The id length must be 128
    // characters or less. Example: DBCDW098234. This MUST match the device_id
    // returned from device registration. This device_id is used to match against
    // the user's registered devices to lookup the supported traits and
    // capabilities of this device. This information should not change across
    // device reboots. However, it should not be saved across
    // factory-default resets.
    DeviceId string `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
    // *Required* Unique identifier for the device model. The combination of
    // device_model_id and device_id must have been previously associated through
    // device registration.
    DeviceModelId string `protobuf:"bytes,3,opt,name=device_model_id,json=deviceModelId,proto3" json:"device_model_id,omitempty"`
    // contains filtered or unexported fields
}
*Required* Fields that identify the device to the Assistant.
See also:
* [Register a Device - REST API](https://developers.google.com/assistant/sdk/reference/device-registration/register-device-manual)
* [Device Model and Instance Schemas](https://developers.google.com/assistant/sdk/reference/device-registration/model-and-instance-schemas)
* [Device Proto](https://developers.google.com/assistant/sdk/reference/rpc/google.assistant.devices.v1alpha2#device)
func (*DeviceConfig) Descriptor
deprecated
func (*DeviceConfig) Descriptor() ([]byte, []int)
Deprecated: Use DeviceConfig.ProtoReflect.Descriptor instead.
func (*DeviceConfig) GetDeviceId ¶
func (x *DeviceConfig) GetDeviceId() string
func (*DeviceConfig) GetDeviceModelId ¶
func (x *DeviceConfig) GetDeviceModelId() string
func (*DeviceConfig) ProtoMessage ¶
func (*DeviceConfig) ProtoMessage()
func (*DeviceConfig) ProtoReflect ¶
func (x *DeviceConfig) ProtoReflect() protoreflect.Message
func (*DeviceConfig) Reset ¶
func (x *DeviceConfig) Reset()
func (*DeviceConfig) String ¶
func (x *DeviceConfig) String() string
type DeviceLocation ¶
type DeviceLocation struct {
    // Types that are assignable to Type:
    //	*DeviceLocation_Coordinates
    Type isDeviceLocation_Type `protobuf_oneof:"type"`
    // contains filtered or unexported fields
}
There are three sources of location data. They are used with this precedence:
- This `DeviceLocation`, which is primarily used for mobile devices with GPS; a sketch of populating it follows this list.
- Location specified by the user during device setup; this is per-user, per-device. This location is used if `DeviceLocation` is not specified.
- Inferred location based on IP address. This is used only if neither of the above is specified.
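The sketch below supplies explicit coordinates for the first source, assuming `latlng` refers to `google.golang.org/genproto/googleapis/type/latlng`; the helper name is illustrative.

// deviceLocation wraps explicit GPS coordinates in the oneof expected by
// DialogStateIn.DeviceLocation.
func deviceLocation(lat, lng float64) *embedded.DeviceLocation {
    return &embedded.DeviceLocation{
        Type: &embedded.DeviceLocation_Coordinates{
            Coordinates: &latlng.LatLng{Latitude: lat, Longitude: lng},
        },
    }
}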
func (*DeviceLocation) Descriptor
deprecated
func (*DeviceLocation) Descriptor() ([]byte, []int)
Deprecated: Use DeviceLocation.ProtoReflect.Descriptor instead.
func (*DeviceLocation) GetCoordinates ¶
func (x *DeviceLocation) GetCoordinates() *latlng.LatLng
func (*DeviceLocation) GetType ¶
func (m *DeviceLocation) GetType() isDeviceLocation_Type
func (*DeviceLocation) ProtoMessage ¶
func (*DeviceLocation) ProtoMessage()
func (*DeviceLocation) ProtoReflect ¶
func (x *DeviceLocation) ProtoReflect() protoreflect.Message
func (*DeviceLocation) Reset ¶
func (x *DeviceLocation) Reset()
func (*DeviceLocation) String ¶
func (x *DeviceLocation) String() string
type DialogStateIn ¶
type DialogStateIn struct {
    // *Required* This field must always be set to the
    // [DialogStateOut.conversation_state][google.assistant.embedded.v1alpha2.DialogStateOut.conversation_state]
    // value that was returned in the prior `Assist` RPC. It should only be
    // omitted (field not set) if there was no prior `Assist` RPC because this is
    // the first `Assist` RPC made by this device after it was first setup and/or
    // a factory-default reset.
    ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
    // *Required* Language of the request in
    // [IETF BCP 47 syntax](https://tools.ietf.org/html/bcp47) (for example,
    // "en-US"). See [Language
    // Support](https://developers.google.com/assistant/sdk/reference/rpc/languages)
    // for more information. If you have selected a language for this `device_id`
    // using the
    // [Settings](https://developers.google.com/assistant/sdk/reference/assistant-app/assistant-settings)
    // menu in your phone's Google Assistant app, that selection will override
    // this value.
    LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // *Optional* Location of the device where the query originated.
    DeviceLocation *DeviceLocation `protobuf:"bytes,5,opt,name=device_location,json=deviceLocation,proto3" json:"device_location,omitempty"`
    // *Optional* If true, the server will treat the request as a new conversation
    // and not use state from the prior request. Set this field to true when the
    // conversation should be restarted, such as after a device reboot, or after a
    // significant lapse of time since the prior query.
    IsNewConversation bool `protobuf:"varint,7,opt,name=is_new_conversation,json=isNewConversation,proto3" json:"is_new_conversation,omitempty"`
    // contains filtered or unexported fields
}
Provides information about the current dialog state.
func (*DialogStateIn) Descriptor
deprecated
func (*DialogStateIn) Descriptor() ([]byte, []int)
Deprecated: Use DialogStateIn.ProtoReflect.Descriptor instead.
func (*DialogStateIn) GetConversationState ¶
func (x *DialogStateIn) GetConversationState() []byte
func (*DialogStateIn) GetDeviceLocation ¶
func (x *DialogStateIn) GetDeviceLocation() *DeviceLocation
func (*DialogStateIn) GetIsNewConversation ¶
func (x *DialogStateIn) GetIsNewConversation() bool
func (*DialogStateIn) GetLanguageCode ¶
func (x *DialogStateIn) GetLanguageCode() string
func (*DialogStateIn) ProtoMessage ¶
func (*DialogStateIn) ProtoMessage()
func (*DialogStateIn) ProtoReflect ¶
func (x *DialogStateIn) ProtoReflect() protoreflect.Message
func (*DialogStateIn) Reset ¶
func (x *DialogStateIn) Reset()
func (*DialogStateIn) String ¶
func (x *DialogStateIn) String() string
type DialogStateOut ¶
type DialogStateOut struct {
    // *Output-only* Supplemental display text from the Assistant. This could be
    // the same as the speech spoken in `AssistResponse.audio_out` or it could
    // be some additional information which aids the user's understanding.
    SupplementalDisplayText string `` /* 132-byte string literal not displayed */
    // *Output-only* State information for the subsequent `Assist` RPC. This
    // value should be saved in the client and returned in the
    // [`DialogStateIn.conversation_state`](#dialogstatein) field with the next
    // `Assist` RPC. (The client does not need to interpret or otherwise use this
    // value.) This information should be saved across device reboots. However,
    // this value should be cleared (not saved in the client) during a
    // factory-default reset.
    ConversationState []byte `protobuf:"bytes,2,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
    // *Output-only* Specifies the mode of the microphone after this `Assist`
    // RPC is processed.
    MicrophoneMode DialogStateOut_MicrophoneMode `` /* 174-byte string literal not displayed */
    // *Output-only* Updated volume level. The value will be 0 or omitted
    // (indicating no change) unless a voice command such as *Increase the volume*
    // or *Set volume level 4* was recognized, in which case the value will be
    // between 1 and 100 (corresponding to the new volume level of 1% to 100%).
    // Typically, a client should use this volume level when playing the
    // `audio_out` data, and retain this value as the current volume level and
    // supply it in the `AudioOutConfig` of the next `AssistRequest`. (Some
    // clients may also implement other ways to allow the current volume level to
    // be changed, for example, by providing a knob that the user can turn.)
    VolumePercentage int32 `protobuf:"varint,4,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
    // contains filtered or unexported fields
}
The dialog state resulting from the user's query. Multiple of these messages may be received.
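The sketch below shows one way to fold a received `DialogStateOut` into the next turn, as described above: echo `conversation_state` back in `DialogStateIn`, treat a non-zero `volume_percentage` as the new current volume, and reopen the microphone when the mode is `DIALOG_FOLLOW_ON`. The helper and its signature are illustrative assumptions.

// nextTurn prepares the DialogStateIn for the following Assist RPC and reports
// whether the client should immediately listen for a follow-on query.
func nextTurn(prev *embedded.DialogStateOut, currentVolume *int32) (*embedded.DialogStateIn, bool) {
    next := &embedded.DialogStateIn{
        LanguageCode:      "en-US", // illustrative; use the device's configured language
        ConversationState: prev.GetConversationState(),
    }
    if v := prev.GetVolumePercentage(); v != 0 {
        *currentVolume = v // retain and reuse in the next AudioOutConfig
    }
    followOn := prev.GetMicrophoneMode() == embedded.DialogStateOut_DIALOG_FOLLOW_ON
    return next, followOn
}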
func (*DialogStateOut) Descriptor
deprecated
func (*DialogStateOut) Descriptor() ([]byte, []int)
Deprecated: Use DialogStateOut.ProtoReflect.Descriptor instead.
func (*DialogStateOut) GetConversationState ¶
func (x *DialogStateOut) GetConversationState() []byte
func (*DialogStateOut) GetMicrophoneMode ¶
func (x *DialogStateOut) GetMicrophoneMode() DialogStateOut_MicrophoneMode
func (*DialogStateOut) GetSupplementalDisplayText ¶
func (x *DialogStateOut) GetSupplementalDisplayText() string
func (*DialogStateOut) GetVolumePercentage ¶
func (x *DialogStateOut) GetVolumePercentage() int32
func (*DialogStateOut) ProtoMessage ¶
func (*DialogStateOut) ProtoMessage()
func (*DialogStateOut) ProtoReflect ¶
func (x *DialogStateOut) ProtoReflect() protoreflect.Message
func (*DialogStateOut) Reset ¶
func (x *DialogStateOut) Reset()
func (*DialogStateOut) String ¶
func (x *DialogStateOut) String() string
type DialogStateOut_MicrophoneMode ¶
type DialogStateOut_MicrophoneMode int32
Possible states of the microphone after an `Assist` RPC completes.
const (
    // No mode specified.
    DialogStateOut_MICROPHONE_MODE_UNSPECIFIED DialogStateOut_MicrophoneMode = 0
    // The service is not expecting a follow-on question from the user.
    // The microphone should remain off until the user re-activates it.
    DialogStateOut_CLOSE_MICROPHONE DialogStateOut_MicrophoneMode = 1
    // The service is expecting a follow-on question from the user. The
    // microphone should be re-opened when the `AudioOut` playback completes
    // (by starting a new `Assist` RPC call to send the new audio).
    DialogStateOut_DIALOG_FOLLOW_ON DialogStateOut_MicrophoneMode = 2
)
func (DialogStateOut_MicrophoneMode) Descriptor ¶
func (DialogStateOut_MicrophoneMode) Descriptor() protoreflect.EnumDescriptor
func (DialogStateOut_MicrophoneMode) Enum ¶
func (x DialogStateOut_MicrophoneMode) Enum() *DialogStateOut_MicrophoneMode
func (DialogStateOut_MicrophoneMode) EnumDescriptor
deprecated
func (DialogStateOut_MicrophoneMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use DialogStateOut_MicrophoneMode.Descriptor instead.
func (DialogStateOut_MicrophoneMode) Number ¶
func (x DialogStateOut_MicrophoneMode) Number() protoreflect.EnumNumber
func (DialogStateOut_MicrophoneMode) String ¶
func (x DialogStateOut_MicrophoneMode) String() string
func (DialogStateOut_MicrophoneMode) Type ¶
func (DialogStateOut_MicrophoneMode) Type() protoreflect.EnumType
type EmbeddedAssistantClient ¶
type EmbeddedAssistantClient interface {
    // Initiates or continues a conversation with the embedded Assistant Service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the Assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // * AssistRequest.config
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistResponse.event_type.END_OF_UTTERANCE
    // * AssistResponse.speech_results.transcript "add to my shopping list"
    // * AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    //
    // The user then says *bagels* and the Assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Assist` method, again with streamed requests and
    // responses, such as:
    //
    // * AssistRequest.config
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistResponse.event_type.END_OF_UTTERANCE
    // * AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // `AssistResponse.audio_out` messages will always contain sequential portions
    // of audio.
    Assist(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_AssistClient, error)
}
EmbeddedAssistantClient is the client API for EmbeddedAssistant service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewEmbeddedAssistantClient ¶
func NewEmbeddedAssistantClient(cc grpc.ClientConnInterface) EmbeddedAssistantClient
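A connection sketch is shown below. The `embeddedassistant.googleapis.com:443` endpoint, the OAuth2 token source, and the TLS/credentials setup (via `crypto/tls`, `golang.org/x/oauth2`, `google.golang.org/grpc/credentials`, and `google.golang.org/grpc/credentials/oauth`) are assumptions about how the hosted service is typically reached, not something this package defines.

// newAssistantClient dials the (assumed) public endpoint with TLS and
// per-RPC OAuth credentials, returning the generated client.
func newAssistantClient(ctx context.Context, ts oauth2.TokenSource) (embedded.EmbeddedAssistantClient, *grpc.ClientConn, error) {
    conn, err := grpc.DialContext(ctx, "embeddedassistant.googleapis.com:443", // assumed endpoint
        grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
        grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: ts}),
    )
    if err != nil {
        return nil, nil, err
    }
    return embedded.NewEmbeddedAssistantClient(conn), conn, nil
}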
type EmbeddedAssistantServer ¶
type EmbeddedAssistantServer interface {
    // Initiates or continues a conversation with the embedded Assistant Service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the Assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // * AssistRequest.config
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistResponse.event_type.END_OF_UTTERANCE
    // * AssistResponse.speech_results.transcript "add to my shopping list"
    // * AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    //
    // The user then says *bagels* and the Assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Assist` method, again with streamed requests and
    // responses, such as:
    //
    // * AssistRequest.config
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistRequest.audio_in
    // * AssistResponse.event_type.END_OF_UTTERANCE
    // * AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    // * AssistResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // `AssistResponse.audio_out` messages will always contain sequential portions
    // of audio.
    Assist(EmbeddedAssistant_AssistServer) error
}
EmbeddedAssistantServer is the server API for EmbeddedAssistant service.
type EmbeddedAssistant_AssistClient ¶
type EmbeddedAssistant_AssistClient interface {
    Send(*AssistRequest) error
    Recv() (*AssistResponse, error)
    grpc.ClientStream
}
type EmbeddedAssistant_AssistServer ¶
type EmbeddedAssistant_AssistServer interface {
    Send(*AssistResponse) error
    Recv() (*AssistRequest, error)
    grpc.ServerStream
}
type ScreenOut ¶
type ScreenOut struct {
    // *Output-only* The format of the provided screen data.
    Format ScreenOut_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.assistant.embedded.v1alpha2.ScreenOut_Format" json:"format,omitempty"`
    // *Output-only* The raw screen data to be displayed as the result of the
    // Assistant query.
    Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
    // contains filtered or unexported fields
}
The Assistant's visual output response to the query. Enabled by `screen_out_config`.
func (*ScreenOut) Descriptor
deprecated
func (*ScreenOut) GetFormat ¶
func (x *ScreenOut) GetFormat() ScreenOut_Format
func (*ScreenOut) ProtoMessage ¶
func (*ScreenOut) ProtoMessage()
func (*ScreenOut) ProtoReflect ¶
func (x *ScreenOut) ProtoReflect() protoreflect.Message
type ScreenOutConfig ¶
type ScreenOutConfig struct {
    // Current visual screen-mode for the device while issuing the query.
    ScreenMode ScreenOutConfig_ScreenMode `` /* 159-byte string literal not displayed */
    // contains filtered or unexported fields
}
Specifies the desired format for the server to use when it returns a `screen_out` response.
func (*ScreenOutConfig) Descriptor
deprecated
func (*ScreenOutConfig) Descriptor() ([]byte, []int)
Deprecated: Use ScreenOutConfig.ProtoReflect.Descriptor instead.
func (*ScreenOutConfig) GetScreenMode ¶
func (x *ScreenOutConfig) GetScreenMode() ScreenOutConfig_ScreenMode
func (*ScreenOutConfig) ProtoMessage ¶
func (*ScreenOutConfig) ProtoMessage()
func (*ScreenOutConfig) ProtoReflect ¶
func (x *ScreenOutConfig) ProtoReflect() protoreflect.Message
func (*ScreenOutConfig) Reset ¶
func (x *ScreenOutConfig) Reset()
func (*ScreenOutConfig) String ¶
func (x *ScreenOutConfig) String() string
type ScreenOutConfig_ScreenMode ¶
type ScreenOutConfig_ScreenMode int32
Possible modes for visual screen-output on the device.
const (
    // No video mode specified.
    // The Assistant may respond as if in `OFF` mode.
    ScreenOutConfig_SCREEN_MODE_UNSPECIFIED ScreenOutConfig_ScreenMode = 0
    // Screen is off (or has brightness or other settings set so low it is
    // not visible). The Assistant will typically not return a screen response
    // in this mode.
    ScreenOutConfig_OFF ScreenOutConfig_ScreenMode = 1
    // The Assistant will typically return a partial-screen response in this
    // mode.
    ScreenOutConfig_PLAYING ScreenOutConfig_ScreenMode = 3
)
func (ScreenOutConfig_ScreenMode) Descriptor ¶
func (ScreenOutConfig_ScreenMode) Descriptor() protoreflect.EnumDescriptor
func (ScreenOutConfig_ScreenMode) Enum ¶
func (x ScreenOutConfig_ScreenMode) Enum() *ScreenOutConfig_ScreenMode
func (ScreenOutConfig_ScreenMode) EnumDescriptor
deprecated
func (ScreenOutConfig_ScreenMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use ScreenOutConfig_ScreenMode.Descriptor instead.
func (ScreenOutConfig_ScreenMode) Number ¶
func (x ScreenOutConfig_ScreenMode) Number() protoreflect.EnumNumber
func (ScreenOutConfig_ScreenMode) String ¶
func (x ScreenOutConfig_ScreenMode) String() string
func (ScreenOutConfig_ScreenMode) Type ¶
func (ScreenOutConfig_ScreenMode) Type() protoreflect.EnumType
type ScreenOut_Format ¶
type ScreenOut_Format int32
Possible formats of the screen data.
const (
    // No format specified.
    ScreenOut_FORMAT_UNSPECIFIED ScreenOut_Format = 0
    // Data will contain a fully-formed HTML5 layout encoded in UTF-8, e.g.
    // `<html><body><div>...</div></body></html>`. It is intended to be rendered
    // along with the audio response. Note that HTML5 doctype should be included
    // in the actual HTML data.
    ScreenOut_HTML ScreenOut_Format = 1
)
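Since `HTML` data is a fully-formed UTF-8 HTML document, one simple way to inspect it is to write it to a file and open it in a browser; the helper and file name below are illustrative assumptions.

// saveScreenOut writes HTML screen data to disk for inspection.
func saveScreenOut(s *embedded.ScreenOut) error {
    if s == nil || s.GetFormat() != embedded.ScreenOut_HTML {
        return nil // nothing renderable in this sketch
    }
    return os.WriteFile("screen_out.html", s.Data, 0o644)
}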
func (ScreenOut_Format) Descriptor ¶
func (ScreenOut_Format) Descriptor() protoreflect.EnumDescriptor
func (ScreenOut_Format) Enum ¶
func (x ScreenOut_Format) Enum() *ScreenOut_Format
func (ScreenOut_Format) EnumDescriptor
deprecated
func (ScreenOut_Format) EnumDescriptor() ([]byte, []int)
Deprecated: Use ScreenOut_Format.Descriptor instead.
func (ScreenOut_Format) Number ¶
func (x ScreenOut_Format) Number() protoreflect.EnumNumber
func (ScreenOut_Format) String ¶
func (x ScreenOut_Format) String() string
func (ScreenOut_Format) Type ¶
func (ScreenOut_Format) Type() protoreflect.EnumType
type SpeechRecognitionResult ¶
type SpeechRecognitionResult struct {
    // *Output-only* Transcript text representing the words that the user spoke.
    Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
    // *Output-only* An estimate of the likelihood that the Assistant will not
    // change its guess about this result. Values range from 0.0 (completely
    // unstable) to 1.0 (completely stable and final). The default of 0.0 is a
    // sentinel value indicating `stability` was not set.
    Stability float32 `protobuf:"fixed32,2,opt,name=stability,proto3" json:"stability,omitempty"`
    // contains filtered or unexported fields
}
The estimated transcription of a phrase the user has spoken. This could be a single segment or the full guess of the user's spoken query.
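Per the field documentation, in-progress results correspond to consecutive portions of audio and can be concatenated; a small sketch (the helper name is illustrative):

// fullTranscript joins the partial results from a single AssistResponse into
// the current best guess of the user's query.
func fullTranscript(results []*embedded.SpeechRecognitionResult) string {
    var b strings.Builder
    for _, r := range results {
        b.WriteString(r.GetTranscript())
    }
    return b.String()
}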
func (*SpeechRecognitionResult) Descriptor
deprecated
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int)
Deprecated: Use SpeechRecognitionResult.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionResult) GetStability ¶
func (x *SpeechRecognitionResult) GetStability() float32
func (*SpeechRecognitionResult) GetTranscript ¶
func (x *SpeechRecognitionResult) GetTranscript() string
func (*SpeechRecognitionResult) ProtoMessage ¶
func (*SpeechRecognitionResult) ProtoMessage()
func (*SpeechRecognitionResult) ProtoReflect ¶
func (x *SpeechRecognitionResult) ProtoReflect() protoreflect.Message
func (*SpeechRecognitionResult) Reset ¶
func (x *SpeechRecognitionResult) Reset()
func (*SpeechRecognitionResult) String ¶
func (x *SpeechRecognitionResult) String() string
type UnimplementedEmbeddedAssistantServer ¶
type UnimplementedEmbeddedAssistantServer struct { }
UnimplementedEmbeddedAssistantServer can be embedded to have forward compatible implementations.
func (*UnimplementedEmbeddedAssistantServer) Assist ¶
func (*UnimplementedEmbeddedAssistantServer) Assist(EmbeddedAssistant_AssistServer) error