embedded

package v0.0.0-...-bd15449
Published: Dec 19, 2024 License: Apache-2.0 Imports: 10 Imported by: 1

Documentation

Index

Constants

This section is empty.

Variables

var (
	AudioInConfig_Encoding_name = map[int32]string{
		0: "ENCODING_UNSPECIFIED",
		1: "LINEAR16",
		2: "FLAC",
	}
	AudioInConfig_Encoding_value = map[string]int32{
		"ENCODING_UNSPECIFIED": 0,
		"LINEAR16":             1,
		"FLAC":                 2,
	}
)

Enum value maps for AudioInConfig_Encoding.

var (
	AudioOutConfig_Encoding_name = map[int32]string{
		0: "ENCODING_UNSPECIFIED",
		1: "LINEAR16",
		2: "MP3",
		3: "OPUS_IN_OGG",
	}
	AudioOutConfig_Encoding_value = map[string]int32{
		"ENCODING_UNSPECIFIED": 0,
		"LINEAR16":             1,
		"MP3":                  2,
		"OPUS_IN_OGG":          3,
	}
)

Enum value maps for AudioOutConfig_Encoding.

var (
	ConverseResult_MicrophoneMode_name = map[int32]string{
		0: "MICROPHONE_MODE_UNSPECIFIED",
		1: "CLOSE_MICROPHONE",
		2: "DIALOG_FOLLOW_ON",
	}
	ConverseResult_MicrophoneMode_value = map[string]int32{
		"MICROPHONE_MODE_UNSPECIFIED": 0,
		"CLOSE_MICROPHONE":            1,
		"DIALOG_FOLLOW_ON":            2,
	}
)

Enum value maps for ConverseResult_MicrophoneMode.

var (
	ConverseResponse_EventType_name = map[int32]string{
		0: "EVENT_TYPE_UNSPECIFIED",
		1: "END_OF_UTTERANCE",
	}
	ConverseResponse_EventType_value = map[string]int32{
		"EVENT_TYPE_UNSPECIFIED": 0,
		"END_OF_UTTERANCE":       1,
	}
)

Enum value maps for ConverseResponse_EventType.

var File_google_assistant_embedded_v1alpha1_embedded_assistant_proto protoreflect.FileDescriptor

Functions

func RegisterEmbeddedAssistantServer

func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)
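
Example (an illustrative sketch, not part of the generated API): wiring the service into a gRPC server. The import path, listener address, and server type are assumptions; a real implementation would override Converse rather than rely on the embedded UnimplementedEmbeddedAssistantServer.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	// Assumed import path; adjust to the module that hosts this package.
	embedded "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1"
)

// assistantServer is a placeholder implementation. Embedding
// UnimplementedEmbeddedAssistantServer satisfies EmbeddedAssistantServer and
// keeps the type forward compatible; a real server would override Converse.
type assistantServer struct {
	embedded.UnimplementedEmbeddedAssistantServer
}

func main() {
	lis, err := net.Listen("tcp", ":10000") // arbitrary example address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	embedded.RegisterEmbeddedAssistantServer(s, &assistantServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}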

Types

type AudioInConfig

type AudioInConfig struct {

	// *Required* Encoding of audio data sent in all `audio_in` messages.
	Encoding AudioInConfig_Encoding `` /* 133-byte string literal not displayed */
	// *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
	// messages. Valid values are from 16000-24000, but 16000 is optimal.
	// For best results, set the sampling rate of the audio source to 16000 Hz.
	// If that's not possible, use the native sample rate of the audio source
	// (instead of re-sampling).
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// contains filtered or unexported fields
}

Specifies how to process the `audio_in` data that will be provided in subsequent requests. For recommended settings, see the Google Assistant SDK [best practices](https://developers.google.com/assistant/sdk/develop/grpc/best-practices/audio).

func (*AudioInConfig) Descriptor deprecated

func (*AudioInConfig) Descriptor() ([]byte, []int)

Deprecated: Use AudioInConfig.ProtoReflect.Descriptor instead.

func (*AudioInConfig) GetEncoding

func (x *AudioInConfig) GetEncoding() AudioInConfig_Encoding

func (*AudioInConfig) GetSampleRateHertz

func (x *AudioInConfig) GetSampleRateHertz() int32

func (*AudioInConfig) ProtoMessage

func (*AudioInConfig) ProtoMessage()

func (*AudioInConfig) ProtoReflect

func (x *AudioInConfig) ProtoReflect() protoreflect.Message

func (*AudioInConfig) Reset

func (x *AudioInConfig) Reset()

func (*AudioInConfig) String

func (x *AudioInConfig) String() string

type AudioInConfig_Encoding

type AudioInConfig_Encoding int32

Audio encoding of the data sent in the audio message. Audio must be one-channel (mono). The only language supported is "en-US".

const (
	// Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
	AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	// This encoding includes no header, only the raw audio bytes.
	AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1
	// [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. This encoding
	// includes the `FLAC` stream header followed by audio data. It supports
	// 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
	// supported.
	AudioInConfig_FLAC AudioInConfig_Encoding = 2
)

func (AudioInConfig_Encoding) Descriptor

func (AudioInConfig_Encoding) Enum

func (AudioInConfig_Encoding) EnumDescriptor deprecated

func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioInConfig_Encoding.Descriptor instead.

func (AudioInConfig_Encoding) Number

func (AudioInConfig_Encoding) String

func (x AudioInConfig_Encoding) String() string

func (AudioInConfig_Encoding) Type

type AudioOut

type AudioOut struct {

	// *Output-only* The audio data containing the assistant's response to the
	// query. Sequential chunks of audio data are received in sequential
	// `ConverseResponse` messages.
	AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"`
	// contains filtered or unexported fields
}

The audio containing the assistant's response to the query. Sequential chunks of audio data are received in sequential `ConverseResponse` messages.

func (*AudioOut) Descriptor deprecated

func (*AudioOut) Descriptor() ([]byte, []int)

Deprecated: Use AudioOut.ProtoReflect.Descriptor instead.

func (*AudioOut) GetAudioData

func (x *AudioOut) GetAudioData() []byte

func (*AudioOut) ProtoMessage

func (*AudioOut) ProtoMessage()

func (*AudioOut) ProtoReflect

func (x *AudioOut) ProtoReflect() protoreflect.Message

func (*AudioOut) Reset

func (x *AudioOut) Reset()

func (*AudioOut) String

func (x *AudioOut) String() string

type AudioOutConfig

type AudioOutConfig struct {

	// *Required* The encoding of audio data to be returned in all `audio_out`
	// messages.
	Encoding AudioOutConfig_Encoding `` /* 134-byte string literal not displayed */
	// *Required* The sample rate in Hertz of the audio data returned in
	// `audio_out` messages. Valid values are: 16000-24000.
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// *Required* Current volume setting of the device's audio output.
	// Valid values are 1 to 100 (corresponding to 1% to 100%).
	VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
	// contains filtered or unexported fields
}

Specifies the desired format for the server to use when it returns `audio_out` messages.

func (*AudioOutConfig) Descriptor deprecated

func (*AudioOutConfig) Descriptor() ([]byte, []int)

Deprecated: Use AudioOutConfig.ProtoReflect.Descriptor instead.

func (*AudioOutConfig) GetEncoding

func (x *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding

func (*AudioOutConfig) GetSampleRateHertz

func (x *AudioOutConfig) GetSampleRateHertz() int32

func (*AudioOutConfig) GetVolumePercentage

func (x *AudioOutConfig) GetVolumePercentage() int32

func (*AudioOutConfig) ProtoMessage

func (*AudioOutConfig) ProtoMessage()

func (*AudioOutConfig) ProtoReflect

func (x *AudioOutConfig) ProtoReflect() protoreflect.Message

func (*AudioOutConfig) Reset

func (x *AudioOutConfig) Reset()

func (*AudioOutConfig) String

func (x *AudioOutConfig) String() string

type AudioOutConfig_Encoding

type AudioOutConfig_Encoding int32

Audio encoding of the data returned in the audio message. All encodings are raw audio bytes with no header, except as indicated below.

const (
	// Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
	AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1
	// MP3 audio encoding. The sample rate is encoded in the payload.
	AudioOutConfig_MP3 AudioOutConfig_Encoding = 2
	// Opus-encoded audio wrapped in an ogg container. The result will be a
	// file which can be played natively on Android and in some browsers (such
	// as Chrome). The quality of the encoding is considerably higher than MP3
	// while using the same bitrate. The sample rate is encoded in the payload.
	AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3
)

func (AudioOutConfig_Encoding) Descriptor

func (AudioOutConfig_Encoding) Enum

func (AudioOutConfig_Encoding) EnumDescriptor deprecated

func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioOutConfig_Encoding.Descriptor instead.

func (AudioOutConfig_Encoding) Number

func (AudioOutConfig_Encoding) String

func (x AudioOutConfig_Encoding) String() string

func (AudioOutConfig_Encoding) Type

type ConverseConfig

type ConverseConfig struct {

	// *Required* Specifies how to process the subsequent incoming audio.
	AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig,proto3" json:"audio_in_config,omitempty"`
	// *Required* Specifies how to format the audio that will be returned.
	AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig,proto3" json:"audio_out_config,omitempty"`
	// *Required* Represents the current dialog state.
	ConverseState *ConverseState `protobuf:"bytes,3,opt,name=converse_state,json=converseState,proto3" json:"converse_state,omitempty"`
	// contains filtered or unexported fields
}

Specifies how to process the `ConverseRequest` messages.
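
Example (illustrative sketch): assembling a ConverseConfig for the first request of a conversation, using values the field comments recommend (16 kHz LINEAR16 input, MP3 output). The embedded import alias and the savedState and volumePercentage arguments are assumptions.

// buildConverseConfig builds the config carried by the first ConverseRequest.
// savedState is the conversation_state returned by a prior ConverseResult; it
// is nil for a brand-new conversation.
func buildConverseConfig(savedState []byte, volumePercentage int32) *embedded.ConverseConfig {
	return &embedded.ConverseConfig{
		AudioInConfig: &embedded.AudioInConfig{
			Encoding:        embedded.AudioInConfig_LINEAR16,
			SampleRateHertz: 16000, // recommended capture rate
		},
		AudioOutConfig: &embedded.AudioOutConfig{
			Encoding:         embedded.AudioOutConfig_MP3,
			SampleRateHertz:  16000,
			VolumePercentage: volumePercentage, // current device volume, 1-100
		},
		ConverseState: &embedded.ConverseState{
			// Left unset (nil) when there was no prior ConverseResponse.
			ConversationState: savedState,
		},
	}
}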

func (*ConverseConfig) Descriptor deprecated

func (*ConverseConfig) Descriptor() ([]byte, []int)

Deprecated: Use ConverseConfig.ProtoReflect.Descriptor instead.

func (*ConverseConfig) GetAudioInConfig

func (x *ConverseConfig) GetAudioInConfig() *AudioInConfig

func (*ConverseConfig) GetAudioOutConfig

func (x *ConverseConfig) GetAudioOutConfig() *AudioOutConfig

func (*ConverseConfig) GetConverseState

func (x *ConverseConfig) GetConverseState() *ConverseState

func (*ConverseConfig) ProtoMessage

func (*ConverseConfig) ProtoMessage()

func (*ConverseConfig) ProtoReflect

func (x *ConverseConfig) ProtoReflect() protoreflect.Message

func (*ConverseConfig) Reset

func (x *ConverseConfig) Reset()

func (*ConverseConfig) String

func (x *ConverseConfig) String() string

type ConverseRequest

type ConverseRequest struct {

	// Exactly one of these fields must be specified in each `ConverseRequest`.
	//
	// Types that are assignable to ConverseRequest:
	//
	//	*ConverseRequest_Config
	//	*ConverseRequest_AudioIn
	ConverseRequest isConverseRequest_ConverseRequest `protobuf_oneof:"converse_request"`
	// contains filtered or unexported fields
}

The top-level message sent by the client. Clients must send at least two, and typically numerous, `ConverseRequest` messages. The first message must contain a `config` message and must not contain `audio_in` data. All subsequent messages must contain `audio_in` data and must not contain a `config` message.
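
Example (illustrative sketch): the two request shapes described above, built through the oneof wrapper types. The embedded import alias is an assumption.

// configRequest wraps a ConverseConfig in the first ConverseRequest of a stream.
func configRequest(cfg *embedded.ConverseConfig) *embedded.ConverseRequest {
	return &embedded.ConverseRequest{
		ConverseRequest: &embedded.ConverseRequest_Config{Config: cfg},
	}
}

// audioRequest wraps one chunk of captured audio in a follow-up ConverseRequest.
func audioRequest(chunk []byte) *embedded.ConverseRequest {
	return &embedded.ConverseRequest{
		ConverseRequest: &embedded.ConverseRequest_AudioIn{AudioIn: chunk},
	}
}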

func (*ConverseRequest) Descriptor deprecated

func (*ConverseRequest) Descriptor() ([]byte, []int)

Deprecated: Use ConverseRequest.ProtoReflect.Descriptor instead.

func (*ConverseRequest) GetAudioIn

func (x *ConverseRequest) GetAudioIn() []byte

func (*ConverseRequest) GetConfig

func (x *ConverseRequest) GetConfig() *ConverseConfig

func (*ConverseRequest) GetConverseRequest

func (m *ConverseRequest) GetConverseRequest() isConverseRequest_ConverseRequest

func (*ConverseRequest) ProtoMessage

func (*ConverseRequest) ProtoMessage()

func (*ConverseRequest) ProtoReflect

func (x *ConverseRequest) ProtoReflect() protoreflect.Message

func (*ConverseRequest) Reset

func (x *ConverseRequest) Reset()

func (*ConverseRequest) String

func (x *ConverseRequest) String() string

type ConverseRequest_AudioIn

type ConverseRequest_AudioIn struct {
	// The audio data to be recognized. Sequential chunks of audio data are sent
	// in sequential `ConverseRequest` messages. The first `ConverseRequest`
	// message must not contain `audio_in` data and all subsequent
	// `ConverseRequest` messages must contain `audio_in` data. The audio bytes
	// must be encoded as specified in `AudioInConfig`.
	// Audio must be sent at approximately real-time (16000 samples per second).
	// An error will be returned if audio is sent significantly faster or
	// slower.
	AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"`
}

type ConverseRequest_Config

type ConverseRequest_Config struct {
	// The `config` message provides information to the recognizer that
	// specifies how to process the request.
	// The first `ConverseRequest` message must contain a `config` message.
	Config *ConverseConfig `protobuf:"bytes,1,opt,name=config,proto3,oneof"`
}

type ConverseResponse

type ConverseResponse struct {

	// Exactly one of these fields will be populated in each `ConverseResponse`.
	//
	// Types that are assignable to ConverseResponse:
	//
	//	*ConverseResponse_Error
	//	*ConverseResponse_EventType_
	//	*ConverseResponse_AudioOut
	//	*ConverseResponse_Result
	ConverseResponse isConverseResponse_ConverseResponse `protobuf_oneof:"converse_response"`
	// contains filtered or unexported fields
}

The top-level message received by the client. A series of one or more `ConverseResponse` messages are streamed back to the client.
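
Example (illustrative sketch): dispatching one received message by switching on the oneof. The handleResponse function and the play callback are assumptions; the standard log package is assumed to be imported.

// handleResponse dispatches one ConverseResponse. play is whatever the device
// uses to output audio and is not part of this package.
func handleResponse(resp *embedded.ConverseResponse, play func([]byte)) {
	switch r := resp.GetConverseResponse().(type) {
	case *embedded.ConverseResponse_EventType_:
		log.Printf("event: %v", r.EventType)
	case *embedded.ConverseResponse_AudioOut:
		play(r.AudioOut.GetAudioData())
	case *embedded.ConverseResponse_Result:
		log.Printf("transcript: %q", r.Result.GetSpokenRequestText())
	case *embedded.ConverseResponse_Error:
		log.Printf("server error: %v", r.Error)
	}
}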

func (*ConverseResponse) Descriptor deprecated

func (*ConverseResponse) Descriptor() ([]byte, []int)

Deprecated: Use ConverseResponse.ProtoReflect.Descriptor instead.

func (*ConverseResponse) GetAudioOut

func (x *ConverseResponse) GetAudioOut() *AudioOut

func (*ConverseResponse) GetConverseResponse

func (m *ConverseResponse) GetConverseResponse() isConverseResponse_ConverseResponse

func (*ConverseResponse) GetError

func (x *ConverseResponse) GetError() *status.Status

func (*ConverseResponse) GetEventType

func (x *ConverseResponse) GetEventType() ConverseResponse_EventType

func (*ConverseResponse) GetResult

func (x *ConverseResponse) GetResult() *ConverseResult

func (*ConverseResponse) ProtoMessage

func (*ConverseResponse) ProtoMessage()

func (*ConverseResponse) ProtoReflect

func (x *ConverseResponse) ProtoReflect() protoreflect.Message

func (*ConverseResponse) Reset

func (x *ConverseResponse) Reset()

func (*ConverseResponse) String

func (x *ConverseResponse) String() string

type ConverseResponse_AudioOut

type ConverseResponse_AudioOut struct {
	// *Output-only* The audio containing the assistant's response to the query.
	AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut,proto3,oneof"`
}

type ConverseResponse_Error

type ConverseResponse_Error struct {
	// *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status]
	// message that specifies the error for the operation. If an error occurs
	// during processing, this message will be set and there will be no further
	// messages sent.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3,oneof"`
}

type ConverseResponse_EventType

type ConverseResponse_EventType int32

Indicates the type of event.

const (
	// No event specified.
	ConverseResponse_EVENT_TYPE_UNSPECIFIED ConverseResponse_EventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection.
	ConverseResponse_END_OF_UTTERANCE ConverseResponse_EventType = 1
)
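
Example (illustrative sketch): the reaction described above, half-closing the send side of the stream once the server signals the end of the utterance.

// stopOnEndOfUtterance half-closes the stream when the server reports that the
// user has finished speaking, so the client stops sending audio while still
// receiving the remaining responses.
func stopOnEndOfUtterance(stream embedded.EmbeddedAssistant_ConverseClient, resp *embedded.ConverseResponse) error {
	if resp.GetEventType() == embedded.ConverseResponse_END_OF_UTTERANCE {
		return stream.CloseSend()
	}
	return nil
}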

func (ConverseResponse_EventType) Descriptor

func (ConverseResponse_EventType) Enum

func (ConverseResponse_EventType) EnumDescriptor deprecated

func (ConverseResponse_EventType) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConverseResponse_EventType.Descriptor instead.

func (ConverseResponse_EventType) Number

func (ConverseResponse_EventType) String

func (ConverseResponse_EventType) Type

type ConverseResponse_EventType_

type ConverseResponse_EventType_ struct {
	// *Output-only* Indicates the type of event.
	EventType ConverseResponse_EventType `` /* 134-byte string literal not displayed */
}

type ConverseResponse_Result

type ConverseResponse_Result struct {
	// *Output-only* The semantic result for the user's spoken query.
	Result *ConverseResult `protobuf:"bytes,5,opt,name=result,proto3,oneof"`
}

type ConverseResult

type ConverseResult struct {

	// *Output-only* The recognized transcript of what the user said.
	SpokenRequestText string `protobuf:"bytes,1,opt,name=spoken_request_text,json=spokenRequestText,proto3" json:"spoken_request_text,omitempty"`
	// *Output-only* The text of the assistant's spoken response. This is only
	// returned for an IFTTT action.
	SpokenResponseText string `protobuf:"bytes,2,opt,name=spoken_response_text,json=spokenResponseText,proto3" json:"spoken_response_text,omitempty"`
	// *Output-only* State information for subsequent `ConverseRequest`. This
	// value should be saved in the client and returned in the
	// `conversation_state` with the next `ConverseRequest`. (The client does not
	// need to interpret or otherwise use this value.) There is no need to save
	// this information across device restarts.
	ConversationState []byte `protobuf:"bytes,3,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
	// *Output-only* Specifies the mode of the microphone after this `Converse`
	// RPC is processed.
	MicrophoneMode ConverseResult_MicrophoneMode `` /* 174-byte string literal not displayed */
	// *Output-only* Updated volume level. The value will be 0 or omitted
	// (indicating no change) unless a voice command such as "Increase the volume"
	// or "Set volume level 4" was recognized, in which case the value will be
	// between 1 and 100 (corresponding to the new volume level of 1% to 100%).
	// Typically, a client should use this volume level when playing the
	// `audio_out` data, and retain this value as the current volume level and
	// supply it in the `AudioOutConfig` of the next `ConverseRequest`. (Some
	// clients may also implement other ways to allow the current volume level to
	// be changed, for example, by providing a knob that the user can turn.)
	VolumePercentage int32 `protobuf:"varint,5,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
	// contains filtered or unexported fields
}

The semantic result for the user's spoken query.
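
Example (illustrative sketch): persisting the parts of a ConverseResult that the field comments say should feed the next ConverseRequest. The clientState type is an assumption.

// clientState holds the values a client carries from one ConverseResult into
// its next ConverseRequest.
type clientState struct {
	ConversationState []byte
	VolumePercentage  int32
}

// absorbResult saves the conversation state and, when the assistant changed the
// volume, the new volume level.
func (s *clientState) absorbResult(res *embedded.ConverseResult) {
	s.ConversationState = res.GetConversationState()
	if v := res.GetVolumePercentage(); v != 0 {
		s.VolumePercentage = v // 0 means "no change"
	}
}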

func (*ConverseResult) Descriptor deprecated

func (*ConverseResult) Descriptor() ([]byte, []int)

Deprecated: Use ConverseResult.ProtoReflect.Descriptor instead.

func (*ConverseResult) GetConversationState

func (x *ConverseResult) GetConversationState() []byte

func (*ConverseResult) GetMicrophoneMode

func (x *ConverseResult) GetMicrophoneMode() ConverseResult_MicrophoneMode

func (*ConverseResult) GetSpokenRequestText

func (x *ConverseResult) GetSpokenRequestText() string

func (*ConverseResult) GetSpokenResponseText

func (x *ConverseResult) GetSpokenResponseText() string

func (*ConverseResult) GetVolumePercentage

func (x *ConverseResult) GetVolumePercentage() int32

func (*ConverseResult) ProtoMessage

func (*ConverseResult) ProtoMessage()

func (*ConverseResult) ProtoReflect

func (x *ConverseResult) ProtoReflect() protoreflect.Message

func (*ConverseResult) Reset

func (x *ConverseResult) Reset()

func (*ConverseResult) String

func (x *ConverseResult) String() string

type ConverseResult_MicrophoneMode

type ConverseResult_MicrophoneMode int32

Possible states of the microphone after a `Converse` RPC completes.

const (
	// No mode specified.
	ConverseResult_MICROPHONE_MODE_UNSPECIFIED ConverseResult_MicrophoneMode = 0
	// The service is not expecting a follow-on question from the user.
	// The microphone should remain off until the user re-activates it.
	ConverseResult_CLOSE_MICROPHONE ConverseResult_MicrophoneMode = 1
	// The service is expecting a follow-on question from the user. The
	// microphone should be re-opened when the `AudioOut` playback completes
	// (by starting a new `Converse` RPC call to send the new audio).
	ConverseResult_DIALOG_FOLLOW_ON ConverseResult_MicrophoneMode = 2
)
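
Example (illustrative sketch): the decision this enum drives once playback of the response finishes.

// shouldReopenMicrophone reports whether the device should start a new Converse
// call as soon as AudioOut playback completes.
func shouldReopenMicrophone(mode embedded.ConverseResult_MicrophoneMode) bool {
	switch mode {
	case embedded.ConverseResult_DIALOG_FOLLOW_ON:
		return true // the assistant expects a follow-on question
	default:
		return false // CLOSE_MICROPHONE or unspecified: wait for user activation
	}
}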

func (ConverseResult_MicrophoneMode) Descriptor

func (ConverseResult_MicrophoneMode) Enum

func (ConverseResult_MicrophoneMode) EnumDescriptor deprecated

func (ConverseResult_MicrophoneMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConverseResult_MicrophoneMode.Descriptor instead.

func (ConverseResult_MicrophoneMode) Number

func (ConverseResult_MicrophoneMode) String

func (ConverseResult_MicrophoneMode) Type

type ConverseState

type ConverseState struct {

	// *Required* The `conversation_state` value returned in the prior
	// `ConverseResponse`. Omit (do not set the field) if there was no prior
	// `ConverseResponse`. If there was a prior `ConverseResponse`, do not omit
	// this field; doing so will end that conversation (and this new request will
	// start a new conversation).
	ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
	// contains filtered or unexported fields
}

Provides information about the current dialog state.

func (*ConverseState) Descriptor deprecated

func (*ConverseState) Descriptor() ([]byte, []int)

Deprecated: Use ConverseState.ProtoReflect.Descriptor instead.

func (*ConverseState) GetConversationState

func (x *ConverseState) GetConversationState() []byte

func (*ConverseState) ProtoMessage

func (*ConverseState) ProtoMessage()

func (*ConverseState) ProtoReflect

func (x *ConverseState) ProtoReflect() protoreflect.Message

func (*ConverseState) Reset

func (x *ConverseState) Reset()

func (*ConverseState) String

func (x *ConverseState) String() string

type EmbeddedAssistantClient

type EmbeddedAssistantClient interface {
	// Initiates or continues a conversation with the embedded assistant service.
	// Each call performs one round-trip, sending an audio request to the service
	// and receiving the audio response. Uses bidirectional streaming to receive
	// results, such as the `END_OF_UTTERANCE` event, while sending audio.
	//
	// A conversation is one or more gRPC connections, each consisting of several
	// streamed requests and responses.
	// For example, the user says *Add to my shopping list* and the assistant
	// responds *What do you want to add?*. The sequence of streamed requests and
	// responses in the first gRPC message could be:
	//
	// *   ConverseRequest.config
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseResponse.event_type.END_OF_UTTERANCE
	// *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	//
	// The user then says *bagels* and the assistant responds
	// *OK, I've added bagels to your shopping list*. This is sent as another gRPC
	// connection call to the `Converse` method, again with streamed requests and
	// responses, such as:
	//
	// *   ConverseRequest.config
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseResponse.event_type.END_OF_UTTERANCE
	// *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	//
	// Although the precise order of responses is not guaranteed, sequential
	// ConverseResponse.audio_out messages will always contain sequential portions
	// of audio.
	Converse(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_ConverseClient, error)
}

EmbeddedAssistantClient is the client API for EmbeddedAssistant service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
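
Example (illustrative sketch): one Converse round trip over the bidirectional stream. Obtaining the EmbeddedAssistantClient, authentication, and audio capture are outside this package and are assumed; the standard context and io packages are assumed to be imported, and a production client would also stop sending audio on END_OF_UTTERANCE as sketched earlier.

// converseOnce sends the config, streams the captured audio chunks, then reads
// responses until the server closes the stream, returning the final result.
func converseOnce(ctx context.Context, client embedded.EmbeddedAssistantClient,
	cfg *embedded.ConverseConfig, audioChunks <-chan []byte) (*embedded.ConverseResult, error) {

	stream, err := client.Converse(ctx)
	if err != nil {
		return nil, err
	}

	// The first message carries only the config.
	if err := stream.Send(&embedded.ConverseRequest{
		ConverseRequest: &embedded.ConverseRequest_Config{Config: cfg},
	}); err != nil {
		return nil, err
	}

	// Stream audio in a separate goroutine, then half-close the send side.
	go func() {
		for chunk := range audioChunks {
			req := &embedded.ConverseRequest{
				ConverseRequest: &embedded.ConverseRequest_AudioIn{AudioIn: chunk},
			}
			if err := stream.Send(req); err != nil {
				return // the Recv loop below surfaces the stream error
			}
		}
		stream.CloseSend()
	}()

	var result *embedded.ConverseResult
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return result, nil // server has finished responding
		}
		if err != nil {
			return nil, err
		}
		if r := resp.GetResult(); r != nil {
			result = r
		}
		// Events, audio_out playback, and errors would also be handled here.
	}
}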

type EmbeddedAssistantServer

type EmbeddedAssistantServer interface {
	// Initiates or continues a conversation with the embedded assistant service.
	// Each call performs one round-trip, sending an audio request to the service
	// and receiving the audio response. Uses bidirectional streaming to receive
	// results, such as the `END_OF_UTTERANCE` event, while sending audio.
	//
	// A conversation is one or more gRPC connections, each consisting of several
	// streamed requests and responses.
	// For example, the user says *Add to my shopping list* and the assistant
	// responds *What do you want to add?*. The sequence of streamed requests and
	// responses in the first gRPC message could be:
	//
	// *   ConverseRequest.config
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseResponse.event_type.END_OF_UTTERANCE
	// *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	//
	// The user then says *bagels* and the assistant responds
	// *OK, I've added bagels to your shopping list*. This is sent as another gRPC
	// connection call to the `Converse` method, again with streamed requests and
	// responses, such as:
	//
	// *   ConverseRequest.config
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseRequest.audio_in
	// *   ConverseResponse.event_type.END_OF_UTTERANCE
	// *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	// *   ConverseResponse.audio_out
	//
	// Although the precise order of responses is not guaranteed, sequential
	// ConverseResponse.audio_out messages will always contain sequential portions
	// of audio.
	Converse(EmbeddedAssistant_ConverseServer) error
}

EmbeddedAssistantServer is the server API for EmbeddedAssistant service.

type EmbeddedAssistant_ConverseClient

type EmbeddedAssistant_ConverseClient interface {
	Send(*ConverseRequest) error
	Recv() (*ConverseResponse, error)
	grpc.ClientStream
}

type EmbeddedAssistant_ConverseServer

type EmbeddedAssistant_ConverseServer interface {
	Send(*ConverseResponse) error
	Recv() (*ConverseRequest, error)
	grpc.ServerStream
}

type UnimplementedEmbeddedAssistantServer

type UnimplementedEmbeddedAssistantServer struct {
}

UnimplementedEmbeddedAssistantServer can be embedded to have forward compatible implementations.

func (*UnimplementedEmbeddedAssistantServer) Converse
