Documentation
¶
Index ¶
- Variables
- func RegisterGenerativeServiceServer(s *grpc.Server, srv GenerativeServiceServer)
- func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer)
- type BatchEmbedContentsRequest
- func (*BatchEmbedContentsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *BatchEmbedContentsRequest) GetModel() string
- func (x *BatchEmbedContentsRequest) GetRequests() []*EmbedContentRequest
- func (*BatchEmbedContentsRequest) ProtoMessage()
- func (x *BatchEmbedContentsRequest) ProtoReflect() protoreflect.Message
- func (x *BatchEmbedContentsRequest) Reset()
- func (x *BatchEmbedContentsRequest) String() string
- type BatchEmbedContentsResponse
- func (*BatchEmbedContentsResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *BatchEmbedContentsResponse) GetEmbeddings() []*ContentEmbedding
- func (*BatchEmbedContentsResponse) ProtoMessage()
- func (x *BatchEmbedContentsResponse) ProtoReflect() protoreflect.Message
- func (x *BatchEmbedContentsResponse) Reset()
- func (x *BatchEmbedContentsResponse) String() string
- type Blob
- type Candidate
- func (*Candidate) Descriptor() ([]byte, []int) (deprecated)
- func (x *Candidate) GetAvgLogprobs() float64
- func (x *Candidate) GetCitationMetadata() *CitationMetadata
- func (x *Candidate) GetContent() *Content
- func (x *Candidate) GetFinishReason() Candidate_FinishReason
- func (x *Candidate) GetIndex() int32
- func (x *Candidate) GetLogprobsResult() *LogprobsResult
- func (x *Candidate) GetSafetyRatings() []*SafetyRating
- func (x *Candidate) GetTokenCount() int32
- func (*Candidate) ProtoMessage()
- func (x *Candidate) ProtoReflect() protoreflect.Message
- func (x *Candidate) Reset()
- func (x *Candidate) String() string
- type Candidate_FinishReason
- func (Candidate_FinishReason) Descriptor() protoreflect.EnumDescriptor
- func (x Candidate_FinishReason) Enum() *Candidate_FinishReason
- func (Candidate_FinishReason) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x Candidate_FinishReason) Number() protoreflect.EnumNumber
- func (x Candidate_FinishReason) String() string
- func (Candidate_FinishReason) Type() protoreflect.EnumType
- type CitationMetadata
- func (*CitationMetadata) Descriptor() ([]byte, []int) (deprecated)
- func (x *CitationMetadata) GetCitationSources() []*CitationSource
- func (*CitationMetadata) ProtoMessage()
- func (x *CitationMetadata) ProtoReflect() protoreflect.Message
- func (x *CitationMetadata) Reset()
- func (x *CitationMetadata) String() string
- type CitationSource
- func (*CitationSource) Descriptor() ([]byte, []int) (deprecated)
- func (x *CitationSource) GetEndIndex() int32
- func (x *CitationSource) GetLicense() string
- func (x *CitationSource) GetStartIndex() int32
- func (x *CitationSource) GetUri() string
- func (*CitationSource) ProtoMessage()
- func (x *CitationSource) ProtoReflect() protoreflect.Message
- func (x *CitationSource) Reset()
- func (x *CitationSource) String() string
- type Content
- type ContentEmbedding
- type CountTokensRequest
- func (*CountTokensRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *CountTokensRequest) GetContents() []*Content
- func (x *CountTokensRequest) GetGenerateContentRequest() *GenerateContentRequest
- func (x *CountTokensRequest) GetModel() string
- func (*CountTokensRequest) ProtoMessage()
- func (x *CountTokensRequest) ProtoReflect() protoreflect.Message
- func (x *CountTokensRequest) Reset()
- func (x *CountTokensRequest) String() string
- type CountTokensResponse
- func (*CountTokensResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *CountTokensResponse) GetTotalTokens() int32
- func (*CountTokensResponse) ProtoMessage()
- func (x *CountTokensResponse) ProtoReflect() protoreflect.Message
- func (x *CountTokensResponse) Reset()
- func (x *CountTokensResponse) String() string
- type EmbedContentRequest
- func (*EmbedContentRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *EmbedContentRequest) GetContent() *Content
- func (x *EmbedContentRequest) GetModel() string
- func (x *EmbedContentRequest) GetOutputDimensionality() int32
- func (x *EmbedContentRequest) GetTaskType() TaskType
- func (x *EmbedContentRequest) GetTitle() string
- func (*EmbedContentRequest) ProtoMessage()
- func (x *EmbedContentRequest) ProtoReflect() protoreflect.Message
- func (x *EmbedContentRequest) Reset()
- func (x *EmbedContentRequest) String() string
- type EmbedContentResponse
- func (*EmbedContentResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *EmbedContentResponse) GetEmbedding() *ContentEmbedding
- func (*EmbedContentResponse) ProtoMessage()
- func (x *EmbedContentResponse) ProtoReflect() protoreflect.Message
- func (x *EmbedContentResponse) Reset()
- func (x *EmbedContentResponse) String() string
- type GenerateContentRequest
- func (*GenerateContentRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *GenerateContentRequest) GetContents() []*Content
- func (x *GenerateContentRequest) GetGenerationConfig() *GenerationConfig
- func (x *GenerateContentRequest) GetModel() string
- func (x *GenerateContentRequest) GetSafetySettings() []*SafetySetting
- func (*GenerateContentRequest) ProtoMessage()
- func (x *GenerateContentRequest) ProtoReflect() protoreflect.Message
- func (x *GenerateContentRequest) Reset()
- func (x *GenerateContentRequest) String() string
- type GenerateContentResponse
- func (*GenerateContentResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *GenerateContentResponse) GetCandidates() []*Candidate
- func (x *GenerateContentResponse) GetPromptFeedback() *GenerateContentResponse_PromptFeedback
- func (x *GenerateContentResponse) GetUsageMetadata() *GenerateContentResponse_UsageMetadata
- func (*GenerateContentResponse) ProtoMessage()
- func (x *GenerateContentResponse) ProtoReflect() protoreflect.Message
- func (x *GenerateContentResponse) Reset()
- func (x *GenerateContentResponse) String() string
- type GenerateContentResponse_PromptFeedback
- func (*GenerateContentResponse_PromptFeedback) Descriptor() ([]byte, []int) (deprecated)
- func (x *GenerateContentResponse_PromptFeedback) GetBlockReason() GenerateContentResponse_PromptFeedback_BlockReason
- func (x *GenerateContentResponse_PromptFeedback) GetSafetyRatings() []*SafetyRating
- func (*GenerateContentResponse_PromptFeedback) ProtoMessage()
- func (x *GenerateContentResponse_PromptFeedback) ProtoReflect() protoreflect.Message
- func (x *GenerateContentResponse_PromptFeedback) Reset()
- func (x *GenerateContentResponse_PromptFeedback) String() string
- type GenerateContentResponse_PromptFeedback_BlockReason
- func (GenerateContentResponse_PromptFeedback_BlockReason) Descriptor() protoreflect.EnumDescriptor
- func (x GenerateContentResponse_PromptFeedback_BlockReason) Enum() *GenerateContentResponse_PromptFeedback_BlockReason
- func (GenerateContentResponse_PromptFeedback_BlockReason) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x GenerateContentResponse_PromptFeedback_BlockReason) Number() protoreflect.EnumNumber
- func (x GenerateContentResponse_PromptFeedback_BlockReason) String() string
- func (GenerateContentResponse_PromptFeedback_BlockReason) Type() protoreflect.EnumType
- type GenerateContentResponse_UsageMetadata
- func (*GenerateContentResponse_UsageMetadata) Descriptor() ([]byte, []int) (deprecated)
- func (x *GenerateContentResponse_UsageMetadata) GetCandidatesTokenCount() int32
- func (x *GenerateContentResponse_UsageMetadata) GetPromptTokenCount() int32
- func (x *GenerateContentResponse_UsageMetadata) GetTotalTokenCount() int32
- func (*GenerateContentResponse_UsageMetadata) ProtoMessage()
- func (x *GenerateContentResponse_UsageMetadata) ProtoReflect() protoreflect.Message
- func (x *GenerateContentResponse_UsageMetadata) Reset()
- func (x *GenerateContentResponse_UsageMetadata) String() string
- type GenerationConfig
- func (*GenerationConfig) Descriptor() ([]byte, []int) (deprecated)
- func (x *GenerationConfig) GetCandidateCount() int32
- func (x *GenerationConfig) GetFrequencyPenalty() float32
- func (x *GenerationConfig) GetLogprobs() int32
- func (x *GenerationConfig) GetMaxOutputTokens() int32
- func (x *GenerationConfig) GetPresencePenalty() float32
- func (x *GenerationConfig) GetResponseLogprobs() bool
- func (x *GenerationConfig) GetStopSequences() []string
- func (x *GenerationConfig) GetTemperature() float32
- func (x *GenerationConfig) GetTopK() int32
- func (x *GenerationConfig) GetTopP() float32
- func (*GenerationConfig) ProtoMessage()
- func (x *GenerationConfig) ProtoReflect() protoreflect.Message
- func (x *GenerationConfig) Reset()
- func (x *GenerationConfig) String() string
- type GenerativeServiceClient
- type GenerativeServiceServer
- type GenerativeService_StreamGenerateContentClient
- type GenerativeService_StreamGenerateContentServer
- type GetModelRequest
- type HarmCategory
- func (HarmCategory) Descriptor() protoreflect.EnumDescriptor
- func (x HarmCategory) Enum() *HarmCategory
- func (HarmCategory) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x HarmCategory) Number() protoreflect.EnumNumber
- func (x HarmCategory) String() string
- func (HarmCategory) Type() protoreflect.EnumType
- type ListModelsRequest
- func (*ListModelsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *ListModelsRequest) GetPageSize() int32
- func (x *ListModelsRequest) GetPageToken() string
- func (*ListModelsRequest) ProtoMessage()
- func (x *ListModelsRequest) ProtoReflect() protoreflect.Message
- func (x *ListModelsRequest) Reset()
- func (x *ListModelsRequest) String() string
- type ListModelsResponse
- func (*ListModelsResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *ListModelsResponse) GetModels() []*Model
- func (x *ListModelsResponse) GetNextPageToken() string
- func (*ListModelsResponse) ProtoMessage()
- func (x *ListModelsResponse) ProtoReflect() protoreflect.Message
- func (x *ListModelsResponse) Reset()
- func (x *ListModelsResponse) String() string
- type LogprobsResult
- func (*LogprobsResult) Descriptor() ([]byte, []int) (deprecated)
- func (x *LogprobsResult) GetChosenCandidates() []*LogprobsResult_Candidate
- func (x *LogprobsResult) GetTopCandidates() []*LogprobsResult_TopCandidates
- func (*LogprobsResult) ProtoMessage()
- func (x *LogprobsResult) ProtoReflect() protoreflect.Message
- func (x *LogprobsResult) Reset()
- func (x *LogprobsResult) String() string
- type LogprobsResult_Candidate
- func (*LogprobsResult_Candidate) Descriptor() ([]byte, []int) (deprecated)
- func (x *LogprobsResult_Candidate) GetLogProbability() float32
- func (x *LogprobsResult_Candidate) GetToken() string
- func (x *LogprobsResult_Candidate) GetTokenId() int32
- func (*LogprobsResult_Candidate) ProtoMessage()
- func (x *LogprobsResult_Candidate) ProtoReflect() protoreflect.Message
- func (x *LogprobsResult_Candidate) Reset()
- func (x *LogprobsResult_Candidate) String() string
- type LogprobsResult_TopCandidates
- func (*LogprobsResult_TopCandidates) Descriptor() ([]byte, []int) (deprecated)
- func (x *LogprobsResult_TopCandidates) GetCandidates() []*LogprobsResult_Candidate
- func (*LogprobsResult_TopCandidates) ProtoMessage()
- func (x *LogprobsResult_TopCandidates) ProtoReflect() protoreflect.Message
- func (x *LogprobsResult_TopCandidates) Reset()
- func (x *LogprobsResult_TopCandidates) String() string
- type Model
- func (*Model) Descriptor() ([]byte, []int) (deprecated)
- func (x *Model) GetBaseModelId() string
- func (x *Model) GetDescription() string
- func (x *Model) GetDisplayName() string
- func (x *Model) GetInputTokenLimit() int32
- func (x *Model) GetMaxTemperature() float32
- func (x *Model) GetName() string
- func (x *Model) GetOutputTokenLimit() int32
- func (x *Model) GetSupportedGenerationMethods() []string
- func (x *Model) GetTemperature() float32
- func (x *Model) GetTopK() int32
- func (x *Model) GetTopP() float32
- func (x *Model) GetVersion() string
- func (*Model) ProtoMessage()
- func (x *Model) ProtoReflect() protoreflect.Message
- func (x *Model) Reset()
- func (x *Model) String() string
- type ModelServiceClient
- type ModelServiceServer
- type Part
- type Part_InlineData
- type Part_Text
- type SafetyRating
- func (*SafetyRating) Descriptor() ([]byte, []int) (deprecated)
- func (x *SafetyRating) GetBlocked() bool
- func (x *SafetyRating) GetCategory() HarmCategory
- func (x *SafetyRating) GetProbability() SafetyRating_HarmProbability
- func (*SafetyRating) ProtoMessage()
- func (x *SafetyRating) ProtoReflect() protoreflect.Message
- func (x *SafetyRating) Reset()
- func (x *SafetyRating) String() string
- type SafetyRating_HarmProbability
- func (SafetyRating_HarmProbability) Descriptor() protoreflect.EnumDescriptor
- func (x SafetyRating_HarmProbability) Enum() *SafetyRating_HarmProbability
- func (SafetyRating_HarmProbability) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x SafetyRating_HarmProbability) Number() protoreflect.EnumNumber
- func (x SafetyRating_HarmProbability) String() string
- func (SafetyRating_HarmProbability) Type() protoreflect.EnumType
- type SafetySetting
- func (*SafetySetting) Descriptor() ([]byte, []int) (deprecated)
- func (x *SafetySetting) GetCategory() HarmCategory
- func (x *SafetySetting) GetThreshold() SafetySetting_HarmBlockThreshold
- func (*SafetySetting) ProtoMessage()
- func (x *SafetySetting) ProtoReflect() protoreflect.Message
- func (x *SafetySetting) Reset()
- func (x *SafetySetting) String() string
- type SafetySetting_HarmBlockThreshold
- func (SafetySetting_HarmBlockThreshold) Descriptor() protoreflect.EnumDescriptor
- func (x SafetySetting_HarmBlockThreshold) Enum() *SafetySetting_HarmBlockThreshold
- func (SafetySetting_HarmBlockThreshold) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x SafetySetting_HarmBlockThreshold) Number() protoreflect.EnumNumber
- func (x SafetySetting_HarmBlockThreshold) String() string
- func (SafetySetting_HarmBlockThreshold) Type() protoreflect.EnumType
- type TaskType
- type UnimplementedGenerativeServiceServer
- func (*UnimplementedGenerativeServiceServer) BatchEmbedContents(context.Context, *BatchEmbedContentsRequest) (*BatchEmbedContentsResponse, error)
- func (*UnimplementedGenerativeServiceServer) CountTokens(context.Context, *CountTokensRequest) (*CountTokensResponse, error)
- func (*UnimplementedGenerativeServiceServer) EmbedContent(context.Context, *EmbedContentRequest) (*EmbedContentResponse, error)
- func (*UnimplementedGenerativeServiceServer) GenerateContent(context.Context, *GenerateContentRequest) (*GenerateContentResponse, error)
- func (*UnimplementedGenerativeServiceServer) StreamGenerateContent(*GenerateContentRequest, GenerativeService_StreamGenerateContentServer) error
- type UnimplementedModelServiceServer
Constants ¶
This section is empty.
Variables ¶
var ( TaskType_name = map[int32]string{ 0: "TASK_TYPE_UNSPECIFIED", 1: "RETRIEVAL_QUERY", 2: "RETRIEVAL_DOCUMENT", 3: "SEMANTIC_SIMILARITY", 4: "CLASSIFICATION", 5: "CLUSTERING", 6: "QUESTION_ANSWERING", 7: "FACT_VERIFICATION", } TaskType_value = map[string]int32{ "TASK_TYPE_UNSPECIFIED": 0, "RETRIEVAL_QUERY": 1, "RETRIEVAL_DOCUMENT": 2, "SEMANTIC_SIMILARITY": 3, "CLASSIFICATION": 4, "CLUSTERING": 5, "QUESTION_ANSWERING": 6, "FACT_VERIFICATION": 7, } )
Enum value maps for TaskType.
var ( GenerateContentResponse_PromptFeedback_BlockReason_name = map[int32]string{ 0: "BLOCK_REASON_UNSPECIFIED", 1: "SAFETY", 2: "OTHER", 3: "BLOCKLIST", 4: "PROHIBITED_CONTENT", } GenerateContentResponse_PromptFeedback_BlockReason_value = map[string]int32{ "BLOCK_REASON_UNSPECIFIED": 0, "SAFETY": 1, "OTHER": 2, "BLOCKLIST": 3, "PROHIBITED_CONTENT": 4, } )
Enum value maps for GenerateContentResponse_PromptFeedback_BlockReason.
var ( Candidate_FinishReason_name = map[int32]string{ 0: "FINISH_REASON_UNSPECIFIED", 1: "STOP", 2: "MAX_TOKENS", 3: "SAFETY", 4: "RECITATION", 6: "LANGUAGE", 5: "OTHER", 7: "BLOCKLIST", 8: "PROHIBITED_CONTENT", 9: "SPII", 10: "MALFORMED_FUNCTION_CALL", } Candidate_FinishReason_value = map[string]int32{ "FINISH_REASON_UNSPECIFIED": 0, "STOP": 1, "MAX_TOKENS": 2, "SAFETY": 3, "RECITATION": 4, "LANGUAGE": 6, "OTHER": 5, "BLOCKLIST": 7, "PROHIBITED_CONTENT": 8, "SPII": 9, "MALFORMED_FUNCTION_CALL": 10, } )
Enum value maps for Candidate_FinishReason.
var ( HarmCategory_name = map[int32]string{ 0: "HARM_CATEGORY_UNSPECIFIED", 1: "HARM_CATEGORY_DEROGATORY", 2: "HARM_CATEGORY_TOXICITY", 3: "HARM_CATEGORY_VIOLENCE", 4: "HARM_CATEGORY_SEXUAL", 5: "HARM_CATEGORY_MEDICAL", 6: "HARM_CATEGORY_DANGEROUS", 7: "HARM_CATEGORY_HARASSMENT", 8: "HARM_CATEGORY_HATE_SPEECH", 9: "HARM_CATEGORY_SEXUALLY_EXPLICIT", 10: "HARM_CATEGORY_DANGEROUS_CONTENT", 11: "HARM_CATEGORY_CIVIC_INTEGRITY", } HarmCategory_value = map[string]int32{ "HARM_CATEGORY_UNSPECIFIED": 0, "HARM_CATEGORY_DEROGATORY": 1, "HARM_CATEGORY_TOXICITY": 2, "HARM_CATEGORY_VIOLENCE": 3, "HARM_CATEGORY_SEXUAL": 4, "HARM_CATEGORY_MEDICAL": 5, "HARM_CATEGORY_DANGEROUS": 6, "HARM_CATEGORY_HARASSMENT": 7, "HARM_CATEGORY_HATE_SPEECH": 8, "HARM_CATEGORY_SEXUALLY_EXPLICIT": 9, "HARM_CATEGORY_DANGEROUS_CONTENT": 10, "HARM_CATEGORY_CIVIC_INTEGRITY": 11, } )
Enum value maps for HarmCategory.
var ( SafetyRating_HarmProbability_name = map[int32]string{ 0: "HARM_PROBABILITY_UNSPECIFIED", 1: "NEGLIGIBLE", 2: "LOW", 3: "MEDIUM", 4: "HIGH", } SafetyRating_HarmProbability_value = map[string]int32{ "HARM_PROBABILITY_UNSPECIFIED": 0, "NEGLIGIBLE": 1, "LOW": 2, "MEDIUM": 3, "HIGH": 4, } )
Enum value maps for SafetyRating_HarmProbability.
var ( SafetySetting_HarmBlockThreshold_name = map[int32]string{ 0: "HARM_BLOCK_THRESHOLD_UNSPECIFIED", 1: "BLOCK_LOW_AND_ABOVE", 2: "BLOCK_MEDIUM_AND_ABOVE", 3: "BLOCK_ONLY_HIGH", 4: "BLOCK_NONE", 5: "OFF", } SafetySetting_HarmBlockThreshold_value = map[string]int32{ "HARM_BLOCK_THRESHOLD_UNSPECIFIED": 0, "BLOCK_LOW_AND_ABOVE": 1, "BLOCK_MEDIUM_AND_ABOVE": 2, "BLOCK_ONLY_HIGH": 3, "BLOCK_NONE": 4, "OFF": 5, } )
Enum value maps for SafetySetting_HarmBlockThreshold.
var File_google_ai_generativelanguage_v1_citation_proto protoreflect.FileDescriptor
var File_google_ai_generativelanguage_v1_content_proto protoreflect.FileDescriptor
var File_google_ai_generativelanguage_v1_generative_service_proto protoreflect.FileDescriptor
var File_google_ai_generativelanguage_v1_model_proto protoreflect.FileDescriptor
var File_google_ai_generativelanguage_v1_model_service_proto protoreflect.FileDescriptor
var File_google_ai_generativelanguage_v1_safety_proto protoreflect.FileDescriptor
Functions ¶
func RegisterGenerativeServiceServer ¶
func RegisterGenerativeServiceServer(s *grpc.Server, srv GenerativeServiceServer)
func RegisterModelServiceServer ¶
func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer)
Types ¶
type BatchEmbedContentsRequest ¶
type BatchEmbedContentsRequest struct { // Required. The model's resource name. This serves as an ID for the Model to // use. // // This name should match a model name returned by the `ListModels` method. // // Format: `models/{model}` Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` // Required. Embed requests for the batch. The model in each of these requests // must match the model specified `BatchEmbedContentsRequest.model`. Requests []*EmbedContentRequest `protobuf:"bytes,2,rep,name=requests,proto3" json:"requests,omitempty"` // contains filtered or unexported fields }
Batch request to get embeddings from the model for a list of prompts.
func (*BatchEmbedContentsRequest) Descriptor (deprecated)
func (*BatchEmbedContentsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchEmbedContentsRequest.ProtoReflect.Descriptor instead.
func (*BatchEmbedContentsRequest) GetModel ¶
func (x *BatchEmbedContentsRequest) GetModel() string
func (*BatchEmbedContentsRequest) GetRequests ¶
func (x *BatchEmbedContentsRequest) GetRequests() []*EmbedContentRequest
func (*BatchEmbedContentsRequest) ProtoMessage ¶
func (*BatchEmbedContentsRequest) ProtoMessage()
func (*BatchEmbedContentsRequest) ProtoReflect ¶
func (x *BatchEmbedContentsRequest) ProtoReflect() protoreflect.Message
func (*BatchEmbedContentsRequest) Reset ¶
func (x *BatchEmbedContentsRequest) Reset()
func (*BatchEmbedContentsRequest) String ¶
func (x *BatchEmbedContentsRequest) String() string
type BatchEmbedContentsResponse ¶
type BatchEmbedContentsResponse struct { // Output only. The embeddings for each request, in the same order as provided // in the batch request. Embeddings []*ContentEmbedding `protobuf:"bytes,1,rep,name=embeddings,proto3" json:"embeddings,omitempty"` // contains filtered or unexported fields }
The response to a `BatchEmbedContentsRequest`.
func (*BatchEmbedContentsResponse) Descriptor (deprecated)
func (*BatchEmbedContentsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchEmbedContentsResponse.ProtoReflect.Descriptor instead.
func (*BatchEmbedContentsResponse) GetEmbeddings ¶
func (x *BatchEmbedContentsResponse) GetEmbeddings() []*ContentEmbedding
func (*BatchEmbedContentsResponse) ProtoMessage ¶
func (*BatchEmbedContentsResponse) ProtoMessage()
func (*BatchEmbedContentsResponse) ProtoReflect ¶
func (x *BatchEmbedContentsResponse) ProtoReflect() protoreflect.Message
func (*BatchEmbedContentsResponse) Reset ¶
func (x *BatchEmbedContentsResponse) Reset()
func (*BatchEmbedContentsResponse) String ¶
func (x *BatchEmbedContentsResponse) String() string
type Blob ¶
type Blob struct { // The IANA standard MIME type of the source data. // Examples: // - image/png // - image/jpeg // // If an unsupported MIME type is provided, an error will be returned. For a // complete list of supported types, see [Supported file // formats](https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats). MimeType string `protobuf:"bytes,1,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` // Raw bytes for media formats. Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // contains filtered or unexported fields }
Raw media bytes.
Text should not be sent as raw bytes, use the 'text' field.
func (*Blob) Descriptor (deprecated)
func (*Blob) GetMimeType ¶
func (*Blob) ProtoMessage ¶
func (*Blob) ProtoMessage()
func (*Blob) ProtoReflect ¶
func (x *Blob) ProtoReflect() protoreflect.Message
type Candidate ¶
type Candidate struct { // Output only. Index of the candidate in the list of response candidates. Index *int32 `protobuf:"varint,3,opt,name=index,proto3,oneof" json:"index,omitempty"` // Output only. Generated content returned from the model. Content *Content `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` // Optional. Output only. The reason why the model stopped generating tokens. // // If empty, the model has not stopped generating tokens. FinishReason Candidate_FinishReason `` /* 158-byte string literal not displayed */ // List of ratings for the safety of a response candidate. // // There is at most one rating per category. SafetyRatings []*SafetyRating `protobuf:"bytes,5,rep,name=safety_ratings,json=safetyRatings,proto3" json:"safety_ratings,omitempty"` // Output only. Citation information for model-generated candidate. // // This field may be populated with recitation information for any text // included in the `content`. These are passages that are "recited" from // copyrighted material in the foundational LLM's training data. CitationMetadata *CitationMetadata `protobuf:"bytes,6,opt,name=citation_metadata,json=citationMetadata,proto3" json:"citation_metadata,omitempty"` // Output only. Token count for this candidate. TokenCount int32 `protobuf:"varint,7,opt,name=token_count,json=tokenCount,proto3" json:"token_count,omitempty"` // Output only. AvgLogprobs float64 `protobuf:"fixed64,10,opt,name=avg_logprobs,json=avgLogprobs,proto3" json:"avg_logprobs,omitempty"` // Output only. Log-likelihood scores for the response tokens and top tokens LogprobsResult *LogprobsResult `protobuf:"bytes,11,opt,name=logprobs_result,json=logprobsResult,proto3" json:"logprobs_result,omitempty"` // contains filtered or unexported fields }
A response candidate generated from the model.
func (*Candidate) Descriptor (deprecated)
func (*Candidate) GetAvgLogprobs ¶ added in v0.9.0
func (*Candidate) GetCitationMetadata ¶
func (x *Candidate) GetCitationMetadata() *CitationMetadata
func (*Candidate) GetContent ¶
func (*Candidate) GetFinishReason ¶
func (x *Candidate) GetFinishReason() Candidate_FinishReason
func (*Candidate) GetLogprobsResult ¶ added in v0.9.0
func (x *Candidate) GetLogprobsResult() *LogprobsResult
func (*Candidate) GetSafetyRatings ¶
func (x *Candidate) GetSafetyRatings() []*SafetyRating
func (*Candidate) GetTokenCount ¶
func (*Candidate) ProtoMessage ¶
func (*Candidate) ProtoMessage()
func (*Candidate) ProtoReflect ¶
func (x *Candidate) ProtoReflect() protoreflect.Message
type Candidate_FinishReason ¶
type Candidate_FinishReason int32
Defines the reason why the model stopped generating tokens.
const ( // Default value. This value is unused. Candidate_FINISH_REASON_UNSPECIFIED Candidate_FinishReason = 0 // Natural stop point of the model or provided stop sequence. Candidate_STOP Candidate_FinishReason = 1 // The maximum number of tokens as specified in the request was reached. Candidate_MAX_TOKENS Candidate_FinishReason = 2 // The response candidate content was flagged for safety reasons. Candidate_SAFETY Candidate_FinishReason = 3 // The response candidate content was flagged for recitation reasons. Candidate_RECITATION Candidate_FinishReason = 4 // The response candidate content was flagged for using an unsupported // language. Candidate_LANGUAGE Candidate_FinishReason = 6 // Unknown reason. Candidate_OTHER Candidate_FinishReason = 5 // Token generation stopped because the content contains forbidden terms. Candidate_BLOCKLIST Candidate_FinishReason = 7 // Token generation stopped for potentially containing prohibited content. Candidate_PROHIBITED_CONTENT Candidate_FinishReason = 8 // Token generation stopped because the content potentially contains // Sensitive Personally Identifiable Information (SPII). Candidate_SPII Candidate_FinishReason = 9 // The function call generated by the model is invalid. Candidate_MALFORMED_FUNCTION_CALL Candidate_FinishReason = 10 )
func (Candidate_FinishReason) Descriptor ¶
func (Candidate_FinishReason) Descriptor() protoreflect.EnumDescriptor
func (Candidate_FinishReason) Enum ¶
func (x Candidate_FinishReason) Enum() *Candidate_FinishReason
func (Candidate_FinishReason) EnumDescriptor (deprecated)
func (Candidate_FinishReason) EnumDescriptor() ([]byte, []int)
Deprecated: Use Candidate_FinishReason.Descriptor instead.
func (Candidate_FinishReason) Number ¶
func (x Candidate_FinishReason) Number() protoreflect.EnumNumber
func (Candidate_FinishReason) String ¶
func (x Candidate_FinishReason) String() string
func (Candidate_FinishReason) Type ¶
func (Candidate_FinishReason) Type() protoreflect.EnumType
type CitationMetadata ¶
type CitationMetadata struct { // Citations to sources for a specific response. CitationSources []*CitationSource `protobuf:"bytes,1,rep,name=citation_sources,json=citationSources,proto3" json:"citation_sources,omitempty"` // contains filtered or unexported fields }
A collection of source attributions for a piece of content.
func (*CitationMetadata) Descriptor (deprecated)
func (*CitationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use CitationMetadata.ProtoReflect.Descriptor instead.
func (*CitationMetadata) GetCitationSources ¶
func (x *CitationMetadata) GetCitationSources() []*CitationSource
func (*CitationMetadata) ProtoMessage ¶
func (*CitationMetadata) ProtoMessage()
func (*CitationMetadata) ProtoReflect ¶
func (x *CitationMetadata) ProtoReflect() protoreflect.Message
func (*CitationMetadata) Reset ¶
func (x *CitationMetadata) Reset()
func (*CitationMetadata) String ¶
func (x *CitationMetadata) String() string
type CitationSource ¶
type CitationSource struct { // Optional. Start of segment of the response that is attributed to this // source. // // Index indicates the start of the segment, measured in bytes. StartIndex *int32 `protobuf:"varint,1,opt,name=start_index,json=startIndex,proto3,oneof" json:"start_index,omitempty"` // Optional. End of the attributed segment, exclusive. EndIndex *int32 `protobuf:"varint,2,opt,name=end_index,json=endIndex,proto3,oneof" json:"end_index,omitempty"` // Optional. URI that is attributed as a source for a portion of the text. Uri *string `protobuf:"bytes,3,opt,name=uri,proto3,oneof" json:"uri,omitempty"` // Optional. License for the GitHub project that is attributed as a source for // segment. // // License info is required for code citations. License *string `protobuf:"bytes,4,opt,name=license,proto3,oneof" json:"license,omitempty"` // contains filtered or unexported fields }
A citation to a source for a portion of a specific response.
func (*CitationSource) Descriptor (deprecated)
func (*CitationSource) Descriptor() ([]byte, []int)
Deprecated: Use CitationSource.ProtoReflect.Descriptor instead.
func (*CitationSource) GetEndIndex ¶
func (x *CitationSource) GetEndIndex() int32
func (*CitationSource) GetLicense ¶
func (x *CitationSource) GetLicense() string
func (*CitationSource) GetStartIndex ¶
func (x *CitationSource) GetStartIndex() int32
func (*CitationSource) GetUri ¶
func (x *CitationSource) GetUri() string
func (*CitationSource) ProtoMessage ¶
func (*CitationSource) ProtoMessage()
func (*CitationSource) ProtoReflect ¶
func (x *CitationSource) ProtoReflect() protoreflect.Message
func (*CitationSource) Reset ¶
func (x *CitationSource) Reset()
func (*CitationSource) String ¶
func (x *CitationSource) String() string
type Content ¶
type Content struct { // Ordered `Parts` that constitute a single message. Parts may have different // MIME types. Parts []*Part `protobuf:"bytes,1,rep,name=parts,proto3" json:"parts,omitempty"` // Optional. The producer of the content. Must be either 'user' or 'model'. // // Useful to set for multi-turn conversations, otherwise can be left blank // or unset. Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` // contains filtered or unexported fields }
The base structured datatype containing multi-part content of a message.
A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn.
func (*Content) Descriptor (deprecated)
func (*Content) ProtoMessage ¶
func (*Content) ProtoMessage()
func (*Content) ProtoReflect ¶
func (x *Content) ProtoReflect() protoreflect.Message
type ContentEmbedding ¶
type ContentEmbedding struct { // The embedding values. Values []float32 `protobuf:"fixed32,1,rep,packed,name=values,proto3" json:"values,omitempty"` // contains filtered or unexported fields }
A list of floats representing an embedding.
func (*ContentEmbedding) Descriptor (deprecated)
func (*ContentEmbedding) Descriptor() ([]byte, []int)
Deprecated: Use ContentEmbedding.ProtoReflect.Descriptor instead.
func (*ContentEmbedding) GetValues ¶
func (x *ContentEmbedding) GetValues() []float32
func (*ContentEmbedding) ProtoMessage ¶
func (*ContentEmbedding) ProtoMessage()
func (*ContentEmbedding) ProtoReflect ¶
func (x *ContentEmbedding) ProtoReflect() protoreflect.Message
func (*ContentEmbedding) Reset ¶
func (x *ContentEmbedding) Reset()
func (*ContentEmbedding) String ¶
func (x *ContentEmbedding) String() string
type CountTokensRequest ¶
type CountTokensRequest struct { // Required. The model's resource name. This serves as an ID for the Model to // use. // // This name should match a model name returned by the `ListModels` method. // // Format: `models/{model}` Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` // Optional. The input given to the model as a prompt. This field is ignored // when `generate_content_request` is set. Contents []*Content `protobuf:"bytes,2,rep,name=contents,proto3" json:"contents,omitempty"` // Optional. The overall input given to the `Model`. This includes the prompt // as well as other model steering information like [system // instructions](https://ai.google.dev/gemini-api/docs/system-instructions), // and/or function declarations for [function // calling](https://ai.google.dev/gemini-api/docs/function-calling). // `Model`s/`Content`s and `generate_content_request`s are mutually // exclusive. You can either send `Model` + `Content`s or a // `generate_content_request`, but never both. GenerateContentRequest *GenerateContentRequest `` /* 129-byte string literal not displayed */ // contains filtered or unexported fields }
Counts the number of tokens in the `prompt` sent to a model.
Models may tokenize text differently, so each model may return a different `token_count`.
func (*CountTokensRequest) Descriptor
deprecated
func (*CountTokensRequest) Descriptor() ([]byte, []int)
Deprecated: Use CountTokensRequest.ProtoReflect.Descriptor instead.
func (*CountTokensRequest) GetContents ¶
func (x *CountTokensRequest) GetContents() []*Content
func (*CountTokensRequest) GetGenerateContentRequest ¶ added in v0.6.0
func (x *CountTokensRequest) GetGenerateContentRequest() *GenerateContentRequest
func (*CountTokensRequest) GetModel ¶
func (x *CountTokensRequest) GetModel() string
func (*CountTokensRequest) ProtoMessage ¶
func (*CountTokensRequest) ProtoMessage()
func (*CountTokensRequest) ProtoReflect ¶
func (x *CountTokensRequest) ProtoReflect() protoreflect.Message
func (*CountTokensRequest) Reset ¶
func (x *CountTokensRequest) Reset()
func (*CountTokensRequest) String ¶
func (x *CountTokensRequest) String() string
type CountTokensResponse ¶
type CountTokensResponse struct { // The number of tokens that the `Model` tokenizes the `prompt` into. Always // non-negative. TotalTokens int32 `protobuf:"varint,1,opt,name=total_tokens,json=totalTokens,proto3" json:"total_tokens,omitempty"` // contains filtered or unexported fields }
A response from `CountTokens`.
It returns the model's `token_count` for the `prompt`.
func (*CountTokensResponse) Descriptor
deprecated
func (*CountTokensResponse) Descriptor() ([]byte, []int)
Deprecated: Use CountTokensResponse.ProtoReflect.Descriptor instead.
func (*CountTokensResponse) GetTotalTokens ¶
func (x *CountTokensResponse) GetTotalTokens() int32
func (*CountTokensResponse) ProtoMessage ¶
func (*CountTokensResponse) ProtoMessage()
func (*CountTokensResponse) ProtoReflect ¶
func (x *CountTokensResponse) ProtoReflect() protoreflect.Message
func (*CountTokensResponse) Reset ¶
func (x *CountTokensResponse) Reset()
func (*CountTokensResponse) String ¶
func (x *CountTokensResponse) String() string
type EmbedContentRequest ¶
type EmbedContentRequest struct { // Required. The model's resource name. This serves as an ID for the Model to // use. // // This name should match a model name returned by the `ListModels` method. // // Format: `models/{model}` Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` // Required. The content to embed. Only the `parts.text` fields will be // counted. Content *Content `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` // Optional. Optional task type for which the embeddings will be used. Can // only be set for `models/embedding-001`. TaskType *TaskType `` /* 138-byte string literal not displayed */ // Optional. An optional title for the text. Only applicable when TaskType is // `RETRIEVAL_DOCUMENT`. // // Note: Specifying a `title` for `RETRIEVAL_DOCUMENT` provides better quality // embeddings for retrieval. Title *string `protobuf:"bytes,4,opt,name=title,proto3,oneof" json:"title,omitempty"` // Optional. Optional reduced dimension for the output embedding. If set, // excessive values in the output embedding are truncated from the end. // Supported by newer models since 2024 only. You cannot set this value if // using the earlier model (`models/embedding-001`). OutputDimensionality *int32 `` /* 128-byte string literal not displayed */ // contains filtered or unexported fields }
Request containing the `Content` for the model to embed.
func (*EmbedContentRequest) Descriptor
deprecated
func (*EmbedContentRequest) Descriptor() ([]byte, []int)
Deprecated: Use EmbedContentRequest.ProtoReflect.Descriptor instead.
func (*EmbedContentRequest) GetContent ¶
func (x *EmbedContentRequest) GetContent() *Content
func (*EmbedContentRequest) GetModel ¶
func (x *EmbedContentRequest) GetModel() string
func (*EmbedContentRequest) GetOutputDimensionality ¶ added in v0.4.0
func (x *EmbedContentRequest) GetOutputDimensionality() int32
func (*EmbedContentRequest) GetTaskType ¶
func (x *EmbedContentRequest) GetTaskType() TaskType
func (*EmbedContentRequest) GetTitle ¶
func (x *EmbedContentRequest) GetTitle() string
func (*EmbedContentRequest) ProtoMessage ¶
func (*EmbedContentRequest) ProtoMessage()
func (*EmbedContentRequest) ProtoReflect ¶
func (x *EmbedContentRequest) ProtoReflect() protoreflect.Message
func (*EmbedContentRequest) Reset ¶
func (x *EmbedContentRequest) Reset()
func (*EmbedContentRequest) String ¶
func (x *EmbedContentRequest) String() string
type EmbedContentResponse ¶
type EmbedContentResponse struct { // Output only. The embedding generated from the input content. Embedding *ContentEmbedding `protobuf:"bytes,1,opt,name=embedding,proto3" json:"embedding,omitempty"` // contains filtered or unexported fields }
The response to an `EmbedContentRequest`.
func (*EmbedContentResponse) Descriptor
deprecated
func (*EmbedContentResponse) Descriptor() ([]byte, []int)
Deprecated: Use EmbedContentResponse.ProtoReflect.Descriptor instead.
func (*EmbedContentResponse) GetEmbedding ¶
func (x *EmbedContentResponse) GetEmbedding() *ContentEmbedding
func (*EmbedContentResponse) ProtoMessage ¶
func (*EmbedContentResponse) ProtoMessage()
func (*EmbedContentResponse) ProtoReflect ¶
func (x *EmbedContentResponse) ProtoReflect() protoreflect.Message
func (*EmbedContentResponse) Reset ¶
func (x *EmbedContentResponse) Reset()
func (*EmbedContentResponse) String ¶
func (x *EmbedContentResponse) String() string
type GenerateContentRequest ¶
type GenerateContentRequest struct { // Required. The name of the `Model` to use for generating the completion. // // Format: `name=models/{model}`. Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"` // Required. The content of the current conversation with the model. // // For single-turn queries, this is a single instance. For multi-turn queries // like [chat](https://ai.google.dev/gemini-api/docs/text-generation#chat), // this is a repeated field that contains the conversation history and the // latest request. Contents []*Content `protobuf:"bytes,2,rep,name=contents,proto3" json:"contents,omitempty"` // Optional. A list of unique `SafetySetting` instances for blocking unsafe // content. // // This will be enforced on the `GenerateContentRequest.contents` and // `GenerateContentResponse.candidates`. There should not be more than one // setting for each `SafetyCategory` type. The API will block any contents and // responses that fail to meet the thresholds set by these settings. This list // overrides the default settings for each `SafetyCategory` specified in the // safety_settings. If there is no `SafetySetting` for a given // `SafetyCategory` provided in the list, the API will use the default safety // setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH, // HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT, // HARM_CATEGORY_HARASSMENT are supported. Refer to the // [guide](https://ai.google.dev/gemini-api/docs/safety-settings) // for detailed information on available safety settings. Also refer to the // [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance) to // learn how to incorporate safety considerations in your AI applications. SafetySettings []*SafetySetting `protobuf:"bytes,3,rep,name=safety_settings,json=safetySettings,proto3" json:"safety_settings,omitempty"` // Optional. Configuration options for model generation and outputs. 
GenerationConfig *GenerationConfig `protobuf:"bytes,4,opt,name=generation_config,json=generationConfig,proto3,oneof" json:"generation_config,omitempty"` // contains filtered or unexported fields }
Request to generate a completion from the model.
func (*GenerateContentRequest) Descriptor
deprecated
func (*GenerateContentRequest) Descriptor() ([]byte, []int)
Deprecated: Use GenerateContentRequest.ProtoReflect.Descriptor instead.
func (*GenerateContentRequest) GetContents ¶
func (x *GenerateContentRequest) GetContents() []*Content
func (*GenerateContentRequest) GetGenerationConfig ¶
func (x *GenerateContentRequest) GetGenerationConfig() *GenerationConfig
func (*GenerateContentRequest) GetModel ¶
func (x *GenerateContentRequest) GetModel() string
func (*GenerateContentRequest) GetSafetySettings ¶
func (x *GenerateContentRequest) GetSafetySettings() []*SafetySetting
func (*GenerateContentRequest) ProtoMessage ¶
func (*GenerateContentRequest) ProtoMessage()
func (*GenerateContentRequest) ProtoReflect ¶
func (x *GenerateContentRequest) ProtoReflect() protoreflect.Message
func (*GenerateContentRequest) Reset ¶
func (x *GenerateContentRequest) Reset()
func (*GenerateContentRequest) String ¶
func (x *GenerateContentRequest) String() string
type GenerateContentResponse ¶
type GenerateContentResponse struct { // Candidate responses from the model. Candidates []*Candidate `protobuf:"bytes,1,rep,name=candidates,proto3" json:"candidates,omitempty"` // Returns the prompt's feedback related to the content filters. PromptFeedback *GenerateContentResponse_PromptFeedback `protobuf:"bytes,2,opt,name=prompt_feedback,json=promptFeedback,proto3" json:"prompt_feedback,omitempty"` // Output only. Metadata on the generation requests' token usage. UsageMetadata *GenerateContentResponse_UsageMetadata `protobuf:"bytes,3,opt,name=usage_metadata,json=usageMetadata,proto3" json:"usage_metadata,omitempty"` // contains filtered or unexported fields }
Response from the model supporting multiple candidate responses.
Safety ratings and content filtering are reported for both the prompt in `GenerateContentResponse.prompt_feedback` and for each candidate in `finish_reason` and in `safety_ratings`. The API:
- Returns either all requested candidates or none of them
- Returns no candidates at all only if there was something wrong with the prompt (check `prompt_feedback`)
- Reports feedback on each candidate in `finish_reason` and `safety_ratings`.
func (*GenerateContentResponse) Descriptor
deprecated
func (*GenerateContentResponse) Descriptor() ([]byte, []int)
Deprecated: Use GenerateContentResponse.ProtoReflect.Descriptor instead.
func (*GenerateContentResponse) GetCandidates ¶
func (x *GenerateContentResponse) GetCandidates() []*Candidate
func (*GenerateContentResponse) GetPromptFeedback ¶
func (x *GenerateContentResponse) GetPromptFeedback() *GenerateContentResponse_PromptFeedback
func (*GenerateContentResponse) GetUsageMetadata ¶ added in v0.6.0
func (x *GenerateContentResponse) GetUsageMetadata() *GenerateContentResponse_UsageMetadata
func (*GenerateContentResponse) ProtoMessage ¶
func (*GenerateContentResponse) ProtoMessage()
func (*GenerateContentResponse) ProtoReflect ¶
func (x *GenerateContentResponse) ProtoReflect() protoreflect.Message
func (*GenerateContentResponse) Reset ¶
func (x *GenerateContentResponse) Reset()
func (*GenerateContentResponse) String ¶
func (x *GenerateContentResponse) String() string
type GenerateContentResponse_PromptFeedback ¶
type GenerateContentResponse_PromptFeedback struct { // Optional. If set, the prompt was blocked and no candidates are returned. // Rephrase the prompt. BlockReason GenerateContentResponse_PromptFeedback_BlockReason `` /* 183-byte string literal not displayed */ // Ratings for safety of the prompt. // There is at most one rating per category. SafetyRatings []*SafetyRating `protobuf:"bytes,2,rep,name=safety_ratings,json=safetyRatings,proto3" json:"safety_ratings,omitempty"` // contains filtered or unexported fields }
A set of the feedback metadata for the prompt specified in `GenerateContentRequest.content`.
func (*GenerateContentResponse_PromptFeedback) Descriptor
deprecated
func (*GenerateContentResponse_PromptFeedback) Descriptor() ([]byte, []int)
Deprecated: Use GenerateContentResponse_PromptFeedback.ProtoReflect.Descriptor instead.
func (*GenerateContentResponse_PromptFeedback) GetBlockReason ¶
func (x *GenerateContentResponse_PromptFeedback) GetBlockReason() GenerateContentResponse_PromptFeedback_BlockReason
func (*GenerateContentResponse_PromptFeedback) GetSafetyRatings ¶
func (x *GenerateContentResponse_PromptFeedback) GetSafetyRatings() []*SafetyRating
func (*GenerateContentResponse_PromptFeedback) ProtoMessage ¶
func (*GenerateContentResponse_PromptFeedback) ProtoMessage()
func (*GenerateContentResponse_PromptFeedback) ProtoReflect ¶
func (x *GenerateContentResponse_PromptFeedback) ProtoReflect() protoreflect.Message
func (*GenerateContentResponse_PromptFeedback) Reset ¶
func (x *GenerateContentResponse_PromptFeedback) Reset()
func (*GenerateContentResponse_PromptFeedback) String ¶
func (x *GenerateContentResponse_PromptFeedback) String() string
type GenerateContentResponse_PromptFeedback_BlockReason ¶
type GenerateContentResponse_PromptFeedback_BlockReason int32
Specifies the reason why the prompt was blocked.
const ( // Default value. This value is unused. GenerateContentResponse_PromptFeedback_BLOCK_REASON_UNSPECIFIED GenerateContentResponse_PromptFeedback_BlockReason = 0 // Prompt was blocked due to safety reasons. Inspect `safety_ratings` // to understand which safety category blocked it. GenerateContentResponse_PromptFeedback_SAFETY GenerateContentResponse_PromptFeedback_BlockReason = 1 // Prompt was blocked due to unknown reasons. GenerateContentResponse_PromptFeedback_OTHER GenerateContentResponse_PromptFeedback_BlockReason = 2 // Prompt was blocked due to the terms which are included from the // terminology blocklist. GenerateContentResponse_PromptFeedback_BLOCKLIST GenerateContentResponse_PromptFeedback_BlockReason = 3 // Prompt was blocked due to prohibited content. GenerateContentResponse_PromptFeedback_PROHIBITED_CONTENT GenerateContentResponse_PromptFeedback_BlockReason = 4 )
func (GenerateContentResponse_PromptFeedback_BlockReason) Descriptor ¶
func (GenerateContentResponse_PromptFeedback_BlockReason) Descriptor() protoreflect.EnumDescriptor
func (GenerateContentResponse_PromptFeedback_BlockReason) EnumDescriptor
deprecated
func (GenerateContentResponse_PromptFeedback_BlockReason) EnumDescriptor() ([]byte, []int)
Deprecated: Use GenerateContentResponse_PromptFeedback_BlockReason.Descriptor instead.
func (GenerateContentResponse_PromptFeedback_BlockReason) Number ¶
func (x GenerateContentResponse_PromptFeedback_BlockReason) Number() protoreflect.EnumNumber
func (GenerateContentResponse_PromptFeedback_BlockReason) String ¶
func (x GenerateContentResponse_PromptFeedback_BlockReason) String() string
type GenerateContentResponse_UsageMetadata ¶ added in v0.6.0
type GenerateContentResponse_UsageMetadata struct { // Number of tokens in the prompt. When `cached_content` is set, this is // still the total effective prompt size meaning this includes the number of // tokens in the cached content. PromptTokenCount int32 `protobuf:"varint,1,opt,name=prompt_token_count,json=promptTokenCount,proto3" json:"prompt_token_count,omitempty"` // Total number of tokens across all the generated response candidates. CandidatesTokenCount int32 `protobuf:"varint,2,opt,name=candidates_token_count,json=candidatesTokenCount,proto3" json:"candidates_token_count,omitempty"` // Total token count for the generation request (prompt + response // candidates). TotalTokenCount int32 `protobuf:"varint,3,opt,name=total_token_count,json=totalTokenCount,proto3" json:"total_token_count,omitempty"` // contains filtered or unexported fields }
Metadata on the generation request's token usage.
func (*GenerateContentResponse_UsageMetadata) Descriptor
deprecated
added in
v0.6.0
func (*GenerateContentResponse_UsageMetadata) Descriptor() ([]byte, []int)
Deprecated: Use GenerateContentResponse_UsageMetadata.ProtoReflect.Descriptor instead.
func (*GenerateContentResponse_UsageMetadata) GetCandidatesTokenCount ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) GetCandidatesTokenCount() int32
func (*GenerateContentResponse_UsageMetadata) GetPromptTokenCount ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) GetPromptTokenCount() int32
func (*GenerateContentResponse_UsageMetadata) GetTotalTokenCount ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) GetTotalTokenCount() int32
func (*GenerateContentResponse_UsageMetadata) ProtoMessage ¶ added in v0.6.0
func (*GenerateContentResponse_UsageMetadata) ProtoMessage()
func (*GenerateContentResponse_UsageMetadata) ProtoReflect ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) ProtoReflect() protoreflect.Message
func (*GenerateContentResponse_UsageMetadata) Reset ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) Reset()
func (*GenerateContentResponse_UsageMetadata) String ¶ added in v0.6.0
func (x *GenerateContentResponse_UsageMetadata) String() string
type GenerationConfig ¶
type GenerationConfig struct { // Optional. Number of generated responses to return. // // Currently, this value can only be set to 1. If unset, this will default // to 1. CandidateCount *int32 `protobuf:"varint,1,opt,name=candidate_count,json=candidateCount,proto3,oneof" json:"candidate_count,omitempty"` // Optional. The set of character sequences (up to 5) that will stop output // generation. If specified, the API will stop at the first appearance of a // `stop_sequence`. The stop sequence will not be included as part of the // response. StopSequences []string `protobuf:"bytes,2,rep,name=stop_sequences,json=stopSequences,proto3" json:"stop_sequences,omitempty"` // Optional. The maximum number of tokens to include in a response candidate. // // Note: The default value varies by model, see the `Model.output_token_limit` // attribute of the `Model` returned from the `getModel` function. MaxOutputTokens *int32 `protobuf:"varint,4,opt,name=max_output_tokens,json=maxOutputTokens,proto3,oneof" json:"max_output_tokens,omitempty"` // Optional. Controls the randomness of the output. // // Note: The default value varies by model, see the `Model.temperature` // attribute of the `Model` returned from the `getModel` function. // // Values can range from [0.0, 2.0]. Temperature *float32 `protobuf:"fixed32,5,opt,name=temperature,proto3,oneof" json:"temperature,omitempty"` // Optional. The maximum cumulative probability of tokens to consider when // sampling. // // The model uses combined Top-k and Top-p (nucleus) sampling. // // Tokens are sorted based on their assigned probabilities so that only the // most likely tokens are considered. Top-k sampling directly limits the // maximum number of tokens to consider, while Nucleus sampling limits the // number of tokens based on the cumulative probability. // // Note: The default value varies by `Model` and is specified by // the `Model.top_p` attribute returned from the `getModel` function.
An empty // `top_k` attribute indicates that the model doesn't apply top-k sampling // and doesn't allow setting `top_k` on requests. TopP *float32 `protobuf:"fixed32,6,opt,name=top_p,json=topP,proto3,oneof" json:"top_p,omitempty"` // Optional. The maximum number of tokens to consider when sampling. // // Gemini models use Top-p (nucleus) sampling or a combination of Top-k and // nucleus sampling. Top-k sampling considers the set of `top_k` most probable // tokens. Models running with nucleus sampling don't allow top_k setting. // // Note: The default value varies by `Model` and is specified by // the `Model.top_k` attribute returned from the `getModel` function. An empty // `top_k` attribute indicates that the model doesn't apply top-k sampling // and doesn't allow setting `top_k` on requests. TopK *int32 `protobuf:"varint,7,opt,name=top_k,json=topK,proto3,oneof" json:"top_k,omitempty"` // Optional. Presence penalty applied to the next token's logprobs if the // token has already been seen in the response. // // This penalty is binary on/off and not dependent on the number of times the // token is used (after the first). Use // [frequency_penalty][google.ai.generativelanguage.v1.GenerationConfig.frequency_penalty] // for a penalty that increases with each use. // // A positive penalty will discourage the use of tokens that have already // been used in the response, increasing the vocabulary. // // A negative penalty will encourage the use of tokens that have already been // used in the response, decreasing the vocabulary. PresencePenalty *float32 `protobuf:"fixed32,15,opt,name=presence_penalty,json=presencePenalty,proto3,oneof" json:"presence_penalty,omitempty"` // Optional. Frequency penalty applied to the next token's logprobs, // multiplied by the number of times each token has been seen in the response // so far.
// // A positive penalty will discourage the use of tokens that have already // been used, proportional to the number of times the token has been used: // The more a token is used, the more difficult it is for the model to use // that token again, increasing the vocabulary of responses. // // Caution: A _negative_ penalty will encourage the model to reuse tokens // proportional to the number of times the token has been used. Small // negative values will reduce the vocabulary of a response. Larger negative // values will cause the model to start repeating a common token until it // hits the // [max_output_tokens][google.ai.generativelanguage.v1.GenerationConfig.max_output_tokens] // limit: "...the the the the the...". FrequencyPenalty *float32 `protobuf:"fixed32,16,opt,name=frequency_penalty,json=frequencyPenalty,proto3,oneof" json:"frequency_penalty,omitempty"` // Optional. If true, export the logprobs results in response. ResponseLogprobs *bool `protobuf:"varint,17,opt,name=response_logprobs,json=responseLogprobs,proto3,oneof" json:"response_logprobs,omitempty"` // Optional. Only valid if // [response_logprobs=True][google.ai.generativelanguage.v1.GenerationConfig.response_logprobs]. // This sets the number of top logprobs to return at each decoding step in the // [Candidate.logprobs_result][google.ai.generativelanguage.v1.Candidate.logprobs_result]. Logprobs *int32 `protobuf:"varint,18,opt,name=logprobs,proto3,oneof" json:"logprobs,omitempty"` // contains filtered or unexported fields }
Configuration options for model generation and outputs. Not all parameters are configurable for every model.
func (*GenerationConfig) Descriptor
deprecated
func (*GenerationConfig) Descriptor() ([]byte, []int)
Deprecated: Use GenerationConfig.ProtoReflect.Descriptor instead.
func (*GenerationConfig) GetCandidateCount ¶
func (x *GenerationConfig) GetCandidateCount() int32
func (*GenerationConfig) GetFrequencyPenalty ¶ added in v0.9.0
func (x *GenerationConfig) GetFrequencyPenalty() float32
func (*GenerationConfig) GetLogprobs ¶ added in v0.9.0
func (x *GenerationConfig) GetLogprobs() int32
func (*GenerationConfig) GetMaxOutputTokens ¶
func (x *GenerationConfig) GetMaxOutputTokens() int32
func (*GenerationConfig) GetPresencePenalty ¶ added in v0.9.0
func (x *GenerationConfig) GetPresencePenalty() float32
func (*GenerationConfig) GetResponseLogprobs ¶ added in v0.9.0
func (x *GenerationConfig) GetResponseLogprobs() bool
func (*GenerationConfig) GetStopSequences ¶
func (x *GenerationConfig) GetStopSequences() []string
func (*GenerationConfig) GetTemperature ¶
func (x *GenerationConfig) GetTemperature() float32
func (*GenerationConfig) GetTopK ¶
func (x *GenerationConfig) GetTopK() int32
func (*GenerationConfig) GetTopP ¶
func (x *GenerationConfig) GetTopP() float32
func (*GenerationConfig) ProtoMessage ¶
func (*GenerationConfig) ProtoMessage()
func (*GenerationConfig) ProtoReflect ¶
func (x *GenerationConfig) ProtoReflect() protoreflect.Message
func (*GenerationConfig) Reset ¶
func (x *GenerationConfig) Reset()
func (*GenerationConfig) String ¶
func (x *GenerationConfig) String() string
type GenerativeServiceClient ¶
type GenerativeServiceClient interface { // Generates a model response given an input `GenerateContentRequest`. // Refer to the [text generation // guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed // usage information. Input capabilities differ between models, including // tuned models. Refer to the [model // guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning // guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details. GenerateContent(ctx context.Context, in *GenerateContentRequest, opts ...grpc.CallOption) (*GenerateContentResponse, error) // Generates a [streamed // response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream) // from the model given an input `GenerateContentRequest`. StreamGenerateContent(ctx context.Context, in *GenerateContentRequest, opts ...grpc.CallOption) (GenerativeService_StreamGenerateContentClient, error) // Generates a text embedding vector from the input `Content` using the // specified [Gemini Embedding // model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding). EmbedContent(ctx context.Context, in *EmbedContentRequest, opts ...grpc.CallOption) (*EmbedContentResponse, error) // Generates multiple embedding vectors from the input `Content` which // consists of a batch of strings represented as `EmbedContentRequest` // objects. BatchEmbedContents(ctx context.Context, in *BatchEmbedContentsRequest, opts ...grpc.CallOption) (*BatchEmbedContentsResponse, error) // Runs a model's tokenizer on input `Content` and returns the token count. // Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens) // to learn more about tokens. CountTokens(ctx context.Context, in *CountTokensRequest, opts ...grpc.CallOption) (*CountTokensResponse, error) }
GenerativeServiceClient is the client API for GenerativeService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewGenerativeServiceClient ¶
func NewGenerativeServiceClient(cc grpc.ClientConnInterface) GenerativeServiceClient
type GenerativeServiceServer ¶
type GenerativeServiceServer interface { // Generates a model response given an input `GenerateContentRequest`. // Refer to the [text generation // guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed // usage information. Input capabilities differ between models, including // tuned models. Refer to the [model // guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning // guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details. GenerateContent(context.Context, *GenerateContentRequest) (*GenerateContentResponse, error) // Generates a [streamed // response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream) // from the model given an input `GenerateContentRequest`. StreamGenerateContent(*GenerateContentRequest, GenerativeService_StreamGenerateContentServer) error // Generates a text embedding vector from the input `Content` using the // specified [Gemini Embedding // model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding). EmbedContent(context.Context, *EmbedContentRequest) (*EmbedContentResponse, error) // Generates multiple embedding vectors from the input `Content` which // consists of a batch of strings represented as `EmbedContentRequest` // objects. BatchEmbedContents(context.Context, *BatchEmbedContentsRequest) (*BatchEmbedContentsResponse, error) // Runs a model's tokenizer on input `Content` and returns the token count. // Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens) // to learn more about tokens. CountTokens(context.Context, *CountTokensRequest) (*CountTokensResponse, error) }
GenerativeServiceServer is the server API for GenerativeService service.
type GenerativeService_StreamGenerateContentClient ¶
type GenerativeService_StreamGenerateContentClient interface { Recv() (*GenerateContentResponse, error) grpc.ClientStream }
type GenerativeService_StreamGenerateContentServer ¶
type GenerativeService_StreamGenerateContentServer interface { Send(*GenerateContentResponse) error grpc.ServerStream }
type GetModelRequest ¶
type GetModelRequest struct { // Required. The resource name of the model. // // This name should match a model name returned by the `ListModels` method. // // Format: `models/{model}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // contains filtered or unexported fields }
Request for getting information about a specific Model.
func (*GetModelRequest) Descriptor
deprecated
func (*GetModelRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetModelRequest.ProtoReflect.Descriptor instead.
func (*GetModelRequest) GetName ¶
func (x *GetModelRequest) GetName() string
func (*GetModelRequest) ProtoMessage ¶
func (*GetModelRequest) ProtoMessage()
func (*GetModelRequest) ProtoReflect ¶
func (x *GetModelRequest) ProtoReflect() protoreflect.Message
func (*GetModelRequest) Reset ¶
func (x *GetModelRequest) Reset()
func (*GetModelRequest) String ¶
func (x *GetModelRequest) String() string
type HarmCategory ¶
type HarmCategory int32
The category of a rating.
These categories cover various kinds of harms that developers may wish to adjust.
const ( // Category is unspecified. HarmCategory_HARM_CATEGORY_UNSPECIFIED HarmCategory = 0 // **PaLM** - Negative or harmful comments targeting identity and/or protected // attribute. HarmCategory_HARM_CATEGORY_DEROGATORY HarmCategory = 1 // **PaLM** - Content that is rude, disrespectful, or profane. HarmCategory_HARM_CATEGORY_TOXICITY HarmCategory = 2 // **PaLM** - Describes scenarios depicting violence against an individual or // group, or general descriptions of gore. HarmCategory_HARM_CATEGORY_VIOLENCE HarmCategory = 3 // **PaLM** - Contains references to sexual acts or other lewd content. HarmCategory_HARM_CATEGORY_SEXUAL HarmCategory = 4 // **PaLM** - Promotes unchecked medical advice. HarmCategory_HARM_CATEGORY_MEDICAL HarmCategory = 5 // **PaLM** - Dangerous content that promotes, facilitates, or encourages // harmful acts. HarmCategory_HARM_CATEGORY_DANGEROUS HarmCategory = 6 // **Gemini** - Harassment content. HarmCategory_HARM_CATEGORY_HARASSMENT HarmCategory = 7 // **Gemini** - Hate speech and content. HarmCategory_HARM_CATEGORY_HATE_SPEECH HarmCategory = 8 // **Gemini** - Sexually explicit content. HarmCategory_HARM_CATEGORY_SEXUALLY_EXPLICIT HarmCategory = 9 // **Gemini** - Dangerous content. HarmCategory_HARM_CATEGORY_DANGEROUS_CONTENT HarmCategory = 10 // **Gemini** - Content that may be used to harm civic integrity. HarmCategory_HARM_CATEGORY_CIVIC_INTEGRITY HarmCategory = 11 )
func (HarmCategory) Descriptor ¶
func (HarmCategory) Descriptor() protoreflect.EnumDescriptor
func (HarmCategory) Enum ¶
func (x HarmCategory) Enum() *HarmCategory
func (HarmCategory) EnumDescriptor
deprecated
func (HarmCategory) EnumDescriptor() ([]byte, []int)
Deprecated: Use HarmCategory.Descriptor instead.
func (HarmCategory) Number ¶
func (x HarmCategory) Number() protoreflect.EnumNumber
func (HarmCategory) String ¶
func (x HarmCategory) String() string
func (HarmCategory) Type ¶
func (HarmCategory) Type() protoreflect.EnumType
type ListModelsRequest ¶
type ListModelsRequest struct { // The maximum number of `Models` to return (per page). // // If unspecified, 50 models will be returned per page. // This method returns at most 1000 models per page, even if you pass a larger // page_size. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // A page token, received from a previous `ListModels` call. // // Provide the `page_token` returned by one request as an argument to the next // request to retrieve the next page. // // When paginating, all other parameters provided to `ListModels` must match // the call that provided the page token. PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` // contains filtered or unexported fields }
Request for listing all Models.
func (*ListModelsRequest) Descriptor ¶ deprecated
func (*ListModelsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListModelsRequest.ProtoReflect.Descriptor instead.
func (*ListModelsRequest) GetPageSize ¶
func (x *ListModelsRequest) GetPageSize() int32
func (*ListModelsRequest) GetPageToken ¶
func (x *ListModelsRequest) GetPageToken() string
func (*ListModelsRequest) ProtoMessage ¶
func (*ListModelsRequest) ProtoMessage()
func (*ListModelsRequest) ProtoReflect ¶
func (x *ListModelsRequest) ProtoReflect() protoreflect.Message
func (*ListModelsRequest) Reset ¶
func (x *ListModelsRequest) Reset()
func (*ListModelsRequest) String ¶
func (x *ListModelsRequest) String() string
type ListModelsResponse ¶
type ListModelsResponse struct { // The returned Models. Models []*Model `protobuf:"bytes,1,rep,name=models,proto3" json:"models,omitempty"` // A token, which can be sent as `page_token` to retrieve the next page. // // If this field is omitted, there are no more pages. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` // contains filtered or unexported fields }
Response from `ListModels` containing a paginated list of Models.
func (*ListModelsResponse) Descriptor ¶ deprecated
func (*ListModelsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListModelsResponse.ProtoReflect.Descriptor instead.
func (*ListModelsResponse) GetModels ¶
func (x *ListModelsResponse) GetModels() []*Model
func (*ListModelsResponse) GetNextPageToken ¶
func (x *ListModelsResponse) GetNextPageToken() string
func (*ListModelsResponse) ProtoMessage ¶
func (*ListModelsResponse) ProtoMessage()
func (*ListModelsResponse) ProtoReflect ¶
func (x *ListModelsResponse) ProtoReflect() protoreflect.Message
func (*ListModelsResponse) Reset ¶
func (x *ListModelsResponse) Reset()
func (*ListModelsResponse) String ¶
func (x *ListModelsResponse) String() string
type LogprobsResult ¶ added in v0.9.0
type LogprobsResult struct { // Length = total number of decoding steps. TopCandidates []*LogprobsResult_TopCandidates `protobuf:"bytes,1,rep,name=top_candidates,json=topCandidates,proto3" json:"top_candidates,omitempty"` // Length = total number of decoding steps. // The chosen candidates may or may not be in top_candidates. ChosenCandidates []*LogprobsResult_Candidate `protobuf:"bytes,2,rep,name=chosen_candidates,json=chosenCandidates,proto3" json:"chosen_candidates,omitempty"` // contains filtered or unexported fields }
Logprobs Result
func (*LogprobsResult) Descriptor ¶ deprecated, added in v0.9.0
func (*LogprobsResult) Descriptor() ([]byte, []int)
Deprecated: Use LogprobsResult.ProtoReflect.Descriptor instead.
func (*LogprobsResult) GetChosenCandidates ¶ added in v0.9.0
func (x *LogprobsResult) GetChosenCandidates() []*LogprobsResult_Candidate
func (*LogprobsResult) GetTopCandidates ¶ added in v0.9.0
func (x *LogprobsResult) GetTopCandidates() []*LogprobsResult_TopCandidates
func (*LogprobsResult) ProtoMessage ¶ added in v0.9.0
func (*LogprobsResult) ProtoMessage()
func (*LogprobsResult) ProtoReflect ¶ added in v0.9.0
func (x *LogprobsResult) ProtoReflect() protoreflect.Message
func (*LogprobsResult) Reset ¶ added in v0.9.0
func (x *LogprobsResult) Reset()
func (*LogprobsResult) String ¶ added in v0.9.0
func (x *LogprobsResult) String() string
type LogprobsResult_Candidate ¶ added in v0.9.0
type LogprobsResult_Candidate struct { // The candidate’s token string value. Token *string `protobuf:"bytes,1,opt,name=token,proto3,oneof" json:"token,omitempty"` // The candidate’s token id value. TokenId *int32 `protobuf:"varint,3,opt,name=token_id,json=tokenId,proto3,oneof" json:"token_id,omitempty"` // The candidate's log probability. LogProbability *float32 `protobuf:"fixed32,2,opt,name=log_probability,json=logProbability,proto3,oneof" json:"log_probability,omitempty"` // contains filtered or unexported fields }
Candidate for the logprobs token and score.
func (*LogprobsResult_Candidate) Descriptor ¶ deprecated, added in v0.9.0
func (*LogprobsResult_Candidate) Descriptor() ([]byte, []int)
Deprecated: Use LogprobsResult_Candidate.ProtoReflect.Descriptor instead.
func (*LogprobsResult_Candidate) GetLogProbability ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) GetLogProbability() float32
func (*LogprobsResult_Candidate) GetToken ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) GetToken() string
func (*LogprobsResult_Candidate) GetTokenId ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) GetTokenId() int32
func (*LogprobsResult_Candidate) ProtoMessage ¶ added in v0.9.0
func (*LogprobsResult_Candidate) ProtoMessage()
func (*LogprobsResult_Candidate) ProtoReflect ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) ProtoReflect() protoreflect.Message
func (*LogprobsResult_Candidate) Reset ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) Reset()
func (*LogprobsResult_Candidate) String ¶ added in v0.9.0
func (x *LogprobsResult_Candidate) String() string
type LogprobsResult_TopCandidates ¶ added in v0.9.0
type LogprobsResult_TopCandidates struct { // Sorted by log probability in descending order. Candidates []*LogprobsResult_Candidate `protobuf:"bytes,1,rep,name=candidates,proto3" json:"candidates,omitempty"` // contains filtered or unexported fields }
Candidates with top log probabilities at each decoding step.
func (*LogprobsResult_TopCandidates) Descriptor ¶ deprecated, added in v0.9.0
func (*LogprobsResult_TopCandidates) Descriptor() ([]byte, []int)
Deprecated: Use LogprobsResult_TopCandidates.ProtoReflect.Descriptor instead.
func (*LogprobsResult_TopCandidates) GetCandidates ¶ added in v0.9.0
func (x *LogprobsResult_TopCandidates) GetCandidates() []*LogprobsResult_Candidate
func (*LogprobsResult_TopCandidates) ProtoMessage ¶ added in v0.9.0
func (*LogprobsResult_TopCandidates) ProtoMessage()
func (*LogprobsResult_TopCandidates) ProtoReflect ¶ added in v0.9.0
func (x *LogprobsResult_TopCandidates) ProtoReflect() protoreflect.Message
func (*LogprobsResult_TopCandidates) Reset ¶ added in v0.9.0
func (x *LogprobsResult_TopCandidates) Reset()
func (*LogprobsResult_TopCandidates) String ¶ added in v0.9.0
func (x *LogprobsResult_TopCandidates) String() string
type Model ¶
type Model struct { // Required. The resource name of the `Model`. Refer to [Model // variants](https://ai.google.dev/gemini-api/docs/models/gemini#model-variations) // for all allowed values. // // Format: `models/{model}` with a `{model}` naming convention of: // // * "{base_model_id}-{version}" // // Examples: // // * `models/gemini-1.5-flash-001` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The name of the base model, pass this to the generation request. // // Examples: // // * `gemini-1.5-flash` BaseModelId string `protobuf:"bytes,2,opt,name=base_model_id,json=baseModelId,proto3" json:"base_model_id,omitempty"` // Required. The version number of the model. // // This represents the major version (`1.0` or `1.5`) Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // The human-readable name of the model. E.g. "Gemini 1.5 Flash". // // The name can be up to 128 characters long and can consist of any UTF-8 // characters. DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` // A short description of the model. Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` // Maximum number of input tokens allowed for this model. InputTokenLimit int32 `protobuf:"varint,6,opt,name=input_token_limit,json=inputTokenLimit,proto3" json:"input_token_limit,omitempty"` // Maximum number of output tokens available for this model. OutputTokenLimit int32 `protobuf:"varint,7,opt,name=output_token_limit,json=outputTokenLimit,proto3" json:"output_token_limit,omitempty"` // The model's supported generation methods. // // The corresponding API method names are defined as Pascal case // strings, such as `generateMessage` and `generateContent`. SupportedGenerationMethods []string `` /* 141-byte string literal not displayed */ // Controls the randomness of the output. 
// // Values can range over `[0.0,max_temperature]`, inclusive. A higher value // will produce responses that are more varied, while a value closer to `0.0` // will typically result in less surprising responses from the model. // This value specifies default to be used by the backend while making the // call to the model. Temperature *float32 `protobuf:"fixed32,9,opt,name=temperature,proto3,oneof" json:"temperature,omitempty"` // The maximum temperature this model can use. MaxTemperature *float32 `protobuf:"fixed32,13,opt,name=max_temperature,json=maxTemperature,proto3,oneof" json:"max_temperature,omitempty"` // For [Nucleus // sampling](https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p). // // Nucleus sampling considers the smallest set of tokens whose probability // sum is at least `top_p`. // This value specifies default to be used by the backend while making the // call to the model. TopP *float32 `protobuf:"fixed32,10,opt,name=top_p,json=topP,proto3,oneof" json:"top_p,omitempty"` // For Top-k sampling. // // Top-k sampling considers the set of `top_k` most probable tokens. // This value specifies default to be used by the backend while making the // call to the model. // If empty, indicates the model doesn't use top-k sampling, and `top_k` isn't // allowed as a generation parameter. TopK *int32 `protobuf:"varint,11,opt,name=top_k,json=topK,proto3,oneof" json:"top_k,omitempty"` // contains filtered or unexported fields }
Information about a Generative Language Model.
func (*Model) Descriptor ¶ deprecated
func (*Model) GetBaseModelId ¶
func (*Model) GetDescription ¶
func (*Model) GetDisplayName ¶
func (*Model) GetInputTokenLimit ¶
func (*Model) GetMaxTemperature ¶ added in v0.9.0
func (*Model) GetOutputTokenLimit ¶
func (*Model) GetSupportedGenerationMethods ¶
func (*Model) GetTemperature ¶
func (*Model) GetVersion ¶
func (*Model) ProtoMessage ¶
func (*Model) ProtoMessage()
func (*Model) ProtoReflect ¶
func (x *Model) ProtoReflect() protoreflect.Message
type ModelServiceClient ¶
type ModelServiceClient interface { // Gets information about a specific `Model` such as its version number, token // limits, // [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters) // and other metadata. Refer to the [Gemini models // guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed // model information. GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) // Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini) // available through the Gemini API. ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) }
ModelServiceClient is the client API for ModelService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewModelServiceClient ¶
func NewModelServiceClient(cc grpc.ClientConnInterface) ModelServiceClient
type ModelServiceServer ¶
type ModelServiceServer interface { // Gets information about a specific `Model` such as its version number, token // limits, // [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters) // and other metadata. Refer to the [Gemini models // guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed // model information. GetModel(context.Context, *GetModelRequest) (*Model, error) // Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini) // available through the Gemini API. ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error) }
ModelServiceServer is the server API for ModelService service.
type Part ¶
type Part struct { // Types that are assignable to Data: // // *Part_Text // *Part_InlineData Data isPart_Data `protobuf_oneof:"data"` // contains filtered or unexported fields }
A datatype containing media that is part of a multi-part `Content` message.
A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`.
A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if the `inline_data` field is filled with raw bytes.
func (*Part) Descriptor ¶ deprecated
func (*Part) GetInlineData ¶
func (*Part) ProtoMessage ¶
func (*Part) ProtoMessage()
func (*Part) ProtoReflect ¶
func (x *Part) ProtoReflect() protoreflect.Message
type Part_InlineData ¶
type Part_InlineData struct { // Inline media bytes. InlineData *Blob `protobuf:"bytes,3,opt,name=inline_data,json=inlineData,proto3,oneof"` }
type Part_Text ¶
type Part_Text struct { // Inline text. Text string `protobuf:"bytes,2,opt,name=text,proto3,oneof"` }
type SafetyRating ¶
type SafetyRating struct { // Required. The category for this rating. Category HarmCategory `protobuf:"varint,3,opt,name=category,proto3,enum=google.ai.generativelanguage.v1.HarmCategory" json:"category,omitempty"` // Required. The probability of harm for this content. Probability SafetyRating_HarmProbability `` /* 142-byte string literal not displayed */ // Was this content blocked because of this rating? Blocked bool `protobuf:"varint,5,opt,name=blocked,proto3" json:"blocked,omitempty"` // contains filtered or unexported fields }
Safety rating for a piece of content.
The safety rating contains the category of harm and the harm probability level in that category for a piece of content. Content is classified for safety across a number of harm categories and the probability of the harm classification is included here.
func (*SafetyRating) Descriptor ¶ deprecated
func (*SafetyRating) Descriptor() ([]byte, []int)
Deprecated: Use SafetyRating.ProtoReflect.Descriptor instead.
func (*SafetyRating) GetBlocked ¶
func (x *SafetyRating) GetBlocked() bool
func (*SafetyRating) GetCategory ¶
func (x *SafetyRating) GetCategory() HarmCategory
func (*SafetyRating) GetProbability ¶
func (x *SafetyRating) GetProbability() SafetyRating_HarmProbability
func (*SafetyRating) ProtoMessage ¶
func (*SafetyRating) ProtoMessage()
func (*SafetyRating) ProtoReflect ¶
func (x *SafetyRating) ProtoReflect() protoreflect.Message
func (*SafetyRating) Reset ¶
func (x *SafetyRating) Reset()
func (*SafetyRating) String ¶
func (x *SafetyRating) String() string
type SafetyRating_HarmProbability ¶
type SafetyRating_HarmProbability int32
The probability that a piece of content is harmful.
The classification system gives the probability of the content being unsafe. This does not indicate the severity of harm for a piece of content.
const ( // Probability is unspecified. SafetyRating_HARM_PROBABILITY_UNSPECIFIED SafetyRating_HarmProbability = 0 // Content has a negligible chance of being unsafe. SafetyRating_NEGLIGIBLE SafetyRating_HarmProbability = 1 // Content has a low chance of being unsafe. SafetyRating_LOW SafetyRating_HarmProbability = 2 // Content has a medium chance of being unsafe. SafetyRating_MEDIUM SafetyRating_HarmProbability = 3 // Content has a high chance of being unsafe. SafetyRating_HIGH SafetyRating_HarmProbability = 4 )
func (SafetyRating_HarmProbability) Descriptor ¶
func (SafetyRating_HarmProbability) Descriptor() protoreflect.EnumDescriptor
func (SafetyRating_HarmProbability) Enum ¶
func (x SafetyRating_HarmProbability) Enum() *SafetyRating_HarmProbability
func (SafetyRating_HarmProbability) EnumDescriptor ¶ deprecated
func (SafetyRating_HarmProbability) EnumDescriptor() ([]byte, []int)
Deprecated: Use SafetyRating_HarmProbability.Descriptor instead.
func (SafetyRating_HarmProbability) Number ¶
func (x SafetyRating_HarmProbability) Number() protoreflect.EnumNumber
func (SafetyRating_HarmProbability) String ¶
func (x SafetyRating_HarmProbability) String() string
func (SafetyRating_HarmProbability) Type ¶
func (SafetyRating_HarmProbability) Type() protoreflect.EnumType
type SafetySetting ¶
type SafetySetting struct { // Required. The category for this setting. Category HarmCategory `protobuf:"varint,3,opt,name=category,proto3,enum=google.ai.generativelanguage.v1.HarmCategory" json:"category,omitempty"` // Required. Controls the probability threshold at which harm is blocked. Threshold SafetySetting_HarmBlockThreshold `` /* 142-byte string literal not displayed */ // contains filtered or unexported fields }
Safety setting, affecting the safety-blocking behavior.
Passing a safety setting for a category changes the allowed probability that content is blocked.
func (*SafetySetting) Descriptor ¶ deprecated
func (*SafetySetting) Descriptor() ([]byte, []int)
Deprecated: Use SafetySetting.ProtoReflect.Descriptor instead.
func (*SafetySetting) GetCategory ¶
func (x *SafetySetting) GetCategory() HarmCategory
func (*SafetySetting) GetThreshold ¶
func (x *SafetySetting) GetThreshold() SafetySetting_HarmBlockThreshold
func (*SafetySetting) ProtoMessage ¶
func (*SafetySetting) ProtoMessage()
func (*SafetySetting) ProtoReflect ¶
func (x *SafetySetting) ProtoReflect() protoreflect.Message
func (*SafetySetting) Reset ¶
func (x *SafetySetting) Reset()
func (*SafetySetting) String ¶
func (x *SafetySetting) String() string
type SafetySetting_HarmBlockThreshold ¶
type SafetySetting_HarmBlockThreshold int32
Block at and beyond a specified harm probability.
const ( // Threshold is unspecified. SafetySetting_HARM_BLOCK_THRESHOLD_UNSPECIFIED SafetySetting_HarmBlockThreshold = 0 // Content with NEGLIGIBLE will be allowed. SafetySetting_BLOCK_LOW_AND_ABOVE SafetySetting_HarmBlockThreshold = 1 // Content with NEGLIGIBLE and LOW will be allowed. SafetySetting_BLOCK_MEDIUM_AND_ABOVE SafetySetting_HarmBlockThreshold = 2 // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed. SafetySetting_BLOCK_ONLY_HIGH SafetySetting_HarmBlockThreshold = 3 // All content will be allowed. SafetySetting_BLOCK_NONE SafetySetting_HarmBlockThreshold = 4 // Turn off the safety filter. SafetySetting_OFF SafetySetting_HarmBlockThreshold = 5 )
func (SafetySetting_HarmBlockThreshold) Descriptor ¶
func (SafetySetting_HarmBlockThreshold) Descriptor() protoreflect.EnumDescriptor
func (SafetySetting_HarmBlockThreshold) Enum ¶
func (x SafetySetting_HarmBlockThreshold) Enum() *SafetySetting_HarmBlockThreshold
func (SafetySetting_HarmBlockThreshold) EnumDescriptor ¶ deprecated
func (SafetySetting_HarmBlockThreshold) EnumDescriptor() ([]byte, []int)
Deprecated: Use SafetySetting_HarmBlockThreshold.Descriptor instead.
func (SafetySetting_HarmBlockThreshold) Number ¶
func (x SafetySetting_HarmBlockThreshold) Number() protoreflect.EnumNumber
func (SafetySetting_HarmBlockThreshold) String ¶
func (x SafetySetting_HarmBlockThreshold) String() string
func (SafetySetting_HarmBlockThreshold) Type ¶
func (SafetySetting_HarmBlockThreshold) Type() protoreflect.EnumType
type TaskType ¶
type TaskType int32
Type of task for which the embedding will be used.
const ( // Unset value, which will default to one of the other enum values. TaskType_TASK_TYPE_UNSPECIFIED TaskType = 0 // Specifies the given text is a query in a search/retrieval setting. TaskType_RETRIEVAL_QUERY TaskType = 1 // Specifies the given text is a document from the corpus being searched. TaskType_RETRIEVAL_DOCUMENT TaskType = 2 // Specifies the given text will be used for STS. TaskType_SEMANTIC_SIMILARITY TaskType = 3 // Specifies that the given text will be classified. TaskType_CLASSIFICATION TaskType = 4 // Specifies that the embeddings will be used for clustering. TaskType_CLUSTERING TaskType = 5 // Specifies that the given text will be used for question answering. TaskType_QUESTION_ANSWERING TaskType = 6 // Specifies that the given text will be used for fact verification. TaskType_FACT_VERIFICATION TaskType = 7 )
func (TaskType) Descriptor ¶
func (TaskType) Descriptor() protoreflect.EnumDescriptor
func (TaskType) EnumDescriptor ¶ deprecated
func (TaskType) Number ¶
func (x TaskType) Number() protoreflect.EnumNumber
func (TaskType) Type ¶
func (TaskType) Type() protoreflect.EnumType
type UnimplementedGenerativeServiceServer ¶
type UnimplementedGenerativeServiceServer struct { }
UnimplementedGenerativeServiceServer can be embedded to have forward compatible implementations.
func (*UnimplementedGenerativeServiceServer) BatchEmbedContents ¶
func (*UnimplementedGenerativeServiceServer) BatchEmbedContents(context.Context, *BatchEmbedContentsRequest) (*BatchEmbedContentsResponse, error)
func (*UnimplementedGenerativeServiceServer) CountTokens ¶
func (*UnimplementedGenerativeServiceServer) CountTokens(context.Context, *CountTokensRequest) (*CountTokensResponse, error)
func (*UnimplementedGenerativeServiceServer) EmbedContent ¶
func (*UnimplementedGenerativeServiceServer) EmbedContent(context.Context, *EmbedContentRequest) (*EmbedContentResponse, error)
func (*UnimplementedGenerativeServiceServer) GenerateContent ¶
func (*UnimplementedGenerativeServiceServer) GenerateContent(context.Context, *GenerateContentRequest) (*GenerateContentResponse, error)
func (*UnimplementedGenerativeServiceServer) StreamGenerateContent ¶
func (*UnimplementedGenerativeServiceServer) StreamGenerateContent(*GenerateContentRequest, GenerativeService_StreamGenerateContentServer) error
type UnimplementedModelServiceServer ¶
type UnimplementedModelServiceServer struct { }
UnimplementedModelServiceServer can be embedded to have forward compatible implementations.
func (*UnimplementedModelServiceServer) GetModel ¶
func (*UnimplementedModelServiceServer) GetModel(context.Context, *GetModelRequest) (*Model, error)
func (*UnimplementedModelServiceServer) ListModels ¶
func (*UnimplementedModelServiceServer) ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error)