Documentation
¶
Index ¶
- Constants
- Variables
- type APIError
- type Base64Embedding
- type BotActionUsage
- type BotChatCompletionRequest
- type BotChatCompletionResponse
- type BotChatCompletionStreamResponse
- type BotChatResultReference
- type BotCoverImage
- type BotModelUsage
- type BotUsage
- type ChatCompletionChoice
- type ChatCompletionMessage
- type ChatCompletionMessageContent
- type ChatCompletionMessageContentPart
- type ChatCompletionMessageContentPartType
- type ChatCompletionRequest
- type ChatCompletionResponse
- type ChatCompletionResponseChoicesElemModerationHitType
- type ChatCompletionStreamChoice
- type ChatCompletionStreamChoiceDelta
- type ChatCompletionStreamResponse
- type ChatMessageImageURL
- type ChatRequest
- type CompletionTokensDetails
- type Content
- type ContentGenerationContentItemType
- type ContentGenerationError
- type ContentGenerationUsage
- type ContextChatCompletionRequest
- type ContextMode
- type CreateChatCompletionRequest
- type CreateContentGenerationContentItem
- type CreateContentGenerationTaskRequest
- type CreateContentGenerationTaskResponse
- type CreateContextRequest
- type CreateContextResponse
- type DeleteContentGenerationTaskRequest
- type Embedding
- type EmbeddingEncodingFormat
- type EmbeddingRequest
- type EmbeddingRequestConverter
- type EmbeddingRequestStrings
- type EmbeddingRequestTokens
- type EmbeddingResponse
- type EmbeddingResponseBase64
- type ErrorResponse
- type FinishReason
- type FunctionCall
- type FunctionDefine deprecated
- type FunctionDefinition
- type GetContentGenerationTaskRequest
- type GetContentGenerationTaskResponse
- type HttpHeader
- type ImageURL
- type ImageURLDetail
- type ListContentGenerationTaskItem
- type ListContentGenerationTasksFilter
- type ListContentGenerationTasksRequest
- type ListContentGenerationTasksResponse
- type LogProb
- type LogProbs
- type MultiModalEmbeddingInputType
- type MultiModalEmbeddingRequest
- type MultiModalEmbeddingResponseBase64
- type MultimodalEmbedding
- type MultimodalEmbeddingImageURL
- type MultimodalEmbeddingInput
- type MultimodalEmbeddingPromptTokensDetail
- type MultimodalEmbeddingResponse
- type MultimodalEmbeddingUsage
- type PromptTokensDetail
- type RawResponse
- type RequestError
- type Response
- type ResponseFormat
- type ResponseFormatType
- type StreamOptions
- type Tokenization
- type TokenizationRequest
- type TokenizationRequestConverter
- type TokenizationRequestString
- type TokenizationRequestStrings
- type TokenizationResponse
- type Tool
- type ToolCall
- type ToolChoice
- type ToolChoiceFunction
- type ToolType
- type TopLogProbs
- type TruncationStrategy
- type TruncationStrategyType
- type Usage
Constants ¶
const ( ChatMessageRoleSystem = "system" ChatMessageRoleUser = "user" ChatMessageRoleAssistant = "assistant" ChatMessageRoleTool = "tool" )
const ( ToolChoiceStringTypeAuto = "auto" ToolChoiceStringTypeNone = "none" ToolChoiceStringTypeRequired = "required" )
const ( ClientRequestHeader = "X-Client-Request-Id" RetryAfterHeader = "Retry-After" DefaultMandatoryRefreshTimeout = 10 * 60 // 10 min DefaultAdvisoryRefreshTimeout = 30 * 60 // 30 min DefaultStsTimeout = 7 * 24 * 60 * 60 // 7 days InitialRetryDelay = 0.5 MaxRetryDelay = 8.0 ErrorRetryBaseDelay = 500 * time.Millisecond ErrorRetryMaxDelay = 8 * time.Second )
const ( StatusSucceeded = "succeeded" StatusCancelled = "cancelled" StatusFailed = "failed" StatusRunning = "running" StatusQueued = "queued" )
Variables ¶
var ( ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") ErrBodyWithoutEndpoint = errors.New("can't fetch endpoint sts token without endpoint") ErrBodyWithoutBot = errors.New("can't fetch bot sts token without bot id") ErrAKSKNotSupported = errors.New("ak&sk authentication is currently not supported for this method, please use api key instead") )
Functions ¶
This section is empty.
Types ¶
type APIError ¶
type Base64Embedding ¶
type Base64Embedding struct { Object string `json:"object"` Embedding base64String `json:"embedding"` Index int `json:"index"` }
Base64Embedding is a container for base64 encoded embeddings.
type BotActionUsage ¶ added in v1.0.146
type BotChatCompletionRequest ¶ added in v1.0.146
type BotChatCompletionRequest struct { BotId string `json:"bot_id,omitempty"` Model string `json:"model"` Messages []*ChatCompletionMessage `json:"messages"` MaxTokens int `json:"max_tokens,omitempty"` Temperature float32 `json:"temperature,omitempty"` TopP float32 `json:"top_p,omitempty"` Stream bool `json:"stream,omitempty"` Stop []string `json:"stop,omitempty"` FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` LogitBias map[string]int `json:"logit_bias,omitempty"` LogProbs bool `json:"logprobs,omitempty"` TopLogProbs int `json:"top_logprobs,omitempty"` User string `json:"user,omitempty"` FunctionCall interface{} `json:"function_call,omitempty"` Tools []*Tool `json:"tools,omitempty"` ToolChoice interface{} `json:"tool_choice,omitempty"` StreamOptions *StreamOptions `json:"stream_options,omitempty"` PresencePenalty float32 `json:"presence_penalty,omitempty"` RepetitionPenalty float32 `json:"repetition_penalty,omitempty"` N int `json:"n,omitempty"` ResponseFormat *ResponseFormat `json:"response_format,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
type BotChatCompletionResponse ¶ added in v1.0.146
type BotChatCompletionResponse struct { ChatCompletionResponse Metadata map[string]interface{} `json:"metadata,omitempty"` BotUsage *BotUsage `json:"bot_usage,omitempty"` References []*BotChatResultReference `json:"references,omitempty"` }
type BotChatCompletionStreamResponse ¶ added in v1.0.146
type BotChatCompletionStreamResponse struct { ChatCompletionStreamResponse Metadata map[string]interface{} `json:"metadata,omitempty"` BotUsage *BotUsage `json:"bot_usage,omitempty"` References []*BotChatResultReference `json:"references,omitempty"` }
type BotChatResultReference ¶ added in v1.0.146
type BotChatResultReference struct { Url string `json:"url,omitempty"` LogoUrl string `json:"logo_url,omitempty"` MobileUrl string `json:"mobile_url,omitempty"` SiteName string `json:"site_name,omitempty"` Title string `json:"title,omitempty"` CoverImage *BotCoverImage `json:"cover_image,omitempty"` Summary string `json:"summary,omitempty"` PublishTime string `json:"publish_time,omitempty"` CollectionName string `json:"collection_name,omitempty"` Project string `json:"project,omitempty"` DocId string `json:"doc_id,omitempty"` DocName string `json:"doc_name,omitempty"` DocType string `json:"doc_type,omitempty"` DocTitle string `json:"doc_title,omitempty"` ChunkId string `json:"chunk_id,omitempty"` ChunkTitle string `json:"chunk_title,omitempty"` PageNums string `json:"page_nums,omitempty"` OriginTextTokenLen int `json:"origin_text_token_len,omitempty"` FileName string `json:"file_name,omitempty"` Extra map[string]interface{} `json:"extra,omitempty"` }
type BotCoverImage ¶ added in v1.0.146
type BotModelUsage ¶ added in v1.0.146
type BotUsage ¶ added in v1.0.146
type BotUsage struct { ModelUsage []*BotModelUsage `json:"model_usage,omitempty"` ActionUsage []*BotActionUsage `json:"action_usage,omitempty"` }
type ChatCompletionChoice ¶
type ChatCompletionChoice struct { Index int `json:"index"` Message ChatCompletionMessage `json:"message"` // FinishReason // stop: API returned complete message, // or a message terminated by one of the stop sequences provided via the stop parameter // length: Incomplete model output due to max_tokens parameter or token limit // function_call: The model decided to call a function // content_filter: Omitted content due to a flag from our content filters // null: API response still in progress or incomplete FinishReason FinishReason `json:"finish_reason"` // ModerationHitType // The type of content moderation strategy hit. // Only after selecting a moderation strategy for the endpoint that supports returning moderation hit types, // API will return the corresponding values. ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"` LogProbs *LogProbs `json:"logprobs,omitempty"` }
type ChatCompletionMessage ¶
type ChatCompletionMessage struct { Role string `json:"role"` Content *ChatCompletionMessageContent `json:"content"` ReasoningContent *string `json:"reasoning_content,omitempty"` FunctionCall *FunctionCall `json:"function_call,omitempty"` ToolCalls []*ToolCall `json:"tool_calls,omitempty"` ToolCallID string `json:"tool_call_id,omitempty"` }
type ChatCompletionMessageContent ¶
type ChatCompletionMessageContent struct { StringValue *string ListValue []*ChatCompletionMessageContentPart }
func (ChatCompletionMessageContent) MarshalJSON ¶
func (j ChatCompletionMessageContent) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler.
func (*ChatCompletionMessageContent) UnmarshalJSON ¶
func (j *ChatCompletionMessageContent) UnmarshalJSON(b []byte) error
type ChatCompletionMessageContentPart ¶
type ChatCompletionMessageContentPart struct { Type ChatCompletionMessageContentPartType `json:"type,omitempty"` Text string `json:"text,omitempty"` ImageURL *ChatMessageImageURL `json:"image_url,omitempty"` }
type ChatCompletionMessageContentPartType ¶
type ChatCompletionMessageContentPartType string
const ( ChatCompletionMessageContentPartTypeText ChatCompletionMessageContentPartType = "text" ChatCompletionMessageContentPartTypeImageURL ChatCompletionMessageContentPartType = "image_url" )
type ChatCompletionRequest ¶
type ChatCompletionRequest struct { Model string `json:"model"` Messages []*ChatCompletionMessage `json:"messages"` MaxTokens int `json:"max_tokens,omitempty"` Temperature float32 `json:"temperature,omitempty"` TopP float32 `json:"top_p,omitempty"` Stream bool `json:"stream,omitempty"` Stop []string `json:"stop,omitempty"` FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` LogitBias map[string]int `json:"logit_bias,omitempty"` LogProbs bool `json:"logprobs,omitempty"` TopLogProbs int `json:"top_logprobs,omitempty"` User string `json:"user,omitempty"` FunctionCall interface{} `json:"function_call,omitempty"` Tools []*Tool `json:"tools,omitempty"` ToolChoice interface{} `json:"tool_choice,omitempty"` StreamOptions *StreamOptions `json:"stream_options,omitempty"` PresencePenalty float32 `json:"presence_penalty,omitempty"` RepetitionPenalty float32 `json:"repetition_penalty,omitempty"` N int `json:"n,omitempty"` ResponseFormat *ResponseFormat `json:"response_format,omitempty"` }
ChatCompletionRequest [Deprecated] - When making a request using this struct, only non-zero fields take effect. This means that if your field value is 0, an empty string (""), false, or other zero values, it will not be sent to the server. The server will handle these fields according to their default values. If you need to specify a zero value, please use CreateChatCompletionRequest.
func (ChatCompletionRequest) GetModel ¶ added in v1.0.159
func (r ChatCompletionRequest) GetModel() string
func (ChatCompletionRequest) IsStream ¶ added in v1.0.159
func (r ChatCompletionRequest) IsStream() bool
func (ChatCompletionRequest) MarshalJSON ¶ added in v1.0.159
func (r ChatCompletionRequest) MarshalJSON() ([]byte, error)
func (ChatCompletionRequest) WithStream ¶ added in v1.0.159
func (r ChatCompletionRequest) WithStream(stream bool) ChatRequest
type ChatCompletionResponse ¶
type ChatCompletionResponse struct { ID string `json:"id"` Object string `json:"object"` Created int64 `json:"created"` Model string `json:"model"` Choices []*ChatCompletionChoice `json:"choices"` Usage Usage `json:"usage"` HttpHeader }
ChatCompletionResponse represents a response structure for chat completion API.
type ChatCompletionResponseChoicesElemModerationHitType ¶ added in v1.0.177
type ChatCompletionResponseChoicesElemModerationHitType string
const ( ChatCompletionResponseChoicesElemModerationHitTypeViolence ChatCompletionResponseChoicesElemModerationHitType = "violence" ChatCompletionResponseChoicesElemModerationHitTypeSevereViolation ChatCompletionResponseChoicesElemModerationHitType = "severe_violation" )
type ChatCompletionStreamChoice ¶
type ChatCompletionStreamChoice struct { Index int `json:"index"` Delta ChatCompletionStreamChoiceDelta `json:"delta"` LogProbs *LogProbs `json:"logprobs,omitempty"` FinishReason FinishReason `json:"finish_reason"` ModerationHitType *ChatCompletionResponseChoicesElemModerationHitType `json:"moderation_hit_type,omitempty" yaml:"moderation_hit_type,omitempty" mapstructure:"moderation_hit_type,omitempty"` }
type ChatCompletionStreamChoiceDelta ¶
type ChatCompletionStreamChoiceDelta struct { Content string `json:"content,omitempty"` Role string `json:"role,omitempty"` ReasoningContent *string `json:"reasoning_content,omitempty"` FunctionCall *FunctionCall `json:"function_call,omitempty"` ToolCalls []*ToolCall `json:"tool_calls,omitempty"` }
type ChatCompletionStreamResponse ¶
type ChatCompletionStreamResponse struct { ID string `json:"id"` Object string `json:"object"` Created int64 `json:"created"` Model string `json:"model"` Choices []*ChatCompletionStreamChoice `json:"choices"` // An optional field that will only be present when you set stream_options: {"include_usage": true} in your request. // When present, it contains a null value except for the last chunk which contains the token usage statistics // for the entire request. Usage *Usage `json:"usage,omitempty"` }
type ChatMessageImageURL ¶
type ChatMessageImageURL struct { URL string `json:"url,omitempty"` Detail ImageURLDetail `json:"detail,omitempty"` }
type ChatRequest ¶ added in v1.0.159
type CompletionTokensDetails ¶ added in v1.0.180
type CompletionTokensDetails struct {
ReasoningTokens int `json:"reasoning_tokens"`
}
type ContentGenerationContentItemType ¶ added in v1.0.177
type ContentGenerationContentItemType string
const ( ContentGenerationContentItemTypeText ContentGenerationContentItemType = "text" ContentGenerationContentItemTypeImage ContentGenerationContentItemType = "image_url" )
type ContentGenerationError ¶ added in v1.0.179
type ContentGenerationUsage ¶ added in v1.0.177
type ContentGenerationUsage struct {
CompletionTokens int `json:"completion_tokens"`
}
type ContextChatCompletionRequest ¶ added in v1.0.173
type ContextChatCompletionRequest struct { ContextID string `json:"context_id"` Mode ContextMode `json:"mode"` Model string `json:"model"` Messages []*ChatCompletionMessage `json:"messages"` MaxTokens int `json:"max_tokens,omitempty"` Temperature float32 `json:"temperature,omitempty"` TopP float32 `json:"top_p,omitempty"` Stream bool `json:"stream,omitempty"` Stop []string `json:"stop,omitempty"` FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` LogitBias map[string]int `json:"logit_bias,omitempty"` LogProbs bool `json:"logprobs,omitempty"` TopLogProbs int `json:"top_logprobs,omitempty"` User string `json:"user,omitempty"` FunctionCall interface{} `json:"function_call,omitempty"` Tools []*Tool `json:"tools,omitempty"` ToolChoice interface{} `json:"tool_choice,omitempty"` StreamOptions *StreamOptions `json:"stream_options,omitempty"` Metadata map[string]interface{} `json:"metadata,omitempty"` }
type ContextMode ¶ added in v1.0.173
type ContextMode string
const ( ContextModeSession ContextMode = "session" ContextModeCommonPrefix ContextMode = "common_prefix" )
type CreateChatCompletionRequest ¶ added in v1.0.159
type CreateChatCompletionRequest struct { Model string `json:"model"` Messages []*ChatCompletionMessage `json:"messages"` MaxTokens *int `json:"max_tokens,omitempty"` Temperature *float32 `json:"temperature,omitempty"` TopP *float32 `json:"top_p,omitempty"` Stream *bool `json:"stream,omitempty"` Stop []string `json:"stop,omitempty"` FrequencyPenalty *float32 `json:"frequency_penalty,omitempty"` LogitBias map[string]int `json:"logit_bias,omitempty"` LogProbs *bool `json:"logprobs,omitempty"` TopLogProbs *int `json:"top_logprobs,omitempty"` User *string `json:"user,omitempty"` FunctionCall interface{} `json:"function_call,omitempty"` Tools []*Tool `json:"tools,omitempty"` ToolChoice interface{} `json:"tool_choice,omitempty"` StreamOptions *StreamOptions `json:"stream_options,omitempty"` PresencePenalty *float32 `json:"presence_penalty,omitempty"` RepetitionPenalty *float32 `json:"repetition_penalty,omitempty"` N *int `json:"n,omitempty"` ResponseFormat *ResponseFormat `json:"response_format,omitempty"` }
CreateChatCompletionRequest - When making a request using this struct, if your field value is 0, an empty string (""), false, or other zero values, it will be sent to the server. The server will handle these fields according to the specified values.
func (CreateChatCompletionRequest) GetModel ¶ added in v1.0.159
func (r CreateChatCompletionRequest) GetModel() string
func (CreateChatCompletionRequest) IsStream ¶ added in v1.0.159
func (r CreateChatCompletionRequest) IsStream() bool
func (CreateChatCompletionRequest) MarshalJSON ¶ added in v1.0.159
func (r CreateChatCompletionRequest) MarshalJSON() ([]byte, error)
func (CreateChatCompletionRequest) WithStream ¶ added in v1.0.159
func (r CreateChatCompletionRequest) WithStream(stream bool) ChatRequest
type CreateContentGenerationContentItem ¶ added in v1.0.177
type CreateContentGenerationContentItem struct { Type ContentGenerationContentItemType `json:"type"` Text *string `json:"text,omitempty"` ImageURL *ImageURL `json:"image_url,omitempty"` }
type CreateContentGenerationTaskRequest ¶ added in v1.0.177
type CreateContentGenerationTaskRequest struct { Model string `json:"model"` Content []*CreateContentGenerationContentItem `json:"content"` }
type CreateContentGenerationTaskResponse ¶ added in v1.0.177
type CreateContentGenerationTaskResponse struct { ID string `json:"id"` HttpHeader }
type CreateContextRequest ¶ added in v1.0.173
type CreateContextRequest struct { Model string `json:"model"` Mode ContextMode `json:"mode"` Messages []*ChatCompletionMessage `json:"messages"` TTL *int `json:"ttl,omitempty"` TruncationStrategy *TruncationStrategy `json:"truncation_strategy,omitempty"` }
type CreateContextResponse ¶ added in v1.0.173
type CreateContextResponse struct { ID string `json:"id"` Mode ContextMode `json:"mode"` Model string `json:"model"` TTL *int `json:"ttl,omitempty"` TruncationStrategy *TruncationStrategy `json:"truncation_strategy,omitempty"` Usage Usage `json:"usage"` HttpHeader }
type DeleteContentGenerationTaskRequest ¶ added in v1.0.177
type DeleteContentGenerationTaskRequest struct {
ID string `json:"id"`
}
type Embedding ¶
type Embedding struct { Object string `json:"object"` Embedding []float32 `json:"embedding"` Index int `json:"index"` }
Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Each embedding is a vector of floating point numbers, such that the distance between two embeddings in the vector space is correlated with semantic similarity between two inputs in the original format. For example, if two texts are similar, then their vector representations should also be similar.
type EmbeddingEncodingFormat ¶
type EmbeddingEncodingFormat string
EmbeddingEncodingFormat is the format of the embeddings data. Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. If not specified will use "float".
const ( EmbeddingEncodingFormatFloat EmbeddingEncodingFormat = "float" EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64" )
type EmbeddingRequest ¶
type EmbeddingRequest struct { Input interface{} `json:"input"` Model string `json:"model"` User string `json:"user"` EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` // Dimensions The number of dimensions the resulting output embeddings should have. // Only supported in text-embedding-3 and later models. Dimensions int `json:"dimensions,omitempty"` }
func (EmbeddingRequest) Convert ¶
func (r EmbeddingRequest) Convert() EmbeddingRequest
type EmbeddingRequestConverter ¶
type EmbeddingRequestConverter interface { // Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens Convert() EmbeddingRequest }
type EmbeddingRequestStrings ¶
type EmbeddingRequestStrings struct { // Input is a slice of strings for which you want to generate an Embedding vector. // Each input must not exceed 8192 tokens in length. // OpenAPI suggests replacing newlines (\n) in your input with a single space, as they // have observed inferior results when newlines are present. // E.g. // "The food was delicious and the waiter..." Input []string `json:"input"` // ID of the model to use. You can use the List models API to see all of your available models, // or see our Model overview for descriptions of them. Model string `json:"model"` // A unique identifier representing your end-user, which will help to monitor and detect abuse. User string `json:"user"` // EmbeddingEncodingFormat is the format of the embeddings data. // Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. // If not specified will use "float". EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` // Dimensions The number of dimensions the resulting output embeddings should have. // Only supported in text-embedding-3 and later models. Dimensions int `json:"dimensions,omitempty"` }
EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.
func (EmbeddingRequestStrings) Convert ¶
func (r EmbeddingRequestStrings) Convert() EmbeddingRequest
type EmbeddingRequestTokens ¶
type EmbeddingRequestTokens struct { // Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector. // Each input must not exceed 8192 tokens in length. // OpenAPI suggests replacing newlines (\n) in your input with a single space, as they // have observed inferior results when newlines are present. // E.g. // "The food was delicious and the waiter..." Input [][]int `json:"input"` // ID of the model to use. You can use the List models API to see all of your available models, // or see our Model overview for descriptions of them. Model string `json:"model"` // A unique identifier representing your end-user, which will help to monitor and detect abuse. User string `json:"user"` // EmbeddingEncodingFormat is the format of the embeddings data. // Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. // If not specified will use "float". EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` // Dimensions The number of dimensions the resulting output embeddings should have. // Only supported in text-embedding-3 and later models. Dimensions int `json:"dimensions,omitempty"` }
func (EmbeddingRequestTokens) Convert ¶
func (r EmbeddingRequestTokens) Convert() EmbeddingRequest
type EmbeddingResponse ¶
type EmbeddingResponse struct { ID string `json:"id"` Created int `json:"created"` Object string `json:"object"` Data []Embedding `json:"data"` Model string `json:"model"` Usage Usage `json:"usage"` HttpHeader }
EmbeddingResponse is the response from a Create embeddings request.
type EmbeddingResponseBase64 ¶
type EmbeddingResponseBase64 struct { Object string `json:"object"` Data []Base64Embedding `json:"data"` Model string `json:"model"` Usage Usage `json:"usage"` HttpHeader }
EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.
func (*EmbeddingResponseBase64) ToEmbeddingResponse ¶
func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error)
ToEmbeddingResponse converts an EmbeddingResponseBase64 to an EmbeddingResponse.
type ErrorResponse ¶
type ErrorResponse struct {
Error *APIError `json:"error,omitempty"`
}
type FinishReason ¶
type FinishReason string
const ( FinishReasonStop FinishReason = "stop" FinishReasonLength FinishReason = "length" FinishReasonFunctionCall FinishReason = "function_call" FinishReasonToolCalls FinishReason = "tool_calls" FinishReasonContentFilter FinishReason = "content_filter" FinishReasonNull FinishReason = "null" )
func (FinishReason) MarshalJSON ¶
func (r FinishReason) MarshalJSON() ([]byte, error)
type FunctionCall ¶
type FunctionDefine
deprecated
type FunctionDefine = FunctionDefinition
Deprecated: use FunctionDefinition instead.
type FunctionDefinition ¶
type FunctionDefinition struct { Name string `json:"name"` Description string `json:"description,omitempty"` // Parameters is an object describing the function. // You can pass json.RawMessage to describe the schema, // or you can pass in a struct which serializes to the proper JSON schema. // The jsonschema package is provided for convenience, but you should // consider another specialized library if you require more complex schemas. Parameters interface{} `json:"parameters"` }
type GetContentGenerationTaskRequest ¶ added in v1.0.177
type GetContentGenerationTaskRequest struct {
ID string `json:"id"`
}
type GetContentGenerationTaskResponse ¶ added in v1.0.177
type GetContentGenerationTaskResponse struct { ID string `json:"id"` Model string `json:"model"` Status string `json:"status"` Error *ContentGenerationError `json:"error,omitempty"` Content Content `json:"content"` Usage Usage `json:"usage"` CreatedAt int64 `json:"created_at"` UpdatedAt int64 `json:"updated_at"` HttpHeader }
type HttpHeader ¶
func (*HttpHeader) GetHeader ¶ added in v1.0.172
func (h *HttpHeader) GetHeader() http.Header
func (*HttpHeader) Header ¶
func (h *HttpHeader) Header() http.Header
func (*HttpHeader) SetHeader ¶
func (h *HttpHeader) SetHeader(header http.Header)
type ImageURLDetail ¶
type ImageURLDetail string
const ( ImageURLDetailHigh ImageURLDetail = "high" ImageURLDetailLow ImageURLDetail = "low" ImageURLDetailAuto ImageURLDetail = "auto" )
type ListContentGenerationTaskItem ¶ added in v1.0.177
type ListContentGenerationTaskItem struct { ID string `json:"id"` Model string `json:"model"` Status string `json:"status"` FailureReason *ContentGenerationError `json:"failure_reason,omitempty"` Content Content `json:"content"` Usage Usage `json:"usage"` CreatedAt int64 `json:"created_at"` UpdatedAt int64 `json:"updated_at"` }
type ListContentGenerationTasksFilter ¶ added in v1.0.177
type ListContentGenerationTasksRequest ¶ added in v1.0.177
type ListContentGenerationTasksRequest struct { PageNum *int `json:"page_num,omitempty"` PageSize *int `json:"page_size,omitempty"` Filter *ListContentGenerationTasksFilter `json:"filter,omitempty"` }
type ListContentGenerationTasksResponse ¶ added in v1.0.177
type ListContentGenerationTasksResponse struct { Total int64 `json:"total"` Items []ListContentGenerationTaskItem `json:"items"` HttpHeader }
type LogProb ¶
type LogProb struct { Token string `json:"token"` LogProb float64 `json:"logprob"` Bytes []rune `json:"bytes,omitempty"` // Omitting the field if it is null // TopLogProbs is a list of the most likely tokens and their log probability, at this token position. // In rare cases, there may be fewer than the number of requested top_logprobs returned. TopLogProbs []*TopLogProbs `json:"top_logprobs"` }
LogProb represents the probability information for a token.
type LogProbs ¶
type LogProbs struct { // Content is a list of message content tokens with log probability information. Content []*LogProb `json:"content"` }
LogProbs is the top-level structure containing the log probability information.
type MultiModalEmbeddingInputType ¶ added in v1.0.179
type MultiModalEmbeddingInputType string
const ( MultiModalEmbeddingInputTypeText MultiModalEmbeddingInputType = "text" MultiModalEmbeddingInputTypeImageURL MultiModalEmbeddingInputType = "image_url" )
type MultiModalEmbeddingRequest ¶ added in v1.0.179
type MultiModalEmbeddingRequest struct { Input []MultimodalEmbeddingInput `json:"input"` // ID of the model to use. You can use the List models API to see all of your available models, // or see our Model overview for descriptions of them. Model string `json:"model"` // EmbeddingEncodingFormat is the format of the embeddings data. // Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. // If not specified will use "float". EncodingFormat *EmbeddingEncodingFormat `json:"encoding_format,omitempty"` }
MultiModalEmbeddingRequest is the input to a create embeddings request.
type MultiModalEmbeddingResponseBase64 ¶ added in v1.0.179
type MultiModalEmbeddingResponseBase64 struct { Id string `json:"id"` Model string `json:"model"` Created int64 `json:"created"` Object string `json:"object"` Data Base64Embedding `json:"data"` Usage MultimodalEmbeddingUsage `json:"usage"` HttpHeader }
MultiModalEmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.
func (*MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse ¶ added in v1.0.179
func (r *MultiModalEmbeddingResponseBase64) ToMultiModalEmbeddingResponse() (MultimodalEmbeddingResponse, error)
ToMultiModalEmbeddingResponse converts a MultiModalEmbeddingResponseBase64 to a MultimodalEmbeddingResponse.
type MultimodalEmbedding ¶ added in v1.0.179
type MultimodalEmbeddingImageURL ¶ added in v1.0.179
type MultimodalEmbeddingImageURL struct {
URL string `json:"url"`
}
type MultimodalEmbeddingInput ¶ added in v1.0.179
type MultimodalEmbeddingInput struct { Type MultiModalEmbeddingInputType `json:"type"` Text *string `json:"text,omitempty"` ImageURL *MultimodalEmbeddingImageURL `json:"image_url,omitempty"` }
type MultimodalEmbeddingPromptTokensDetail ¶ added in v1.0.179
type MultimodalEmbeddingResponse ¶ added in v1.0.179
type MultimodalEmbeddingResponse struct { Id string `json:"id"` Model string `json:"model"` Created int64 `json:"created"` Object string `json:"object"` Data MultimodalEmbedding `json:"data"` Usage MultimodalEmbeddingUsage `json:"usage"` HttpHeader }
type MultimodalEmbeddingUsage ¶ added in v1.0.179
type MultimodalEmbeddingUsage struct { PromptTokens int `json:"prompt_tokens"` TotalTokens int `json:"total_tokens"` PromptTokensDetails MultimodalEmbeddingPromptTokensDetail `json:"prompt_tokens_details"` }
type PromptTokensDetail ¶ added in v1.0.173
type PromptTokensDetail struct {
CachedTokens int `json:"cached_tokens"`
}
type RawResponse ¶
type RawResponse struct { io.ReadCloser HttpHeader }
type RequestError ¶
RequestError provides information about generic request errors.
func NewRequestError ¶ added in v1.0.160
func NewRequestError(httpStatusCode int, rawErr error, requestID string) *RequestError
func (*RequestError) Error ¶
func (e *RequestError) Error() string
func (*RequestError) Unwrap ¶
func (e *RequestError) Unwrap() error
type ResponseFormat ¶ added in v1.0.151
type ResponseFormat struct { Type ResponseFormatType `json:"type"` Schema interface{} `json:"schema,omitempty"` }
type ResponseFormatType ¶ added in v1.0.151
type ResponseFormatType string
const ( ResponseFormatJsonObject ResponseFormatType = "json_object" ResponseFormatText ResponseFormatType = "text" )
type StreamOptions ¶
type StreamOptions struct { // If set, an additional chunk will be streamed before the data: [DONE] message. // The usage field on this chunk shows the token usage statistics for the entire request, // and the choices field will always be an empty array. // All other chunks will also include a usage field, but with a null value. IncludeUsage bool `json:"include_usage,omitempty"` }
type Tokenization ¶ added in v1.0.151
type TokenizationRequest ¶ added in v1.0.151
type TokenizationRequest struct { Text interface{} `json:"text"` Model string `json:"model"` User string `json:"user"` }
func (TokenizationRequest) Convert ¶ added in v1.0.151
func (r TokenizationRequest) Convert() TokenizationRequest
type TokenizationRequestConverter ¶ added in v1.0.151
type TokenizationRequestConverter interface {
Convert() TokenizationRequest
}
type TokenizationRequestString ¶ added in v1.0.151
type TokenizationRequestString struct { Text string `json:"text"` Model string `json:"model"` User string `json:"user"` }
TokenizationRequestString is the input to a create tokenization request with a single string.
func (TokenizationRequestString) Convert ¶ added in v1.0.151
func (r TokenizationRequestString) Convert() TokenizationRequest
type TokenizationRequestStrings ¶ added in v1.0.151
type TokenizationRequestStrings struct { Text []string `json:"text"` Model string `json:"model"` User string `json:"user"` }
TokenizationRequestStrings is the input to a create tokenization request with a slice of strings.
func (TokenizationRequestStrings) Convert ¶ added in v1.0.151
func (r TokenizationRequestStrings) Convert() TokenizationRequest
type TokenizationResponse ¶ added in v1.0.151
type TokenizationResponse struct { ID string `json:"id"` Created int `json:"created"` Model string `json:"model"` Object string `json:"object"` Data []*Tokenization `json:"data"` HttpHeader }
TokenizationResponse is the response from a Create tokenization request.
type Tool ¶
type Tool struct { Type ToolType `json:"type"` Function *FunctionDefinition `json:"function,omitempty"` }
type ToolCall ¶
type ToolCall struct { ID string `json:"id"` Type ToolType `json:"type"` Function FunctionCall `json:"function"` }
type ToolChoice ¶
type ToolChoice struct { Type ToolType `json:"type"` Function ToolChoiceFunction `json:"function,omitempty"` }
type ToolChoiceFunction ¶ added in v1.0.151
type ToolChoiceFunction struct {
Name string `json:"name"`
}
type TopLogProbs ¶
type TruncationStrategy ¶ added in v1.0.173
type TruncationStrategy struct { Type TruncationStrategyType `json:"type"` LastHistoryTokens *int `json:"last_history_tokens,omitempty"` RollingTokens *bool `json:"rolling_tokens,omitempty"` }
type TruncationStrategyType ¶ added in v1.0.173
type TruncationStrategyType string
const ( TruncationStrategyTypeLastHistoryTokens TruncationStrategyType = "last_history_tokens" TruncationStrategyTypeRollingTokens TruncationStrategyType = "rolling_tokens" )
type Usage ¶
type Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` PromptTokensDetails PromptTokensDetail `json:"prompt_tokens_details"` CompletionTokensDetails CompletionTokensDetails `json:"completion_tokens_details"` }