Documentation
¶
Index ¶
Constants ¶
View Source
const (
	ModelGpt4O     = "gpt-4o"     // Safe default
	ModelO1Preview = "o1-preview" // Expensive reasoning model
	ModelO1Mini    = "o1-mini"    // Cheaper reasoning model
	ModelO3Mini    = "o3-mini"    // Cheaper yet powerful reasoning model
)
View Source
const ChatAPIPath = "/v1/chat/completions"
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type ChatMessage ¶
type ChatRequest ¶
type ChatRequest struct {
	Model               string             `json:"model"`
	Messages            []ChatMessage      `json:"messages"`
	Temperature         *float64           `json:"temperature,omitempty"`           // What sampling temperature to use, between 0 and 2.
	TopP                *float64           `json:"top_p,omitempty"`                 // Nucleus sampling. Specify this or temperature but not both.
	N                   int                `json:"n,omitempty"`                     // How many chat completion choices to generate for each input message.
	Stream              bool               `json:"stream,omitempty"`                // If set, partial message deltas will be sent as data-only server-sent events as they become available.
	Stop                []string           `json:"stop,omitempty"`                  // Up to 4 sequences where the API will stop generating further tokens.
	MaxTokens           int                `json:"max_tokens,omitempty"`            // Deprecated: in favor of `max_completion_tokens`
	MaxCompletionTokens int                `json:"max_completion_tokens,omitempty"` // Including visible output tokens and reasoning tokens.
	PresencePenalty     *float64           `json:"presence_penalty,omitempty"`      // Number between -2.0 and 2.0.
	FrequencyPenalty    *float64           `json:"frequency_penalty,omitempty"`     // Number between -2.0 and 2.0.
	LogitBias           map[string]float64 `json:"logit_bias,omitempty"`            // Modify the likelihood of specified tokens appearing in the completion.
	User                string             `json:"user,omitempty"`                  // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	ReasoningEffort     ReasoningEffort    `json:"reasoning_effort,omitempty"`      // Constrains effort on reasoning for reasoning models.
}
type ChatResponse ¶
type ChatResponseChoice ¶
type ChatResponseChoice struct {
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
	Index        int         `json:"index"`
}
type ChatResponseStream ¶
type ChatResponseStreamChunk ¶
type ChatResponseStreamChunk struct {
	ID      string
	Object  string
	Created int
	Model   string
	Choices []ChatResponseStreamChoice
}
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func (*Client) ChatCompletion ¶
func (c *Client) ChatCompletion(request ChatRequest) (*ChatResponse, error)
func (*Client) ChatCompletionStream ¶
func (c *Client) ChatCompletionStream(request ChatRequest) (*ChatResponseStream, error)
type ReasoningEffort ¶
type ReasoningEffort string
const (
	ReasoningEffortLow    ReasoningEffort = "low"
	ReasoningEffortMedium ReasoningEffort = "medium"
	ReasoningEffortHigh   ReasoningEffort = "high"
)
Click to show internal directories.
Click to hide internal directories.