Documentation ¶
Index ¶
- Constants
- Variables
- func HandleAPIError(resp *http.Response) error
- type APIError
- type APIType
- type BalanceInfo
- type BalanceResponse
- type ChatCompletionMessage
- type ChatCompletionRequest
- type ChatCompletionStream
- type Client
- type ClientConfig
- type ErrorResponse
- type Function
- type HTTPDoer
- type Parameters
- type ResponseFormat
- type StreamChatCompletionMessage
- type StreamChatCompletionRequest
- type StreamChatCompletionResponse
- type StreamChoices
- type StreamDelta
- type StreamOptions
- type StreamUsage
- type Tools
Constants ¶
const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
)
const (
	DeepSeekChat  = "deepseek-chat"
	DeepSeekCoder = "deepseek-coder"
)
Model identifiers for the DeepSeek chat and coder models.
const BaseURL string = "https://api.deepseek.com/v1"
Variables ¶
var (
	ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method")
	ErrUnexpectedResponseFormat         = errors.New("unexpected response format")
)
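Callers can distinguish these sentinel errors with errors.Is. A minimal sketch, assuming the package is imported as deepseek:

	// Sketch: checking for a specific sentinel error after a failed call.
	if errors.Is(err, deepseek.ErrUnexpectedResponseFormat) {
		// The API replied with a body this client could not parse.
	}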
Functions ¶
func HandleAPIError ¶
func HandleAPIError(resp *http.Response) error
Tries to handle the errors listed at: https://api-docs.deepseek.com/quick_start/error_codes
Types ¶
type BalanceInfo ¶
type BalanceResponse ¶
type BalanceResponse struct {
	IsAvailable  bool          `json:"is_available"`
	BalanceInfos []BalanceInfo `json:"balance_infos"`
}
func GetBalance ¶
func GetBalance(client *Client, ctx context.Context) (*BalanceResponse, error)
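A minimal usage sketch for the balance query, assuming the package is imported as deepseek and a Client has already been constructed (client construction is not shown in this documentation). Note the signature above takes the client before the context.

	ctx := context.Background()
	balance, err := deepseek.GetBalance(client, ctx) // note: client first, then ctx
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("available:", balance.IsAvailable)
	for _, info := range balance.BalanceInfos {
		fmt.Printf("%+v\n", info) // BalanceInfo's fields are not documented above
	}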
type ChatCompletionMessage ¶
type ChatCompletionRequest ¶
type ChatCompletionRequest struct {
	Model            string                  `json:"model"`                       // Required: Model ID, e.g., "deepseek-chat"
	Messages         []ChatCompletionMessage `json:"messages"`                    // Required: List of messages
	FrequencyPenalty float32                 `json:"frequency_penalty,omitempty"` // Optional: Frequency penalty, >= -2 and <= 2
	MaxTokens        int                     `json:"max_tokens,omitempty"`        // Optional: Maximum tokens, > 1
	PresencePenalty  float32                 `json:"presence_penalty,omitempty"`  // Optional: Presence penalty, >= -2 and <= 2
	Temperature      float32                 `json:"temperature,omitempty"`       // Optional: Sampling temperature, <= 2
	TopP             float32                 `json:"top_p,omitempty"`             // Optional: Nucleus sampling parameter, <= 1
	ResponseFormat   *ResponseFormat         `json:"response_format,omitempty"`   // Optional: Custom response format
	Stop             []string                `json:"stop,omitempty"`              // Optional: Stop signals
	Tools            []Tools                 `json:"tools,omitempty"`             // Optional: List of tools
	LogProbs         bool                    `json:"logprobs,omitempty"`          // Optional: Enable log probabilities
	TopLogProbs      int                     `json:"top_logprobs,omitempty"`      // Optional: Number of top tokens with log probabilities, <= 20
}
Streaming requests use the separate StreamChatCompletionRequest struct, which carries the stream-specific options.
type ChatCompletionStream ¶
type ChatCompletionStream interface {
	Recv() (*StreamChatCompletionResponse, error)
	Close() error
}
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func (*Client) CreateChatCompletion ¶
func (c *Client) CreateChatCompletion(
	ctx context.Context,
	request *ChatCompletionRequest,
) (*utils.ChatCompletionResponse, error)
CreateChatCompletion sends a chat completion request and returns the generated response.
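A minimal sketch of a non-streaming call, assuming the package is imported as deepseek, a Client has already been constructed, and the usual imports (context, fmt, log) are in place. The Role and Content fields on ChatCompletionMessage are assumptions inferred from the role constants above; they are not documented in this section.

	req := &deepseek.ChatCompletionRequest{
		Model: deepseek.DeepSeekChat,
		Messages: []deepseek.ChatCompletionMessage{
			{Role: deepseek.ChatMessageRoleSystem, Content: "You are a helpful assistant."}, // field names assumed
			{Role: deepseek.ChatMessageRoleUser, Content: "Hello!"},
		},
	}
	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", resp) // inspect the utils.ChatCompletionResponse for the reply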
func (*Client) CreateChatCompletionStream ¶
func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request *StreamChatCompletionRequest,
) (ChatCompletionStream, error)
CreateChatCompletionStream sends a chat completion request with stream = true and returns the response deltas as a stream.
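A minimal consumption sketch for the stream, with the same assumptions as the previous example plus the errors and io imports. Treating io.EOF as Recv's end-of-stream signal is an additional assumption; check the package's actual error semantics.

	stream, err := client.CreateChatCompletionStream(ctx, &deepseek.StreamChatCompletionRequest{
		Model: deepseek.DeepSeekChat,
		Messages: []deepseek.ChatCompletionMessage{
			{Role: deepseek.ChatMessageRoleUser, Content: "Write a haiku."},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()
	for {
		chunk, err := stream.Recv()
		if errors.Is(err, io.EOF) { // end-of-stream convention assumed
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, choice := range chunk.Choices {
			fmt.Print(choice.Delta.Content) // StreamDelta's Content field is assumed; it is not documented above
		}
	}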
type ClientConfig ¶
type ClientConfig struct {
	BaseURL    string
	HTTPClient HTTPDoer
	// contains filtered or unexported fields
}
func DefaultConfig ¶
func DefaultConfig(authToken string) ClientConfig
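A sketch of building a configuration, assuming HTTPDoer is satisfied by *http.Client's Do method (the interface body is not shown above). How a ClientConfig is then turned into a Client is also not shown in this documentation.

	cfg := deepseek.DefaultConfig(os.Getenv("DEEPSEEK_API_KEY"))
	cfg.BaseURL = deepseek.BaseURL                            // already the default endpoint
	cfg.HTTPClient = &http.Client{Timeout: 30 * time.Second} // any HTTPDoer implementation (assumption)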
type ErrorResponse ¶
func (ErrorResponse) Error ¶
func (e ErrorResponse) Error() string
type Function ¶
type Function struct {
	Name        string      `json:"name"`                 // The name of the function (required)
	Description string      `json:"description"`          // Description of the function (required)
	Parameters  *Parameters `json:"parameters,omitempty"` // Parameters schema (optional)
}
Function defines the structure of a function tool
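A sketch of declaring a function tool using only the documented fields. The Parameters schema and the surrounding Tools wrapper are not documented above, so both are omitted here.

	fn := deepseek.Function{
		Name:        "get_weather",                           // required
		Description: "Return the current weather for a city", // required
		// Parameters omitted: its schema is not documented in this section.
	}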
type Parameters ¶
type ResponseFormat ¶
type ResponseFormat struct {
	Type string `json:"type"` // Either "text" or "json_object". If "json_object", include the word "json" somewhere in your prompt.
}
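A short sketch of requesting JSON output, following the note above: Type is set to "json_object" and the prompt mentions "json" explicitly. The req variable is a ChatCompletionRequest as built earlier.

	req.ResponseFormat = &deepseek.ResponseFormat{Type: "json_object"}
	// The prompt itself must mention "json", e.g.:
	// {Role: deepseek.ChatMessageRoleUser, Content: "Reply in JSON: list three colors."}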
type StreamChatCompletionRequest ¶
type StreamChatCompletionRequest struct {
	Stream           bool                    `json:"stream,omitempty"`            // Defaults to true for streaming requests
	Model            string                  `json:"model"`                       // Required: Model ID, e.g., "deepseek-chat"
	Messages         []ChatCompletionMessage `json:"messages"`                    // Required: List of messages
	FrequencyPenalty float32                 `json:"frequency_penalty,omitempty"` // Optional: Frequency penalty, >= -2 and <= 2
	MaxTokens        int                     `json:"max_tokens,omitempty"`        // Optional: Maximum tokens, > 1
	PresencePenalty  float32                 `json:"presence_penalty,omitempty"`  // Optional: Presence penalty, >= -2 and <= 2
	Temperature      float32                 `json:"temperature,omitempty"`       // Optional: Sampling temperature, <= 2
	TopP             float32                 `json:"top_p,omitempty"`             // Optional: Nucleus sampling parameter, <= 1
	ResponseFormat   *ResponseFormat         `json:"response_format,omitempty"`   // Optional: Custom response format (currently not working for streaming)
	Stop             []string                `json:"stop,omitempty"`              // Optional: Stop signals
	Tools            []Tools                 `json:"tools,omitempty"`             // Optional: List of tools
	LogProbs         bool                    `json:"logprobs,omitempty"`          // Optional: Enable log probabilities
	TopLogProbs      int                     `json:"top_logprobs,omitempty"`      // Optional: Number of top tokens with log probabilities, <= 20
}
type StreamChatCompletionResponse ¶
type StreamChatCompletionResponse struct {
	ID      string          `json:"id"`
	Object  string          `json:"object"`
	Created int64           `json:"created"`
	Model   string          `json:"model"`
	Choices []StreamChoices `json:"choices"`
	Usage   *StreamUsage    `json:"usage,omitempty"`
}
type StreamChoices ¶
type StreamChoices struct {
	Index        int `json:"index"`
	Delta        StreamDelta
	FinishReason string `json:"finish_reason"`
}
type StreamDelta ¶
type StreamOptions ¶
type StreamOptions struct {
IncludeUsage bool
}
type StreamUsage ¶
Source Files ¶