Documentation ¶
Index ¶
- Variables
- type ChatCompletionRequest
- type ChatGPTModel
- type ChatGPTModelRole
- type ChatMessage
- type ChatResponse
- type ChatResponseChoice
- type ChatResponseUsage
- type Client
- func (c *Client) CancelFineTuningJob(ctx context.Context, fineTuningJobID string) (*FineTuningJob, error)
- func (c *Client) CreateFineTuningRequest(ctx context.Context, req FineTuningRequest) (*FineTuningResponse, error)
- func (c *Client) DeleteFile(ctx context.Context, fileID string) (*DeleteFileResponse, error)
- func (c *Client) ListFiles(ctx context.Context) (*FileList, error)
- func (c *Client) ListFineTuningEvents(ctx context.Context, fineTuningJobID string, opts *ListOptions) (*FineTuningEventsList, error)
- func (c *Client) ListFineTuningJobs(ctx context.Context, opts *ListOptions) (*FineTuningList, error)
- func (c *Client) RetrieveFile(ctx context.Context, fileID string) (*File, error)
- func (c *Client) RetrieveFileContent(ctx context.Context, fileID string) (string, error)
- func (c *Client) RetrieveFineTuningJob(ctx context.Context, fineTuningJobID string) (*FineTuningJob, error)
- func (c *Client) Send(ctx context.Context, req *ChatCompletionRequest) (*ChatResponse, error)
- func (c *Client) SimpleSend(ctx context.Context, message string) (*ChatResponse, error)
- func (c *Client) UploadFile(ctx context.Context, file io.Reader, purpose FilePurpose) (*File, error)
- type Config
- type DeleteFileResponse
- type File
- type FileList
- type FilePurpose
- type FileStatus
- type FineTuningEvent
- type FineTuningEventsList
- type FineTuningJob
- type FineTuningJobStatus
- type FineTuningList
- type FineTuningRequest
- type FineTuningResponse
- type ListOptions
Constants ¶
This section is empty.
Variables ¶
var (
	// ErrAPIKeyRequired is returned when the API Key is not provided
	ErrAPIKeyRequired = errors.New("API Key is required")
	// ErrInvalidModel is returned when the model is invalid
	ErrInvalidModel = errors.New("invalid model")
	// ErrNoMessages is returned when no messages are provided
	ErrNoMessages = errors.New("no messages provided")
	// ErrInvalidRole is returned when the role is invalid
	ErrInvalidRole = errors.New("invalid role. Only `user`, `system` and `assistant` are supported")
	// ErrInvalidTemperature is returned when the temperature is invalid
	ErrInvalidTemperature = errors.New("invalid temperature. 0<= temp <= 2")
	// ErrInvalidPresencePenalty is returned when the presence penalty is invalid
	ErrInvalidPresencePenalty = errors.New("invalid presence penalty. -2<= presence penalty <= 2")
	// ErrInvalidFrequencyPenalty is returned when the frequency penalty is invalid
	ErrInvalidFrequencyPenalty = errors.New("invalid frequency penalty. -2<= frequency penalty <= 2")
)
Functions ¶
This section is empty.
Types ¶
type ChatCompletionRequest ¶
type ChatCompletionRequest struct {
	// (Required)
	// ID of the model to use.
	Model ChatGPTModel `json:"model"`

	// (Required)
	// The messages to generate chat completions for.
	Messages []ChatMessage `json:"messages"`

	// (Optional - default: 1)
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
	// while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	Temperature float64 `json:"temperature,omitempty"`

	// (Optional - default: 1)
	// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results
	// of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability
	// mass are considered.
	// We generally recommend altering this or temperature but not both.
	Top_P float64 `json:"top_p,omitempty"`

	// (Optional - default: 1)
	// How many chat completion choices to generate for each input message.
	N int `json:"n,omitempty"`

	// (Optional - default: infinite)
	// The maximum number of tokens allowed for the generated answer. By default,
	// the number of tokens the model can return will be (4096 - prompt tokens).
	MaxTokens int `json:"max_tokens,omitempty"`

	// (Optional - default: 0)
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics.
	PresencePenalty float64 `json:"presence_penalty,omitempty"`

	// (Optional - default: 0)
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
	// decreasing the model's likelihood to repeat the same line verbatim.
	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`

	// (Optional)
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	User string `json:"user,omitempty"`
}
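A minimal sketch of filling in this struct, assuming the package is imported as chatgpt and the field values are placeholders chosen within the validated ranges listed under Variables:

req := &chatgpt.ChatCompletionRequest{
	Model: chatgpt.GPT4_0613,
	Messages: []chatgpt.ChatMessage{
		{Role: chatgpt.ChatGPTModelRoleSystem, Content: "You are a concise assistant."},
		{Role: chatgpt.ChatGPTModelRoleUser, Content: "Explain context.Context in two sentences."},
	},
	Temperature:      0.2, // 0 <= temp <= 2
	N:                1,
	MaxTokens:        200,
	PresencePenalty:  0.5, // -2 <= presence penalty <= 2
	FrequencyPenalty: 0.5, // -2 <= frequency penalty <= 2
	User:             "user-1234", // placeholder end-user identifier
}

The request is then passed to (*Client).Send, shown further below.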
type ChatGPTModel ¶
type ChatGPTModel string
const (
	GPT35Turbo ChatGPTModel = "gpt-3.5-turbo"
	// Deprecated: Use gpt-3.5-turbo-0613 instead, model will discontinue on 09/13/2023
	GPT35Turbo0301    ChatGPTModel = "gpt-3.5-turbo-0301"
	GPT35Turbo0613    ChatGPTModel = "gpt-3.5-turbo-0613"
	GPT35Turbo16k     ChatGPTModel = "gpt-3.5-turbo-16k"
	GPT35Turbo16k0613 ChatGPTModel = "gpt-3.5-turbo-16k-0613"
	GPT4              ChatGPTModel = "gpt-4"
	// Deprecated: Use gpt-4-0613 instead, model will discontinue on 09/13/2023
	GPT4_0314 ChatGPTModel = "gpt-4-0314"
	GPT4_0613 ChatGPTModel = "gpt-4-0613"
	GPT4_32k  ChatGPTModel = "gpt-4-32k"
	// Deprecated: Use gpt-4-32k-0613 instead, model will discontinue on 09/13/2023
	GPT4_32k_0314 ChatGPTModel = "gpt-4-32k-0314"
	GPT4_32k_0613 ChatGPTModel = "gpt-4-32k-0613"
)
type ChatGPTModelRole ¶
type ChatGPTModelRole string
const (
	ChatGPTModelRoleUser      ChatGPTModelRole = "user"
	ChatGPTModelRoleSystem    ChatGPTModelRole = "system"
	ChatGPTModelRoleAssistant ChatGPTModelRole = "assistant"
)
type ChatMessage ¶
type ChatMessage struct {
	Role    ChatGPTModelRole `json:"role"`
	Content string           `json:"content"`
}
type ChatResponse ¶
type ChatResponse struct {
	ID        string               `json:"id"`
	Object    string               `json:"object"`
	CreatedAt int64                `json:"created_at"`
	Choices   []ChatResponseChoice `json:"choices"`
	Usage     ChatResponseUsage    `json:"usage"`
}
type ChatResponseChoice ¶
type ChatResponseChoice struct {
	Index        int         `json:"index"`
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
}
type ChatResponseUsage ¶
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
func NewClientWithConfig ¶
func (*Client) CancelFineTuningJob ¶
func (c *Client) CancelFineTuningJob(ctx context.Context, fineTuningJobID string) (*FineTuningJob, error)
CancelFineTuningJob implements https://platform.openai.com/docs/api-reference/fine-tuning/cancel.
func (*Client) CreateFineTuningRequest ¶
func (c *Client) CreateFineTuningRequest(ctx context.Context, req FineTuningRequest) (*FineTuningResponse, error)
CreateFineTuningRequest implements https://platform.openai.com/docs/api-reference/fine-tuning/create.
func (*Client) DeleteFile ¶
func (c *Client) DeleteFile(ctx context.Context, fileID string) (*DeleteFileResponse, error)
DeleteFile implements https://platform.openai.com/docs/api-reference/files/delete.
func (*Client) ListFiles ¶
func (c *Client) ListFiles(ctx context.Context) (*FileList, error)
ListFiles implements https://platform.openai.com/docs/api-reference/files/list.
func (*Client) ListFineTuningEvents ¶
func (c *Client) ListFineTuningEvents(ctx context.Context, fineTuningJobID string, opts *ListOptions) (*FineTuningEventsList, error)
ListFineTuningEvents implements https://platform.openai.com/docs/api-reference/fine-tuning/list-events.
func (*Client) ListFineTuningJobs ¶
func (c *Client) ListFineTuningJobs(ctx context.Context, opts *ListOptions) (*FineTuningList, error)
ListFineTuningJobs implements https://platform.openai.com/docs/api-reference/fine-tuning/list.
func (*Client) RetrieveFile ¶
func (c *Client) RetrieveFile(ctx context.Context, fileID string) (*File, error)
RetrieveFile implements https://platform.openai.com/docs/api-reference/files/retrieve.
func (*Client) RetrieveFileContent ¶
func (c *Client) RetrieveFileContent(ctx context.Context, fileID string) (string, error)
RetrieveFileContent implements https://platform.openai.com/docs/api-reference/files/retrieve-contents.
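A small sketch combining the file helpers above, assuming c is an initialized *Client and fileID is a placeholder for an uploaded file's ID:

f, err := c.RetrieveFile(ctx, fileID)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s (%d bytes, purpose %s)\n", f.Filename, f.Bytes, f.Purpose)

content, err := c.RetrieveFileContent(ctx, fileID)
if err != nil {
	log.Fatal(err)
}
fmt.Println("retrieved", len(content), "bytes of file content")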
func (*Client) RetrieveFineTuningJob ¶
func (c *Client) RetrieveFineTuningJob(ctx context.Context, fineTuningJobID string) (*FineTuningJob, error)
RetrieveFineTuningJob implements https://platform.openai.com/docs/api-reference/fine-tuning/retrieve.
func (*Client) Send ¶
func (c *Client) Send(ctx context.Context, req *ChatCompletionRequest) (*ChatResponse, error)
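A hedged usage sketch, assuming the package is imported as chatgpt and c is a configured *Client: send a request and read the first choice of the response.

res, err := c.Send(ctx, &chatgpt.ChatCompletionRequest{
	Model: chatgpt.GPT35Turbo,
	Messages: []chatgpt.ChatMessage{
		{Role: chatgpt.ChatGPTModelRoleUser, Content: "Hello!"},
	},
})
if err != nil {
	log.Fatal(err)
}
if len(res.Choices) > 0 {
	fmt.Println(res.Choices[0].Message.Content)
	fmt.Println("finish reason:", res.Choices[0].FinishReason)
}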
func (*Client) SimpleSend ¶
func (c *Client) SimpleSend(ctx context.Context, message string) (*ChatResponse, error)
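A one-call sketch; judging from the signature, SimpleSend takes a single message string rather than a full ChatCompletionRequest (use Send for full control over model and parameters):

res, err := c.SimpleSend(ctx, "Write a haiku about goroutines.")
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Choices[0].Message.Content)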
func (*Client) UploadFile ¶
func (c *Client) UploadFile(ctx context.Context, file io.Reader, purpose FilePurpose) (*File, error)
UploadFile implements https://platform.openai.com/docs/api-reference/files/create.
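A sketch of uploading a JSONL training file, assuming the package is imported as chatgpt; the file path is a placeholder:

f, err := os.Open("training_data.jsonl") // placeholder path to a JSONL file
if err != nil {
	log.Fatal(err)
}
defer f.Close()

uploaded, err := c.UploadFile(ctx, f, chatgpt.FilePurposeFinetune)
if err != nil {
	log.Fatal(err)
}
fmt.Println("uploaded file ID:", uploaded.ID)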
type DeleteFileResponse ¶
type File ¶
type File struct {
	ID        string      `json:"id"`
	Object    string      `json:"object"`
	Bytes     int         `json:"bytes"`
	CreatedAt int         `json:"created_at"`
	Filename  string      `json:"filename"`
	Purpose   FilePurpose `json:"purpose"`

	Status        FileStatus `json:"status"`         // Deprecated
	StatusDetails string     `json:"status_details"` // Deprecated
}
type FilePurpose ¶
type FilePurpose string
const (
	FilePurposeFinetune         FilePurpose = "fine-tune"
	FilePurposeFinetuneResults  FilePurpose = "fine-tune-results"
	FilePurposeAssistants       FilePurpose = "assistants"
	FilePurposeAssistantsOutput FilePurpose = "assistants_output"
)
type FileStatus ¶
type FileStatus string
const (
	FilestatusUploaded  FileStatus = "uploaded"
	FilestatusProcessed FileStatus = "processed"
	FilestatusError     FileStatus = "error"
)
type FineTuningEvent ¶
type FineTuningEventsList ¶
type FineTuningEventsList struct {
	Object  string            `json:"object"`
	Data    []FineTuningEvent `json:"data"`
	HasMore bool              `json:"has_more"`
}
TODO: Use generics to create an abstract List type.
type FineTuningJob ¶
type FineTuningJob struct {
	ID        string `json:"id"`
	CreatedAt int    `json:"created_at"`
	Error     struct {
		Code    string `json:"code"`
		Message string `json:"message"`
		Param   string `json:"param,omitempty"`
	} `json:"error,omitempty"`
	FineTunedModel  string `json:"fine_tuned_model,omitempty"`
	FinishedAt      int    `json:"finished_at,omitempty"`
	Hyperparameters struct {
		NEpochs int `json:"n_epochs"`
	} `json:"hyperparameters,omitempty"`
	Model          string              `json:"model,omitempty"`
	Object         string              `json:"object"`
	OrganizationID string              `json:"organization_id"`
	ResultFiles    []string            `json:"result_files"`
	Status         FineTuningJobStatus `json:"status"`
	TrainedTokens  int                 `json:"trained_tokens,omitempty"`
	TrainingFile   string              `json:"training_file"`
	ValidationFile string              `json:"validation_file,omitempty"`
}
type FineTuningJobStatus ¶
type FineTuningJobStatus string
const (
	FineTuningJobStatusValidatingFile FineTuningJobStatus = "validating_files"
	FineTuningJobStatusQueued         FineTuningJobStatus = "queued"
	FineTuningJobStatusRunning        FineTuningJobStatus = "running"
	FineTuningJobStatusSucceeded      FineTuningJobStatus = "succeeded"
	FineTuningJobStatusFailed         FineTuningJobStatus = "failed"
	FineTuningJobStatusCancelled      FineTuningJobStatus = "cancelled"
)
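These statuses can be polled with RetrieveFineTuningJob until the job reaches a terminal state. A hedged sketch, where waitForJob is a hypothetical helper (not part of this package) and the 30-second interval is an arbitrary choice:

func waitForJob(ctx context.Context, c *chatgpt.Client, jobID string) (*chatgpt.FineTuningJob, error) {
	for {
		job, err := c.RetrieveFineTuningJob(ctx, jobID)
		if err != nil {
			return nil, err
		}
		switch job.Status {
		case chatgpt.FineTuningJobStatusSucceeded,
			chatgpt.FineTuningJobStatusFailed,
			chatgpt.FineTuningJobStatusCancelled:
			return job, nil // terminal state reached
		}
		// Wait before polling again, honoring context cancellation.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(30 * time.Second):
		}
	}
}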
type FineTuningList ¶
type FineTuningList struct {
	Object  string          `json:"object"`
	Data    []FineTuningJob `json:"data"`
	HasMore bool            `json:"has_more"`
}
TODO: Use generics to create an abstract List type.
type FineTuningRequest ¶
type FineTuningRequest struct {
	Model           ChatGPTModel `json:"model"`
	TrainingFile    string       `json:"training_file"`
	Hyperparameters struct {
		NEpochs int `json:"n_epochs,omitempty"` // Optional
	} `json:"hyperparameters,omitempty"` // Optional
	Suffix         string `json:"suffix,omitempty"`          // Optional
	ValidationFile string `json:"validation_file,omitempty"` // Optional
}
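A sketch of starting a job with CreateFineTuningRequest, assuming trainingFileID holds the ID returned by a prior UploadFile call and the suffix is a placeholder:

resp, err := c.CreateFineTuningRequest(ctx, chatgpt.FineTuningRequest{
	Model:        chatgpt.GPT35Turbo,
	TrainingFile: trainingFileID,
	Suffix:       "my-experiment", // optional; included in the fine-tuned model name
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("fine-tuning job ID:", resp.ID, "status:", resp.Status)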
type FineTuningResponse ¶
type FineTuningResponse struct {
	Object         string `json:"object"`
	ID             string `json:"id"`
	Model          string `json:"model"`
	CreatedAt      int    `json:"created_at"`
	FineTunedModel any    `json:"fine_tuned_model"`
	OrganizationID string `json:"organization_id"`
	ResultFiles    []any  `json:"result_files"`
	Status         string `json:"status"`
	ValidationFile any    `json:"validation_file"`
	TrainingFile   string `json:"training_file"`
}
type ListOptions ¶
type ListOptions struct {
	After *string // Identifier for the last event from the previous pagination request.
	Limit *int    // Number of events to retrieve. Defaults to 20.
}
func (*ListOptions) Encode ¶
func (opts *ListOptions) Encode() string
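A pagination sketch using ListOptions with ListFineTuningJobs; since After and Limit are pointers, literal values need addressable variables:

limit := 10
var after *string

for {
	page, err := c.ListFineTuningJobs(ctx, &chatgpt.ListOptions{After: after, Limit: &limit})
	if err != nil {
		log.Fatal(err)
	}
	for _, job := range page.Data {
		fmt.Println(job.ID, job.Status)
	}
	if !page.HasMore || len(page.Data) == 0 {
		break
	}
	// Continue from the last item of the current page.
	lastID := page.Data[len(page.Data)-1].ID
	after = &lastID
}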