openai

package
v0.0.0-...-bc747da Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jun 2, 2024 License: MIT Imports: 13 Imported by: 1

Documentation

Index

Constants

View Source
const AssistantsEndpointPath = "/assistants/" // base URL path for the Assistants API
View Source
const AudioEndpointPath = "/audio/" // base URL path for the Audio API
View Source
const ChatEndpointPath = "/chat/" // base URL path for the Chat Completions API
View Source
const CompletionsEndpointPath = "/completions/" // base URL path for the (legacy) Completions API
View Source
const EditsEndpointPath = "/edits/" // base URL path for the (deprecated) Edits API
View Source
const EmbeddingsEndpointPath = "/embeddings/" // base URL path for the Embeddings API
View Source
const FilesEndpointPath = "/files/" // base URL path for the Files API
View Source
const FineTuningEndpointPath = "/fine_tuning/" // base URL path for the Fine-tuning API
View Source
const ImagesEndpointPath = "/images/" // base URL path for the Images API
View Source
const ModelsEndpointPath = "/models/" // base URL path for the Models API
View Source
const ModerationsEndpointPath = "/moderations/" // base URL path for the Moderations API
View Source
const VectorStoresEndpointPath = "/vector_stores/" // base URL path for the Vector Stores API

Variables

This section is empty.

Functions

This section is empty.

Types

type APIError

// APIError provides error information returned by the OpenAI API.
type APIError struct {
	Code           any     `json:"code,omitempty"` // error code; the API returns either a string or a number, hence `any`
	Message        string  `json:"message"`        // human-readable description of the error
	Param          *string `json:"param,omitempty"` // the request parameter related to the error, if any (nullable)
	Type           string  `json:"type"`           // machine-readable error category
	HTTPStatusCode int     `json:"-"`              // HTTP status of the response; populated by the client, not part of the JSON body
}

APIError provides error information returned by the OpenAI API.

func (*APIError) Error

func (e *APIError) Error() string

func (*APIError) UnmarshalJSON

func (e *APIError) UnmarshalJSON(data []byte) (err error)

type Assistant

// Assistant represents an assistant object returned by the OpenAI API.
type Assistant struct {
	Id            string                  `json:"id"`
	Object        string                  `json:"object"` // The object type, which is always assistant.
	CreatedAt     int64                   `json:"created_at"` // Unix timestamp (seconds) of creation
	Name          *string                 `json:"name"`        // nullable in the API, hence *string
	Description   *string                 `json:"description"` // nullable
	Model         string                  `json:"model"`
	Instructions  *string                 `json:"instructions"` // nullable system instructions
	Tools         []AssistantTool         `json:"tools,omitempty"`
	ToolResources *AssistantToolResources `json:"tool_resources,omitempty"`
	MetaData      map[string]string       `json:"metadata,omitempty"`
	Temperature   float64                 `json:"temperature,omitempty"`
	TopP          float64                 `json:"top_p,omitempty"`
}

type AssistantFile

// AssistantFile represents a file attached to an assistant.
type AssistantFile struct {
	Id          string `json:"id"`
	Object      string `json:"object"` // The object type, which is always assistant.file.
	CreatedAt   int64  `json:"created_at"` // Unix timestamp (seconds) of creation
	AssistantId string `json:"assistant_id"` // ID of the assistant this file is attached to
}

type AssistantFileRequest

// AssistantFileRequest is the request body for attaching a file to an assistant.
type AssistantFileRequest struct {
	// A File ID (with purpose="assistants") that the assistant should use. Useful for tools like retrieval and code_interpreter that can access files.
	FileId *string `json:"file_id"`
}

type AssistantFiles

// AssistantFiles is a paginated list of assistant files.
type AssistantFiles struct {
	Object  string          `json:"object"`
	Data    []AssistantFile `json:"data"`
	HasMore bool            `json:"has_more"` // true when more pages are available
}

type AssistantRequest

// AssistantRequest is the request body for creating or modifying an assistant.
type AssistantRequest struct {
	// ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
	Model string `json:"model" binding:"required"`

	// The name of the assistant. The maximum length is 256 characters.
	Name *string `json:"name"`

	// The description of the assistant. The maximum length is 512 characters.
	Description *string `json:"description"`

	// The system instructions that the assistant uses. The maximum length is 32768 characters.
	Instructions *string `json:"instructions"`

	// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function.
	// Defaults to []
	Tools []AssistantTool `json:"tools,omitempty"`

	// Resources made available to the assistant's tools, e.g. file IDs for code_interpreter and vector store IDs for file_search.
	ToolResources *AssistantToolResources `json:"tool_resources,omitempty"`

	// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
	MetaData map[string]string `json:"metadata,omitempty"`

	Temperature float64 `json:"temperature,omitempty"` // sampling temperature; higher is more random
	TopP        float64 `json:"top_p,omitempty"`       // nucleus sampling mass; alter this or temperature, not both
}

type AssistantTool

// AssistantTool describes a single tool enabled on an assistant.
type AssistantTool struct {
	Type     string `json:"type"` // tool type, e.g. code_interpreter, retrieval/file_search, or function
	Function *struct {
		Description *string                `json:"description,omitempty"`
		Name        string                 `json:"name"`
		Parameters  map[string]interface{} `json:"parameters"` // JSON Schema describing the function's parameters
	} `json:"function,omitempty"` // only set when Type is "function"
}

type AssistantToolResources

// AssistantToolResources holds per-tool resources for an assistant.
type AssistantToolResources struct {
	CodeInterpreter *struct {
		FileIDs []string `json:"file_ids"` // files available to the code interpreter tool
	} `json:"code_interpreter,omitempty"`
	FileSearch *struct {
		VectorStoreIDs []string `json:"vector_store_ids"` // existing vector stores to attach
		VectorStores   []struct {
			FileIDs  []string          `json:"file_ids"` // files used to create a new vector store
			MetaData map[string]string `json:"metadata,omitempty"`
		} `json:"vector_stores,omitempty"` // definitions for vector stores to be created
	} `json:"file_search,omitempty"`
}

type Assistants

// Assistants is a paginated list of assistants.
type Assistants struct {
	Object  string      `json:"object"`
	Data    []Assistant `json:"data"`
	HasMore bool        `json:"has_more"` // true when more pages are available
}

type AssistantsEndpoint

// AssistantsEndpoint provides access to the /assistants/ API routes.
type AssistantsEndpoint struct {
	// contains filtered or unexported fields
}

Assistants Endpoint

Build assistants that can call models and use tools to perform tasks. Related guide: [Assistants]: https://platform.openai.com/docs/assistants/overview [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/assistants

func (*AssistantsEndpoint) CreateAssistant

func (e *AssistantsEndpoint) CreateAssistant(req *AssistantRequest) (*Assistant, error)

Create an assistant with a model and instructions. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/assistants/createAssistant

func (*AssistantsEndpoint) CreateAssistantFile

func (e *AssistantsEndpoint) CreateAssistantFile(assistantId string, fileId string) (*AssistantFile, error)

Creates an assistant file.

func (*AssistantsEndpoint) DeleteAssistant

func (e *AssistantsEndpoint) DeleteAssistant(assistantId string) (bool, error)

Deletes an assistant.

func (*AssistantsEndpoint) DeleteAssistantFile

func (e *AssistantsEndpoint) DeleteAssistantFile(assistantId string, fileId string) (bool, error)

Deletes an assistant file.

func (*AssistantsEndpoint) ListAssistantFiles

func (e *AssistantsEndpoint) ListAssistantFiles(after *string, limit *int) ([]AssistantFile, error)

Returns a list of assistant files.

func (*AssistantsEndpoint) ListAssistants

func (e *AssistantsEndpoint) ListAssistants(after *string, limit *int) ([]Assistant, error)

Returns a list of assistants.

func (*AssistantsEndpoint) ModifyAssistant

func (e *AssistantsEndpoint) ModifyAssistant(assistantId string, req *AssistantRequest) (*Assistant, error)

Modifies an assistant.

func (*AssistantsEndpoint) RetrieveAssistant

func (e *AssistantsEndpoint) RetrieveAssistant(assistantId string) (*Assistant, error)

Retrieves an assistant.

func (*AssistantsEndpoint) RetrieveAssistantFile

func (e *AssistantsEndpoint) RetrieveAssistantFile(assistantId string, fileId string) (*AssistantFile, error)

Retrieves an assistant file.

type AudioEndpoint

// AudioEndpoint provides access to the /audio/ API routes.
type AudioEndpoint struct {
	// contains filtered or unexported fields
}

Audio Endpoint

	Learn how to turn audio into text.
 Related guide: [Speech to text]: https://platform.openai.com/docs/guides/speech-to-text

func (*AudioEndpoint) CreateTranscription

func (e *AudioEndpoint) CreateTranscription(req *AudioTranscriptionRequest) (*AudioResponse, error)

Transcribes audio into the input language.

func (*AudioEndpoint) CreateTranslation

func (e *AudioEndpoint) CreateTranslation(req *AudioTranslationRequest) (*AudioResponse, error)

Translates audio into English.

type AudioResponse

// AudioResponse is the response to a transcription or translation request.
type AudioResponse struct {
	Text string `json:"text"` // the transcribed or translated text
}

type AudioTranscriptionRequest

// AudioTranscriptionRequest is the request body for transcribing audio.
type AudioTranscriptionRequest struct {
	// The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
	File string `json:"file" binding:"required"`
	// 	ID of the model to use. Only whisper-1 is currently available.
	Model string `json:"model" binding:"required"`
	// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
	Prompt string `json:"prompt,omitempty"`
	// 	Defaults to json
	// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
	ResponseFormat string `json:"response_format,omitempty"`
	// Defaults to 0
	// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
	// NOTE(review): the API accepts fractional values (e.g. 0.8); an int field can only express 0 or 1 — consider float64.
	Temperature int `json:"temperature,omitempty"`
	// The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
	Language string `json:"language,omitempty"`
}

type AudioTranslationRequest

// AudioTranslationRequest is the request body for translating audio into English.
type AudioTranslationRequest struct {
	// The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
	File string `json:"file" binding:"required"`
	// 	ID of the model to use. Only whisper-1 is currently available.
	Model string `json:"model" binding:"required"`
	// An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English.
	Prompt string `json:"prompt,omitempty"`
	// 	Defaults to json
	// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
	ResponseFormat string `json:"response_format,omitempty"`
	// Defaults to 0
	// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
	// NOTE(review): the API accepts fractional values (e.g. 0.8); an int field can only express 0 or 1 — consider float64.
	Temperature int `json:"temperature,omitempty"`
}

type ChatCompletionRequest

// ChatCompletionRequest is the request body for the chat completions API.
type ChatCompletionRequest struct {
	// ID of the model to use. See the [model endpoint compatibility]: https://platform.openai.com/docs/models/model-endpoint-compatibility table for details on which models work with the Chat API.
	Model string `json:"model" binding:"required"`
	// A list of messages describing the conversation so far.
	// NOTE(review): the chat API expects message objects with role/content fields, not bare strings — confirm []string is intentional here.
	Messages []string `json:"messages" binding:"required"`
	// The role of the author of this message. One of system, user, or assistant.
	// NOTE(review): role/content/name are per-message properties in the API; having them at the request top level looks like a flattening — verify against the wire format.
	Role string `json:"role" binding:"required"`
	// The contents of the message.
	Content string `json:"content" binding:"required"`
	// The name of the author of this message. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
	Name string `json:"name,omitempty"`
	// Defaults to 1
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	// NOTE(review): fractional values (e.g. 0.8) cannot be expressed by an int field — consider float64.
	Temperature int `json:"temperature,omitempty"`
	// Defaults to 1
	// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// NOTE(review): same int-vs-fraction concern as Temperature.
	TopP int `json:"top_p,omitempty"`
	// Defaults to 1
	// How many chat completion choices to generate for each input message.
	N int `json:"n,omitempty"`
	// Defaults to false
	// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. See the OpenAI Cookbook for example code.
	Stream bool `json:"stream,omitempty"`
	// Defaults to null
	// Up to 4 sequences where the API will stop generating further tokens.
	Stop []string `json:"stop,omitempty"`
	// Defaults to inf
	// The maximum number of tokens to generate in the chat completion.
	// The total length of input tokens and generated tokens is limited by the model's context length.
	MaxTokens int `json:"max_tokens,omitempty"`
	// Defaults to 0
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	// See more information about frequency and presence penalties.
	PresencePenalty int `json:"presence_penalty,omitempty"`
	// Defaults to 0
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	// See more information about frequency and presence penalties.
	FrequencyPenalty int `json:"frequency_penalty,omitempty"`
	// Defaults to null
	// Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	// NOTE(review): bias values are numeric in the API; map[string]string will serialize them as JSON strings — verify the server accepts that.
	LogitBias map[string]string `json:"logit_bias,omitempty"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type ChatCompletionResponse

// ChatCompletionResponse is the response body returned by the chat completions API.
type ChatCompletionResponse struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"` // Unix timestamp (seconds) of creation
	Choices []struct {
		Index   int `json:"index"` // position of this choice among the n generated
		Message struct {
			Role    string `json:"role"`
			Content string `json:"content"`
		} `json:"message"`
		FinishReason string `json:"finish_reason"` // why generation stopped, e.g. stop or length
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

type ChatEndpoint

// ChatEndpoint provides access to the /chat/ API routes.
type ChatEndpoint struct {
	// contains filtered or unexported fields
}

Chat Endpoint

	Given a list of messages comprising a conversation,
 the model will return a chat completion response.

func (*ChatEndpoint) CreateChatCompletion

func (e *ChatEndpoint) CreateChatCompletion(req *ChatCompletionRequest) (*ChatCompletionResponse, error)

Creates a model response for the given chat conversation.

type Client

// Client - OpenAI client. Holds the connection settings shared by all endpoint accessors.
type Client struct {
	BaseURL        *url.URL     // API base URL requests are resolved against
	OrganizationID string       // optional organization header value
	HTTPClient     *http.Client // underlying HTTP client used for all requests
	UserAgent      string       // User-Agent header sent with requests
	// contains filtered or unexported fields
}

Client - OpenAI client.

func NewClient

func NewClient(authToken string) *Client

NewClient creates new OpenAI client.

func (*Client) Assistants

func (c *Client) Assistants() *AssistantsEndpoint

Assistants Endpoint

func (*Client) Audio

func (c *Client) Audio() *AudioEndpoint

Audio Endpoint

func (*Client) Chat

func (c *Client) Chat() *ChatEndpoint

Chat Endpoint

func (*Client) Completions

func (c *Client) Completions() *CompletionsEndpoint

Completions Endpoint

func (*Client) Edits

func (c *Client) Edits() *EditsEndpoint

Edits Endpoint

func (*Client) Embeddings

func (c *Client) Embeddings() *EmbeddingsEndpoint

Embeddings Endpoint

func (*Client) Files

func (c *Client) Files() *FilesEndpoint

Files Endpoint

func (*Client) FineTuning

func (c *Client) FineTuning() *FineTuningEndpoint

Fine-tuning Endpoint

func (*Client) Images

func (c *Client) Images() *ImagesEndpoint

Images Endpoint

func (*Client) Models

func (c *Client) Models() *ModelsEndpoint

Models - Models Endpoint

func (*Client) Moderations

func (c *Client) Moderations() *ModerationsEndpoint

Moderations Endpoint

func (*Client) VectorStores

func (c *Client) VectorStores() *VectorStoresEndpoint

VectorStores Endpoint

type CompletionRequest

// CompletionRequest is the request body for the (legacy) completions API.
type CompletionRequest struct {
	// ID of the model to use.
	// You can use the [List models]: https://platform.openai.com/docs/api-reference/models/list API to see all of your available models,
	// or see our [Model overview]: https://platform.openai.com/docs/models/overview for descriptions of them.
	Model string `json:"model" binding:"required"`
	// Defaults to <|endoftext|>
	// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	// Note that <|endoftext|> is the document separator that the model sees during training,
	// so if a prompt is not specified the model will generate as if from the beginning of a new document.
	Prompt []string `json:"prompt,omitempty"`
	// Defaults to null
	// The suffix that comes after a completion of inserted text.
	Suffix string `json:"suffix,omitempty"`
	// Defaults to 16
	// The maximum number of tokens to generate in the completion.
	// The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
	MaxTokens int `json:"max_tokens,omitempty"`
	// Defaults to 1
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	// NOTE(review): fractional values (e.g. 0.8) cannot be expressed by an int field — consider float64.
	Temperature int `json:"temperature,omitempty"`
	// Defaults to 1
	// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// NOTE(review): same int-vs-fraction concern as Temperature.
	TopP int `json:"top_p,omitempty"`
	// Defaults to 1
	// How many completions to generate for each prompt.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	N int `json:"n,omitempty"`
	// Defaults to false
	// Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
	Stream bool `json:"stream,omitempty"`
	// Defaults to null
	// Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens. For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
	// The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case.
	LogProbs int `json:"logprobs,omitempty"`
	// Defaults to false
	// Echo back the prompt in addition to the completion
	Echo bool `json:"echo,omitempty"`
	// Defaults to null
	// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
	Stop []string `json:"stop,omitempty"`
	// Defaults to 0
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	PresencePenalty int `json:"presence_penalty,omitempty"`
	// Defaults to 0
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	FrequencyPenalty int `json:"frequency_penalty,omitempty"`
	// Defaults to 1
	// Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
	// When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	BestOf int `json:"best_of,omitempty"`
	// Defaults to null
	// Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	// As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
	// NOTE(review): bias values are numeric in the API; map[string]string will serialize them as JSON strings — verify the server accepts that.
	LogitBias map[string]string `json:"logit_bias,omitempty"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type CompletionResponse

// CompletionResponse is the response body returned by the completions API.
type CompletionResponse struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"` // Unix timestamp (seconds) of creation
	Model   string `json:"model"`
	Choices []struct {
		Text         string `json:"text"`
		Index        int    `json:"index"`
		LogProbs     int    `json:"logprobs,omitempty"` // NOTE(review): the API returns an object (or null) here, not an int — verify decoding
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

type CompletionsEndpoint

// CompletionsEndpoint provides access to the /completions/ API routes.
type CompletionsEndpoint struct {
	// contains filtered or unexported fields
}

Completions Endpoint

	Given a prompt, the model will return one or more predicted completions,
 and can also return the probabilities of alternative tokens at each position.

func (*CompletionsEndpoint) CreateCompletion

func (e *CompletionsEndpoint) CreateCompletion(req *CompletionRequest) (*CompletionResponse, error)

Creates a completion for the provided prompt and parameters.

type CreateFineTuningJobRequest

// CreateFineTuningJobRequest is the request body for creating a fine-tuning job.
type CreateFineTuningJobRequest struct {
	// The ID of an uploaded file that contains training data.
	// See [upload file]: https://platform.openai.com/docs/api-reference/files/upload for how to upload a file.
	// Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion". Additionally, you must upload your file with the purpose fine-tune.
	// See the [fine-tuning guide]: https://platform.openai.com/docs/guides/fine-tuning/creating-training-data for more details.
	TrainingFile string `json:"training_file" binding:"required"`

	// The ID of an uploaded file that contains validation data.
	// If you provide this file, the data is used to generate validation metrics periodically during fine-tuning.
	// These metrics can be viewed in the [fine-tuning results file]: https://platform.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model. Your train and validation data should be mutually exclusive.
	// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". Additionally, you must upload your file with the purpose fine-tune.
	// See the [fine-tuning guide]: https://platform.openai.com/docs/guides/fine-tuning/creating-training-data for more details.
	ValidationFile string `json:"validation_file,omitempty"`

	// The name of the base model to fine-tune. You can select one of the supported models.
	// NOTE(review): tagged omitempty yet also binding:"required" — an empty Model would be dropped from the JSON while validation demands it; confirm which is intended.
	Model string `json:"model,omitempty" binding:"required"`

	// The hyperparameters used for the fine-tuning job.
	Hyperparameters *struct {
		// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
		NEpochs int64 `json:"n_epochs,omitempty"`
	} `json:"hyperparameters,omitempty"`

	// Defaults to null
	// A string of up to 40 characters that will be added to your fine-tuned model name.
	// For example, a suffix of "custom-model-name" would produce a model name like ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel.
	Suffix string `json:"suffix,omitempty"`
}

type CreateImageEditRequest

// CreateImageEditRequest is the request body for editing an image from a prompt and optional mask.
type CreateImageEditRequest struct {
	// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
	Image string `json:"image" binding:"required"`
	// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
	Mask string `json:"mask,omitempty"`
	// A text description of the desired image(s). The maximum length is 1000 characters.
	Prompt string `json:"prompt" binding:"required"`
	// Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int `json:"n,omitempty"`
	// Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string `json:"size,omitempty"`
	// Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string `json:"response_format,omitempty"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type CreateImageRequest

// CreateImageRequest is the request body for generating images from a text prompt.
type CreateImageRequest struct {
	// A text description of the desired image(s). The maximum length is 1000 characters.
	Prompt string `json:"prompt" binding:"required"`
	// Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int `json:"n,omitempty"`
	// 	Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string `json:"size,omitempty"`
	// Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string `json:"response_format,omitempty"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type CreateImageVariationRequest

// CreateImageVariationRequest is the request body for generating variations of an existing image.
type CreateImageVariationRequest struct {
	// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
	Image string `json:"image" binding:"required"`
	// Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int `json:"n,omitempty"`
	// Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string `json:"size,omitempty"`
	// Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string `json:"response_format,omitempty"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type CreateVectorStoresRequest

// CreateVectorStoresRequest is the request body for creating a vector store.
type CreateVectorStoresRequest struct {
	FileIDs      []string          `json:"file_ids"` // files to add to the new vector store
	Name         string            `json:"name"`
	ExpiresAfter *ExpiresAfter     `json:"expires_after,omitempty"` // optional expiration policy
	MetaData     map[string]string `json:"metadata,omitempty"`
}

type DeleteFileResponse

// DeleteFileResponse reports the outcome of a file deletion.
// NOTE(review): identical shape to DeletionStatus — consider consolidating the two types.
type DeleteFileResponse struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"` // true when the file was deleted
}

type DeletionStatus

// DeletionStatus reports the outcome of a deletion request.
type DeletionStatus struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"` // true when the object was deleted
}

type EditRequest

// EditRequest is the request body for the (deprecated) edits API.
type EditRequest struct {
	// ID of the model to use. You can use the text-davinci-edit-001 or code-davinci-edit-001 model with this endpoint.
	Model string `json:"model" binding:"required"`
	// Defaults to "" (the empty string)
	// The input text to use as a starting point for the edit.
	Input string `json:"input,omitempty"`
	// The instruction that tells the model how to edit the prompt.
	Instruction string `json:"instruction" binding:"required"`
	// Defaults to 1
	// How many edits to generate for the input and instruction.
	N int `json:"n,omitempty"`
	// Defaults to 1
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	// NOTE(review): fractional values (e.g. 0.8) cannot be expressed by an int field — consider float64.
	Temperature int `json:"temperature,omitempty"`
	// Defaults to 1
	// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// NOTE(review): same int-vs-fraction concern as Temperature.
	TopP int `json:"top_p,omitempty"`
}

type EditResponse

// EditResponse is the response body returned by the edits API.
type EditResponse struct {
	Object  string `json:"object"`
	Created int    `json:"created"` // Unix timestamp (seconds) of creation
	Choices []struct {
		Text  string `json:"text"`
		Index int    `json:"index"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

type EditsEndpoint

// EditsEndpoint provides access to the /edits/ API routes.
type EditsEndpoint struct {
	// contains filtered or unexported fields
}

Edits Endpoint

Given a prompt and an instruction, the model will return an edited version of the prompt.

func (*EditsEndpoint) CreateEdit

func (e *EditsEndpoint) CreateEdit(req *EditRequest) (*EditResponse, error)

Creates a new edit for the provided input, instruction, and parameters.

type EmbeddingsEndpoint

// EmbeddingsEndpoint provides access to the /embeddings/ API routes.
type EmbeddingsEndpoint struct {
	// contains filtered or unexported fields
}

Embeddings Endpoint

	Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
 Related guide: [Embeddings]: https://platform.openai.com/docs/guides/embeddings

func (*EmbeddingsEndpoint) CreateEmbeddings

func (e *EmbeddingsEndpoint) CreateEmbeddings(req *EmbeddingsRequest) (*EmbeddingsResponse, error)

Creates an embedding vector representing the input text.

type EmbeddingsRequest

// EmbeddingsRequest is the request body for creating embeddings.
type EmbeddingsRequest struct {
	// ID of the model to use.
	// You can use the [List models]: https://platform.openai.com/docs/api-reference/models/list API to see all of your available models,
	// or see our [Model overview]: https://platform.openai.com/docs/models/overview for descriptions of them.
	Model string `json:"model" binding:"required"`
	// Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
	// NOTE(review): the API also accepts arrays of inputs; a plain string field limits this client to a single input per request.
	Input string `json:"input" binding:"required"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	User string `json:"user,omitempty"`
}

type EmbeddingsResponse

// EmbeddingsResponse is the response body returned by the embeddings API.
type EmbeddingsResponse struct {
	Object string `json:"object"`
	Model  string `json:"model"`
	Data   []struct {
		Object    string    `json:"object"`
		Index     int       `json:"index"` // position of this embedding in the input list
		Embedding []float64 `json:"embedding,omitempty"` // the embedding vector
	} `json:"data"`
	Usage struct {
		PromptTokens int `json:"prompt_tokens"`
		TotalTokens  int `json:"total_tokens"`
	} `json:"usage"`
}

type ErrorResponse

type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

type ExpiresAfter

type ExpiresAfter struct {
	Anchor string `json:"anchor"`
	Days   int64  `json:"days"`
}

type File

type File struct {
	Id        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int64  `json:"bytes"`
	CreatedAt int64  `json:"created_at"`
	Filename  string `json:"filename"`
	Purpose   string `json:"purpose"`
}

type FileCounts

type FileCounts struct {
	InProgress int64 `json:"in_progress"`
	Completed  int64 `json:"completed"`
	Failed     int64 `json:"failed"`
	Cancelled  int64 `json:"cancelled"`
	Total      int64 `json:"total"`
}

type Files

type Files struct {
	Object string `json:"object"`
	Data   []File `json:"data"`
}

type FilesEndpoint

type FilesEndpoint struct {
	// contains filtered or unexported fields
}

Files Endpoint

Files are used to upload documents that can be used with features like [Fine-tuning]: https://platform.openai.com/docs/api-reference/fine-tunes.

func (*FilesEndpoint) DeleteFile

func (e *FilesEndpoint) DeleteFile(fileId string) (bool, error)

Delete a file. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/files

func (*FilesEndpoint) ListFiles

func (e *FilesEndpoint) ListFiles() ([]File, error)

Returns a list of files that belong to the user's organization. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/files

func (*FilesEndpoint) RetrieveFile

func (e *FilesEndpoint) RetrieveFile(fileId string) (*File, error)

Returns information about a specific file. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/files

func (*FilesEndpoint) RetrieveFileContent

func (e *FilesEndpoint) RetrieveFileContent(fileId string) (*string, error)

Returns the contents of the specified file [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/files

func (*FilesEndpoint) UploadFile

func (e *FilesEndpoint) UploadFile(req *UploadFileRequest) (*File, error)

Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/files

type FineTuningEndpoint

type FineTuningEndpoint struct {
	// contains filtered or unexported fields
}

FineTuning Endpoint

Manage fine-tuning jobs to tailor a model to your specific training data. Related guide: [Fine-tuning models]: https://platform.openai.com/docs/guides/fine-tuning [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/fine-tuning

func (*FineTuningEndpoint) CancelFineTuningJob

func (e *FineTuningEndpoint) CancelFineTuningJob(fineTuningJobId string) (*FineTuningJob, error)

Immediately cancel a fine-tune job. Returns the cancelled fine-tuning object.

func (*FineTuningEndpoint) CreateFineTuningJob

func (e *FineTuningEndpoint) CreateFineTuningJob(req *CreateFineTuningJobRequest) (*FineTuningJob, error)

Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. Learn more about Fine-tuning [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/fine-tunes

func (*FineTuningEndpoint) GetFineTuningJob

func (e *FineTuningEndpoint) GetFineTuningJob(fineTuningJobId string) (*FineTuningJob, error)

Get info about a fine-tuning job. Returns the fine-tuning object with the given ID.

func (*FineTuningEndpoint) ListFineTuningEvents

func (e *FineTuningEndpoint) ListFineTuningEvents(fineTuningJobId string, after *string, limit *int) ([]FineTuningEvent, error)

Get status updates for a fine-tuning job. Returns a list of fine-tuning event objects.

func (*FineTuningEndpoint) ListFineTuningJobs

func (e *FineTuningEndpoint) ListFineTuningJobs(after *string, limit *int) ([]FineTuningJob, error)

Returns a list of paginated fine-tuning job objects.

type FineTuningEvent

type FineTuningEvent struct {
	Object    string `json:"object"`
	Id        string `json:"id"`
	CreatedAt int    `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
	// Data	  string `json:"data"`
	Type string `json:"type"`
}

type FineTuningEvents

type FineTuningEvents struct {
	Object  string            `json:"object"`
	Data    []FineTuningEvent `json:"data"`
	HasMore bool              `json:"has_more"`
}

type FineTuningJob

type FineTuningJob struct {
	Id             string `json:"id"`
	Object         string `json:"object"`
	CreatedAt      int64  `json:"created_at"`
	FinishedAt     int64  `json:"finished_at"`
	Model          string `json:"model"`
	FineTunedModel string `json:"fine_tuned_model"`
	OrganizationId string `json:"organization_id"`
	Status         string `json:"status"`
	Hyperparams    struct {
		NEpochs int64 `json:"n_epochs"`
	} `json:"hyperparams"`
	TrainingFile   string   `json:"training_file"`
	ValidationFile *string  `json:"validation_file"`
	ResultFiles    []string `json:"result_files"`
	TrainedTokens  int64    `json:"trained_tokens"`
}

type FineTuningJobs

type FineTuningJobs struct {
	Object  string          `json:"object"`
	Data    []FineTuningJob `json:"data"`
	HasMore bool            `json:"has_more"`
}

type ImagesEndpoint

type ImagesEndpoint struct {
	// contains filtered or unexported fields
}

Images Endpoint

	Given a prompt and/or an input image, the model will generate a new image.
 Related guide: [Image generation]: https://platform.openai.com/docs/guides/images

func (*ImagesEndpoint) CreateImage

func (e *ImagesEndpoint) CreateImage(req *CreateImageRequest) (*ImagesResponse, error)

Creates an image given a prompt.

func (*ImagesEndpoint) CreateImageEdit

func (e *ImagesEndpoint) CreateImageEdit(req *CreateImageEditRequest) (*ImagesResponse, error)

Creates an edited or extended image given an original image and a prompt.

func (*ImagesEndpoint) CreateImageVariation

func (e *ImagesEndpoint) CreateImageVariation(req *CreateImageVariationRequest) (*ImagesResponse, error)

Creates a variation of a given image.

type ImagesResponse

type ImagesResponse struct {
	Created int `json:"created"`
	Data    []struct {
		Url string `json:"url"`
	} `json:"data"`
}

type ListFineTuningEventsRequest

type ListFineTuningEventsRequest struct {
	// Identifier for the last job from the previous pagination request.
	After string `json:"after,omitempty"`

	// Number of fine-tuning jobs to retrieve.
	// Defaults to 20
	Limit int64 `json:"limit,omitempty"`
}

type Model

type Model struct {
	CreatedAt  int64        `json:"created"`
	ID         string       `json:"id"`
	Object     string       `json:"object"`
	OwnedBy    string       `json:"owned_by"`
	Permission []Permission `json:"permission"`
	Root       string       `json:"root"`
	Parent     string       `json:"parent"`
}

Model - OpenAI Model.

type Models

type Models struct {
	Object string  `json:"object"`
	Data   []Model `json:"data"`
}

type ModelsEndpoint

type ModelsEndpoint struct {
	// contains filtered or unexported fields
}

ModelsEndpoint - OpenAI Models Endpoint

List and describe the various models available in the API.
You can refer to the [Models]: https://platform.openai.com/docs/models documentation to understand what models are available and the differences between them.

func (*ModelsEndpoint) DeleteFineTuneModel

func (e *ModelsEndpoint) DeleteFineTuneModel(id string) (bool, error)

Delete a fine-tuned model. You must have the Owner role in your organization. [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/fine-tunes/delete-model

func (*ModelsEndpoint) ListModels

func (e *ModelsEndpoint) ListModels() ([]Model, error)

Lists the currently available models, and provides basic information about each one such as the owner and availability.

func (*ModelsEndpoint) RetrieveModel

func (e *ModelsEndpoint) RetrieveModel(id string) (*Model, error)

Retrieves a model instance, providing basic information about the model such as the owner and permissioning.

type Moderation

type Moderation struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Results []struct {
		Categories []struct {
			Hate            bool `json:"hate"`
			HateThreatening bool `json:"hate/threatening"`
			SelfHarm        bool `json:"self-harm"`
			Sexual          bool `json:"sexual"`
			SexualMinors    bool `json:"sexual/minors"`
			Violence        bool `json:"violence"`
			ViolenceGraphic bool `json:"violence/graphic"`
		} `json:"categories"`
		CategoryScores []struct {
			Hate            float64 `json:"hate"`
			HateThreatening float64 `json:"hate/threatening"`
			SelfHarm        float64 `json:"self-harm"`
			Sexual          float64 `json:"sexual"`
			SexualMinors    float64 `json:"sexual/minors"`
			Violence        float64 `json:"violence"`
			ViolenceGraphic float64 `json:"violence/graphic"`
		} `json:"category_scores"`
		Flagged bool `json:"flagged"`
	} `json:"results"`
}

type ModerationRequest

type ModerationRequest struct {
	// The input text to classify
	Input []string `json:"input" binding:"required"`
	// Defaults to text-moderation-latest
	// Two content moderations models are available: text-moderation-stable and text-moderation-latest.
	// The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use text-moderation-stable, we will provide advanced notice before updating the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.
	Model string `json:"model,omitempty"`
}

type ModerationsEndpoint

type ModerationsEndpoint struct {
	// contains filtered or unexported fields
}

Moderations Endpoint

Given an input text, outputs if the model classifies it as violating OpenAI's content policy.
Related guide: [Moderations]: https://platform.openai.com/docs/guides/moderation

func (*ModerationsEndpoint) CreateModeration

func (e *ModerationsEndpoint) CreateModeration(req *ModerationRequest) (*Moderation, error)

Classifies if text violates OpenAI's Content Policy [OpenAI Documentation]: https://platform.openai.com/docs/api-reference/moderations/create

type ModifyVectorStoresRequest

type ModifyVectorStoresRequest struct {
	Name         string            `json:"name"`
	ExpiresAfter ExpiresAfter      `json:"expires_after"`
	MetaData     map[string]string `json:"metadata,omitempty"`
}

type Permission

type Permission struct {
	CreatedAt          int64       `json:"created"`
	ID                 string      `json:"id"`
	Object             string      `json:"object"`
	AllowCreateEngine  bool        `json:"allow_create_engine"`
	AllowSampling      bool        `json:"allow_sampling"`
	AllowLogprobs      bool        `json:"allow_logprobs"`
	AllowSearchIndices bool        `json:"allow_search_indices"`
	AllowView          bool        `json:"allow_view"`
	AllowFineTuning    bool        `json:"allow_fine_tuning"`
	Organization       string      `json:"organization"`
	Group              interface{} `json:"group"`
	IsBlocking         bool        `json:"is_blocking"`
}

Permission - OpenAI Permission.

type RequestError

type RequestError struct {
	HTTPStatusCode int
	Err            error
}

RequestError provides information about generic request errors.

func (*RequestError) Error

func (e *RequestError) Error() string

func (*RequestError) Unwrap

func (e *RequestError) Unwrap() error

type UploadFileRequest

type UploadFileRequest struct {
	// The file (path or contents of the document) to upload.
	File string `json:"file" binding:"required"`
	// The intended purpose of the uploaded file (e.g. "fine-tune" or "assistants").
	Purpose string `json:"purpose" binding:"required"`
}

type VectorStore

type VectorStore struct {
	Id           string            `json:"id"`
	Object       string            `json:"object"`
	CreatedAt    int64             `json:"created_at"`
	Name         string            `json:"name"`
	UsageBytes   int64             `json:"usage_bytes"`
	FileCounts   *FileCounts       `json:"file_counts"`
	Status       string            `json:"status"`
	ExpiresAfter ExpiresAfter      `json:"expires_after"`
	ExpiresAt    int64             `json:"expires_at"`
	LastActiveAt int64             `json:"last_active_at"`
	Metadata     map[string]string `json:"metadata,omitempty"`
}

type VectorStores

type VectorStores struct {
	Object string        `json:"object"`
	Data   []VectorStore `json:"data"`
}

type VectorStoresEndpoint

type VectorStoresEndpoint struct {
	// contains filtered or unexported fields
}

VectorStores Endpoint

Vector stores are used to store files for use by the file_search tool.

func (*VectorStoresEndpoint) CreateVectorStore

func (e *VectorStoresEndpoint) CreateVectorStore(req *CreateVectorStoresRequest) (*VectorStore, error)

Create a vector store.

func (*VectorStoresEndpoint) DeleteVectorStore

func (e *VectorStoresEndpoint) DeleteVectorStore(vectorStoreId string) (*DeletionStatus, error)

Deletes a vector store.

func (*VectorStoresEndpoint) ListVectorStores

func (e *VectorStoresEndpoint) ListVectorStores() ([]VectorStore, error)

Returns a list of vector stores.

func (*VectorStoresEndpoint) ModifyVectorStore

func (e *VectorStoresEndpoint) ModifyVectorStore(vectorStoreId string, req *ModifyVectorStoresRequest) (*VectorStore, error)

Modifies a vector store.

func (*VectorStoresEndpoint) RetrieveVectorStore

func (e *VectorStoresEndpoint) RetrieveVectorStore(vectorStoreId string) (*VectorStore, error)

Retrieves a vector store.

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL