openai

package
v0.0.0-...-03c30b8 (not the latest version of this module)
Published: Jan 10, 2025 License: Apache-2.0 Imports: 30 Imported by: 0

Documentation

Index

Constants

const (
	DataPrefix       = "data: "
	Done             = "[DONE]"
	DataPrefixLength = len(DataPrefix)
)
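
These constants describe how the upstream server-sent-events (SSE) stream is framed: each payload line starts with "data: " and the literal "[DONE]" sentinel ends the stream. Below is a minimal sketch of how a reader could use them, assuming it sits in this package; the helper name and loop are illustrative only, not the package's actual StreamHandler.

import (
	"bufio"
	"io"
	"strings"
)

// scanSSEPayloads is a hypothetical helper: it walks an SSE body line by
// line, strips the "data: " prefix from payload lines, stops at "[DONE]",
// and hands each remaining JSON payload to the callback.
func scanSSEPayloads(body io.Reader, handle func(payload string)) error {
	scanner := bufio.NewScanner(body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, DataPrefix) {
			continue // ignore comments, blank keep-alive lines, other fields
		}
		payload := line[DataPrefixLength:]
		if payload == Done {
			break // "[DONE]" marks the end of the stream
		}
		handle(payload)
	}
	return scanner.Err()
}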
const DoNotPatchStreamOptionsIncludeUsageMetaKey = "do_not_patch_stream_options_include_usage"
const MetaBaseURLNoV1 = "base_url_no_v1"
const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices"
const MetaResponseFormat = "response_format"

Variables

var ModelList = []*model.ModelConfig{
	{
		Model:       "gpt-3.5-turbo",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.022,
		OutputPrice: 0.044,
	},
	{
		Model: "gpt-3.5-turbo-0301",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-3.5-turbo-0613",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-3.5-turbo-1106",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-3.5-turbo-0125",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-3.5-turbo-16k",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.022,
		OutputPrice: 0.044,
	},
	{
		Model: "gpt-3.5-turbo-16k-0613",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-3.5-turbo-instruct",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-4",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.22,
		OutputPrice: 0.44,
	},
	{
		Model: "gpt-4-0314",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-0613",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-1106-preview",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-0125-preview",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-4-32k",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.44,
		OutputPrice: 0.88,
	},
	{
		Model: "gpt-4-32k-0314",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-32k-0613",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-turbo-preview",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-4-turbo",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.071,
		OutputPrice: 0.213,
	},
	{
		Model: "gpt-4-turbo-2024-04-09",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-4o",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.01775,
		OutputPrice: 0.071,
	},
	{
		Model: "gpt-4o-2024-05-13",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4o-2024-08-06",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "chatgpt-4o-latest",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "gpt-4o-mini",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.001065,
		OutputPrice: 0.00426,
	},
	{
		Model: "gpt-4o-mini-2024-07-18",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4-vision-preview",
		Type:  relaymode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model:       "o1-mini",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.0213,
		OutputPrice: 0.0852,
	},
	{
		Model:       "o1-preview",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerOpenAI,
		InputPrice:  0.1065,
		OutputPrice: 0.426,
	},
	{
		Model: "text-embedding-ada-002",
		Type:  relaymode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-embedding-3-small",
		Type:  relaymode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-embedding-3-large",
		Type:  relaymode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-curie-001",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-babbage-001",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-ada-001",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-davinci-002",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-davinci-003",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-moderation-latest",
		Type:  relaymode.Moderations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-moderation-stable",
		Type:  relaymode.Moderations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-davinci-edit-001",
		Type:  relaymode.Edits,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "davinci-002",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "babbage-002",
		Type:  relaymode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "dall-e-2",
		Type:  relaymode.ImagesGenerations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "dall-e-3",
		Type:  relaymode.ImagesGenerations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "whisper-1",
		Type:  relaymode.AudioTranscription,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1",
		Type:  relaymode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-1106",
		Type:  relaymode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-hd",
		Type:  relaymode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-hd-1106",
		Type:  relaymode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
}
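
ModelList is a plain slice, so callers can scan it directly to answer questions such as "what does this model cost per unit?". A minimal sketch, assuming it sits alongside the package; lookupPrice is an illustrative helper, not part of the API, and entries without explicit prices simply report the zero value.

// lookupPrice is a hypothetical helper that returns the configured input and
// output prices for a model name, and whether the model is known at all.
func lookupPrice(name string) (inputPrice, outputPrice float64, ok bool) {
	for _, mc := range ModelList {
		if mc.Model == name {
			return mc.InputPrice, mc.OutputPrice, true
		}
	}
	return 0, 0, false
}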

Functions

func ConvertEmbeddingsRequest

func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func ConvertImageRequest

func ConvertImageRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func ConvertRequest

func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func ConvertRerankRequest

func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func ConvertSTTRequest

func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error)

func ConvertTTSRequest

func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func ConvertTextRequest

func ConvertTextRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func CountToken

func CountToken(text string) int

func CountTokenInput

func CountTokenInput(input any, model string) int

func CountTokenMessages

func CountTokenMessages(messages []*model.Message, model string) int

func CountTokenText

func CountTokenText(text string, model string) int
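
The CountToken* helpers estimate tokenizer usage for quota accounting before or after a request is relayed. A rough usage sketch, assuming in-package placement; the prompt string and model name are just examples.

import "fmt"

// estimateTokens is illustrative: it compares a per-model estimate with the
// package's generic text estimate for the same prompt.
func estimateTokens() {
	prompt := "Hello, how are you?"
	perModel := CountTokenText(prompt, "gpt-4o")
	generic := CountToken(prompt)
	fmt.Printf("per-model estimate: %d, generic estimate: %d\n", perModel, generic)
}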

func DoResponse

func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)

func ErrorWrapper

func ErrorWrapper(err error, code any, statusCode int) *relaymodel.ErrorWithStatusCode

func ErrorWrapperWithMessage

func ErrorWrapperWithMessage(message string, code any, statusCode int) *relaymodel.ErrorWithStatusCode
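
ErrorWrapper and ErrorWrapperWithMessage turn an ordinary Go error (or a bare message) into the *relaymodel.ErrorWithStatusCode envelope that handlers return. A hedged sketch, assuming in-package placement; the error code string and HTTP status are illustrative choices, not values mandated by the package.

import "net/http"

// reportUpstreamFailure is a hypothetical helper showing how a relay error
// might be wrapped with an OpenAI-style error code and an HTTP status.
func reportUpstreamFailure(err error) {
	if err == nil {
		return
	}
	wrapped := ErrorWrapper(err, "upstream_request_failed", http.StatusBadGateway)
	_ = wrapped // in a handler this would be returned to the caller
}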

func GetBalance

func GetBalance(channel *model.Channel) (float64, error)

func GetFullRequestURL

func GetFullRequestURL(baseURL string, requestURL string) string

func GetPromptTokens

func GetPromptTokens(meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest) int

func Handler

func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func ImageHandler

func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func ModerationsHandler

func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func RerankHandler

func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func ResponseText2Usage

func ResponseText2Usage(responseText string, modeName string, promptTokens int) *model.Usage

func STTHandler

func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func StreamHandler

func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

Types

type Adaptor

type Adaptor struct{}

func (*Adaptor) ConvertRequest

func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)

func (*Adaptor) DoRequest

func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)

func (*Adaptor) DoResponse

func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)

func (*Adaptor) GetBalance

func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error)

func (*Adaptor) GetChannelName

func (a *Adaptor) GetChannelName() string

func (*Adaptor) GetModelList

func (a *Adaptor) GetModelList() []*model.ModelConfig

func (*Adaptor) GetRequestURL

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)

func (*Adaptor) SetupRequestHeader

func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
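
Adaptor is the hook this package exposes to the generic relay layer: GetRequestURL, SetupRequestHeader, ConvertRequest, DoRequest and DoResponse handle an individual request, while GetChannelName and GetModelList describe the channel. The metadata-dependent methods need a *meta.Meta and a gin context, so the sketch below only exercises the self-contained ones; it assumes in-package placement and is illustrative only.

import "fmt"

// describeAdaptor is illustrative: it lists the channel name and supported
// models without needing any request metadata.
func describeAdaptor() {
	a := &Adaptor{}
	fmt.Println("channel:", a.GetChannelName())
	for _, mc := range a.GetModelList() {
		fmt.Printf("  %s (input %.6f, output %.6f)\n", mc.Model, mc.InputPrice, mc.OutputPrice)
	}
}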

type ChatCompletionsStreamResponse

type ChatCompletionsStreamResponse struct {
	Usage   *model.Usage                           `json:"usage,omitempty"`
	ID      string                                 `json:"id"`
	Object  string                                 `json:"object"`
	Model   string                                 `json:"model"`
	Choices []*ChatCompletionsStreamResponseChoice `json:"choices"`
	Created int64                                  `json:"created"`
}
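
Each "data: ..." payload of a streamed chat completion unmarshals into this struct; Usage is usually nil on intermediate chunks and, when the upstream supports stream_options.include_usage, populated on the final one. A minimal decoding sketch, assuming in-package placement.

import "encoding/json"

// decodeStreamChunk is illustrative: it parses one SSE payload into a
// ChatCompletionsStreamResponse so the caller can inspect Choices and Usage.
func decodeStreamChunk(payload []byte) (*ChatCompletionsStreamResponse, error) {
	var chunk ChatCompletionsStreamResponse
	if err := json.Unmarshal(payload, &chunk); err != nil {
		return nil, err
	}
	return &chunk, nil
}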

type ChatCompletionsStreamResponseChoice

type ChatCompletionsStreamResponseChoice struct {
	FinishReason *string       `json:"finish_reason,omitempty"`
	Delta        model.Message `json:"delta"`
	Index        int           `json:"index"`
}

type ChatRequest

type ChatRequest struct {
	Model     string           `json:"model"`
	Messages  []*model.Message `json:"messages"`
	MaxTokens int              `json:"max_tokens"`
}

type CompletionsStreamResponse

type CompletionsStreamResponse struct {
	Choices []*struct {
		Text         string `json:"text"`
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
}

type EmbeddingResponse

type EmbeddingResponse struct {
	Object      string                   `json:"object"`
	Model       string                   `json:"model"`
	Data        []*EmbeddingResponseItem `json:"data"`
	model.Usage `json:"usage"`
}

type EmbeddingResponseItem

type EmbeddingResponseItem struct {
	Object    string    `json:"object"`
	Embedding []float64 `json:"embedding"`
	Index     int       `json:"index"`
}

type ErrorResp

type ErrorResp struct {
	Error model.Error `json:"error"`
}

type ImageContent

type ImageContent struct {
	ImageURL *model.ImageURL `json:"image_url,omitempty"`
	Type     string          `json:"type,omitempty"`
}

type ImageData

type ImageData struct {
	URL           string `json:"url,omitempty"`
	B64Json       string `json:"b64_json,omitempty"`
	RevisedPrompt string `json:"revised_prompt,omitempty"`
}

type ImageRequest

type ImageRequest struct {
	Model          string `json:"model"`
	Prompt         string `binding:"required"               json:"prompt"`
	Size           string `json:"size,omitempty"`
	Quality        string `json:"quality,omitempty"`
	ResponseFormat string `json:"response_format,omitempty"`
	Style          string `json:"style,omitempty"`
	User           string `json:"user,omitempty"`
	N              int    `json:"n,omitempty"`
}

ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
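
A sketch of building and serializing an ImageRequest, assuming in-package placement; the model, prompt, size, and response format values are illustrative (see the linked API reference for the accepted values).

import "encoding/json"

// newImageRequestBody is a hypothetical helper that builds a JSON body for
// an image generation call.
func newImageRequestBody() ([]byte, error) {
	req := ImageRequest{
		Model:          "dall-e-3",
		Prompt:         "a watercolor lighthouse at dusk",
		Size:           "1024x1024",
		ResponseFormat: "b64_json",
		N:              1,
	}
	return json.Marshal(req)
}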

type ImageResponse

type ImageResponse struct {
	Data    []*ImageData `json:"data"`
	Created int64        `json:"created"`
}

type Segment

type Segment struct {
	Text             string  `json:"text"`
	Tokens           []int   `json:"tokens"`
	ID               int     `json:"id"`
	Seek             int     `json:"seek"`
	Start            float64 `json:"start"`
	End              float64 `json:"end"`
	Temperature      float64 `json:"temperature"`
	AvgLogprob       float64 `json:"avg_logprob"`
	CompressionRatio float64 `json:"compression_ratio"`
	NoSpeechProb     float64 `json:"no_speech_prob"`
}

type SlimRerankResponse

type SlimRerankResponse struct {
	Meta model.RerankMeta `json:"meta"`
}

type SlimTextResponse

type SlimTextResponse struct {
	Error   model.Error           `json:"error"`
	Choices []*TextResponseChoice `json:"choices"`
	Usage   model.Usage           `json:"usage"`
}

type SubscriptionResponse

type SubscriptionResponse struct {
	Object             string  `json:"object"`
	HasPaymentMethod   bool    `json:"has_payment_method"`
	SoftLimitUSD       float64 `json:"soft_limit_usd"`
	HardLimitUSD       float64 `json:"hard_limit_usd"`
	SystemHardLimitUSD float64 `json:"system_hard_limit_usd"`
	AccessUntil        int64   `json:"access_until"`
}

type TextContent

type TextContent struct {
	Type string `json:"type,omitempty"`
	Text string `json:"text,omitempty"`
}

type TextRequest

type TextRequest struct {
	Model     string           `json:"model"`
	Prompt    string           `json:"prompt"`
	Messages  []*model.Message `json:"messages"`
	MaxTokens int              `json:"max_tokens"`
}

type TextResponse

type TextResponse struct {
	ID          string                `json:"id"`
	Model       string                `json:"model,omitempty"`
	Object      string                `json:"object"`
	Choices     []*TextResponseChoice `json:"choices"`
	model.Usage `json:"usage"`
	Created     int64 `json:"created"`
}

type TextResponseChoice

type TextResponseChoice struct {
	FinishReason string        `json:"finish_reason"`
	Message      model.Message `json:"message"`
	Index        int           `json:"index"`
}

type UsageAndChoicesResponse

type UsageAndChoicesResponse struct {
	Usage   *model.Usage
	Choices []*ChatCompletionsStreamResponseChoice
}

type UsageOrResponseText

type UsageOrResponseText struct {
	*model.Usage
	ResponseText string
}

type UsageResponse

type UsageResponse struct {
	Object string `json:"object"`
	// DailyCosts []OpenAIUsageDailyCost `json:"daily_costs"`
	TotalUsage float64 `json:"total_usage"` // unit: 0.01 dollar
}
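
Per the field comment, TotalUsage is expressed in hundredths of a dollar, so converting it to dollars is a division by 100; a one-line sketch (the helper name is illustrative):

// usageInDollars converts the 0.01-dollar units reported in TotalUsage
// into plain dollars.
func usageInDollars(u UsageResponse) float64 {
	return u.TotalUsage / 100
}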

type WhisperJSONResponse

type WhisperJSONResponse struct {
	Text string `json:"text,omitempty"`
}

type WhisperVerboseJSONResponse

type WhisperVerboseJSONResponse struct {
	Task     string     `json:"task,omitempty"`
	Language string     `json:"language,omitempty"`
	Text     string     `json:"text,omitempty"`
	Segments []*Segment `json:"segments,omitempty"`
	Duration float64    `json:"duration,omitempty"`
}
