Documentation ¶
Index ¶
- Constants
- Variables
- func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func ConvertImageRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (http.Header, io.Reader, error)
- func ConvertTTSRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func ConvertTextRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func CountToken(text string) int
- func CountTokenInput(input any, model string) int
- func CountTokenMessages(messages []*model.Message, model string) int
- func CountTokenText(text string, model string) int
- func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
- func ErrorWrapper(err error, code any, statusCode int) *relaymodel.ErrorWithStatusCode
- func ErrorWrapperWithMessage(message string, code any, statusCode int) *relaymodel.ErrorWithStatusCode
- func GetBalance(channel *model.Channel) (float64, error)
- func GetFullRequestURL(baseURL string, requestURL string) string
- func GetPromptTokens(meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest) int
- func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func ResponseText2Usage(responseText string, modeName string, promptTokens int) *model.Usage
- func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode)
- type Adaptor
- func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (http.Header, io.Reader, error)
- func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
- func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
- func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error)
- func (a *Adaptor) GetChannelName() string
- func (a *Adaptor) GetModelList() []*model.ModelConfig
- func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
- func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
- type ChatCompletionsStreamResponse
- type ChatCompletionsStreamResponseChoice
- type ChatRequest
- type CompletionsStreamResponse
- type EmbeddingResponse
- type EmbeddingResponseItem
- type ErrorResp
- type ImageContent
- type ImageData
- type ImageRequest
- type ImageResponse
- type Segment
- type SlimRerankResponse
- type SlimTextResponse
- type SubscriptionResponse
- type TextContent
- type TextRequest
- type TextResponse
- type TextResponseChoice
- type UsageAndChoicesResponse
- type UsageOrResponseText
- type UsageResponse
- type WhisperJSONResponse
- type WhisperVerboseJSONResponse
Constants ¶
View Source
const ( DataPrefix = "data: " Done = "[DONE]" DataPrefixLength = len(DataPrefix) )
View Source
const DoNotPatchStreamOptionsIncludeUsageMetaKey = "do_not_patch_stream_options_include_usage"
View Source
const MetaBaseURLNoV1 = "base_url_no_v1"
View Source
const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices"
View Source
const MetaResponseFormat = "response_format"
Variables ¶
View Source
var ModelList = []*model.ModelConfig{ { Model: "gpt-3.5-turbo", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, }, { Model: "gpt-3.5-turbo-0301", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-3.5-turbo-0613", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-3.5-turbo-1106", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-3.5-turbo-0125", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-3.5-turbo-16k", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.022, OutputPrice: 0.044, }, { Model: "gpt-3.5-turbo-16k-0613", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-3.5-turbo-instruct", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.22, OutputPrice: 0.44, }, { Model: "gpt-4-0314", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-0613", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-1106-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-0125-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-32k", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.44, OutputPrice: 0.88, }, { Model: "gpt-4-32k-0314", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-32k-0613", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-turbo-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-turbo", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.071, OutputPrice: 0.213, }, { Model: "gpt-4-turbo-2024-04-09", Type: 
relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4o", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.01775, OutputPrice: 0.071, }, { Model: "gpt-4o-2024-05-13", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4o-2024-08-06", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "chatgpt-4o-latest", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4o-mini", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.001065, OutputPrice: 0.00426, }, { Model: "gpt-4o-mini-2024-07-18", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4-vision-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "o1-mini", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.0213, OutputPrice: 0.0852, }, { Model: "o1-preview", Type: relaymode.ChatCompletions, Owner: model.ModelOwnerOpenAI, InputPrice: 0.1065, OutputPrice: 0.426, }, { Model: "text-embedding-ada-002", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-small", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-large", Type: relaymode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-curie-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-babbage-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-ada-001", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-003", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-latest", Type: relaymode.Moderations, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-stable", Type: relaymode.Moderations, Owner: 
model.ModelOwnerOpenAI, }, { Model: "text-davinci-edit-001", Type: relaymode.Edits, Owner: model.ModelOwnerOpenAI, }, { Model: "davinci-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "babbage-002", Type: relaymode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-2", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-3", Type: relaymode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "whisper-1", Type: relaymode.AudioTranscription, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-1106", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd-1106", Type: relaymode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, }
Functions ¶
func ConvertImageRequest ¶
func ConvertRequest ¶
func ConvertRerankRequest ¶
func ConvertSTTRequest ¶
func ConvertTTSRequest ¶
func ConvertTextRequest ¶
func CountToken ¶
func CountTokenInput ¶
func CountTokenText ¶
func DoResponse ¶
func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
func ErrorWrapper ¶
func ErrorWrapper(err error, code any, statusCode int) *relaymodel.ErrorWithStatusCode
func ErrorWrapperWithMessage ¶
func ErrorWrapperWithMessage(message string, code any, statusCode int) *relaymodel.ErrorWithStatusCode
func GetFullRequestURL ¶
func GetPromptTokens ¶
func GetPromptTokens(meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest) int
func ImageHandler ¶
func ModerationsHandler ¶
func RerankHandler ¶
func ResponseText2Usage ¶
func STTHandler ¶
func StreamHandler ¶
func TTSHandler ¶
func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode)
Types ¶
type Adaptor ¶
type Adaptor struct{}
func (*Adaptor) ConvertRequest ¶
func (*Adaptor) DoResponse ¶
func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
func (*Adaptor) GetChannelName ¶
func (*Adaptor) GetModelList ¶
func (a *Adaptor) GetModelList() []*model.ModelConfig
type ChatRequest ¶
type EmbeddingResponse ¶
type EmbeddingResponse struct { Object string `json:"object"` Model string `json:"model"` Data []*EmbeddingResponseItem `json:"data"` model.Usage `json:"usage"` }
type EmbeddingResponseItem ¶
type ImageContent ¶
type ImageRequest ¶
type ImageRequest struct { Model string `json:"model"` Prompt string `binding:"required" json:"prompt"` Size string `json:"size,omitempty"` Quality string `json:"quality,omitempty"` ResponseFormat string `json:"response_format,omitempty"` Style string `json:"style,omitempty"` User string `json:"user,omitempty"` N int `json:"n,omitempty"` }
ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
type ImageResponse ¶
type Segment ¶
type Segment struct { Text string `json:"text"` Tokens []int `json:"tokens"` ID int `json:"id"` Seek int `json:"seek"` Start float64 `json:"start"` End float64 `json:"end"` Temperature float64 `json:"temperature"` AvgLogprob float64 `json:"avg_logprob"` CompressionRatio float64 `json:"compression_ratio"` NoSpeechProb float64 `json:"no_speech_prob"` }
type SlimRerankResponse ¶
type SlimRerankResponse struct {
Meta model.RerankMeta `json:"meta"`
}
type SlimTextResponse ¶
type SlimTextResponse struct { Error model.Error `json:"error"` Choices []*TextResponseChoice `json:"choices"` Usage model.Usage `json:"usage"` }
type SubscriptionResponse ¶
type SubscriptionResponse struct { Object string `json:"object"` HasPaymentMethod bool `json:"has_payment_method"` SoftLimitUSD float64 `json:"soft_limit_usd"` HardLimitUSD float64 `json:"hard_limit_usd"` SystemHardLimitUSD float64 `json:"system_hard_limit_usd"` AccessUntil int64 `json:"access_until"` }
type TextContent ¶
type TextRequest ¶
type TextResponse ¶
type TextResponseChoice ¶
type UsageAndChoicesResponse ¶
type UsageAndChoicesResponse struct { Usage *model.Usage Choices []*ChatCompletionsStreamResponseChoice }
type UsageOrResponseText ¶
type UsageResponse ¶
type WhisperJSONResponse ¶
type WhisperJSONResponse struct {
Text string `json:"text,omitempty"`
}