Documentation ¶
Index ¶
- Constants
- Variables
- func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
- type Adaptor
- func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
- func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
- func (a *Adaptor) GetBaseURL() string
- func (a *Adaptor) GetChannelName() string
- func (a *Adaptor) GetModelList() []*model.ModelConfig
- func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
- func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
- type BatchEmbeddingRequest
- type ChatCandidate
- type ChatContent
- type ChatGenerationConfig
- type ChatPromptFeedback
- type ChatRequest
- type ChatResponse
- type ChatSafetyRating
- type ChatSafetySettings
- type ChatTools
- type CountTokensResponse
- type EmbeddingData
- type EmbeddingRequest
- type EmbeddingResponse
- type Error
- type FunctionCall
- type FunctionCallingConfig
- type FunctionResponse
- type InlineData
- type Part
- type ToolConfig
- type UsageMetadata
Constants ¶
const (
	VisionMaxImageNum = 16
)
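VisionMaxImageNum bounds how many images a single vision request may carry. A minimal sketch of enforcing the cap before building a request (the gemini import path is an assumption, not part of this documentation):

package main

import (
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

// capImageParts truncates a slice of image parts to the vision limit.
func capImageParts(parts []gemini.Part) []gemini.Part {
	if len(parts) > gemini.VisionMaxImageNum {
		return parts[:gemini.VisionMaxImageNum]
	}
	return parts
}

func main() {
	parts := make([]gemini.Part, 20)
	fmt.Println(len(capImageParts(parts))) // 16
}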
Variables ¶
var ModelList = []*model.ModelConfig{
	{
		Model:       "gemini-1.5-pro",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0025,
		OutputPrice: 0.01,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-1.5-flash",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.00015,
		OutputPrice: 0.0006,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-1.5-flash-8b",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.000075,
		OutputPrice: 0.0003,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-flash",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0001,
		OutputPrice: 0.0004,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-flash-lite-preview",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.000075,
		OutputPrice: 0.0003,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-flash-thinking-exp",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0001,
		OutputPrice: 0.0004,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-pro-exp",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0025,
		OutputPrice: 0.01,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:      "text-embedding-004",
		Type:       relaymode.Embeddings,
		Owner:      model.ModelOwnerGoogle,
		InputPrice: 0.0001,
		RPM:        1500,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2048),
			model.WithModelConfigMaxOutputTokens(768),
		),
	},
}
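Each entry pairs a model name with pricing, a rate limit, and capability flags. A sketch of looking one up by name (import path assumed; price units are whatever the relay's model package defines):

package main

import (
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	for _, mc := range gemini.ModelList {
		if mc.Model == "gemini-2.0-flash" {
			// Prices and RPM come straight from the ModelList entry above.
			fmt.Printf("input=%v output=%v rpm=%v\n", mc.InputPrice, mc.OutputPrice, mc.RPM)
		}
	}
}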
Functions ¶
func ConvertEmbeddingRequest ¶
func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
func ConvertRequest ¶
func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
Setting safety to the lowest possible values since Gemini is already powerless enough
func EmbeddingHandler ¶
func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
func Handler ¶
func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
func StreamHandler ¶
func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)
Types ¶
type Adaptor ¶
type Adaptor struct{}
func (*Adaptor) ConvertRequest ¶
func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
func (*Adaptor) DoRequest ¶
func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
func (*Adaptor) DoResponse ¶
func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)
func (*Adaptor) GetBaseURL ¶
func (a *Adaptor) GetBaseURL() string
func (*Adaptor) GetChannelName ¶
func (a *Adaptor) GetChannelName() string
func (*Adaptor) GetModelList ¶
func (a *Adaptor) GetModelList() []*model.ModelConfig
func (*Adaptor) GetRequestURL ¶
func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
func (*Adaptor) SetupRequestHeader ¶
func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
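Taken together, these methods form the relay lifecycle: convert the inbound request, build and send the upstream request, then translate the upstream response. A minimal sketch under stated assumptions: the gemini, meta, and relaymodel import paths are guesses, the string returned by ConvertRequest is treated as the upstream HTTP method (this page does not name the return values), and error handling is collapsed to an empty ErrorWithStatusCode literal.

package relayexample

import (
	"net/http"

	"github.com/gin-gonic/gin"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
	"example.com/relay/meta"                  // assumed import path
	relaymodel "example.com/relay/relaymodel" // assumed import path
)

// relayOnce sketches one pass through the adaptor's method lifecycle.
func relayOnce(a *gemini.Adaptor, m *meta.Meta, c *gin.Context, inbound *http.Request) (*relaymodel.Usage, *relaymodel.ErrorWithStatusCode) {
	// Assumption: the first return value is the upstream HTTP method.
	method, header, body, err := a.ConvertRequest(m, inbound)
	if err != nil {
		return nil, &relaymodel.ErrorWithStatusCode{} // placeholder error
	}
	url, err := a.GetRequestURL(m)
	if err != nil {
		return nil, &relaymodel.ErrorWithStatusCode{}
	}
	upstream, err := http.NewRequestWithContext(c.Request.Context(), method, url, body)
	if err != nil {
		return nil, &relaymodel.ErrorWithStatusCode{}
	}
	upstream.Header = header
	if err := a.SetupRequestHeader(m, c, upstream); err != nil {
		return nil, &relaymodel.ErrorWithStatusCode{}
	}
	resp, err := a.DoRequest(m, c, upstream)
	if err != nil {
		return nil, &relaymodel.ErrorWithStatusCode{}
	}
	// DoResponse converts the Gemini response back to the relay's format.
	return a.DoResponse(m, c, resp)
}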
type BatchEmbeddingRequest ¶
type BatchEmbeddingRequest struct {
Requests []EmbeddingRequest `json:"requests"`
}
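A BatchEmbeddingRequest is just a slice of per-input EmbeddingRequests marshaled to JSON. In this sketch the import path is assumed, and Content is left at its zero value because ChatContent's fields are not shown on this page:

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	batch := gemini.BatchEmbeddingRequest{
		Requests: []gemini.EmbeddingRequest{
			{Model: "models/text-embedding-004", TaskType: "RETRIEVAL_DOCUMENT"},
			{Model: "models/text-embedding-004", TaskType: "RETRIEVAL_QUERY"},
		},
	}
	out, err := json.MarshalIndent(batch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}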
type ChatCandidate ¶
type ChatCandidate struct {
	FinishReason  string             `json:"finishReason"`
	Content       ChatContent        `json:"content"`
	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
	Index         int64              `json:"index"`
}
type ChatContent ¶
type ChatGenerationConfig ¶
type ChatGenerationConfig struct {
	ResponseSchema   any      `json:"responseSchema,omitempty"`
	Temperature      *float64 `json:"temperature,omitempty"`
	TopP             *float64 `json:"topP,omitempty"`
	ResponseMimeType string   `json:"responseMimeType,omitempty"`
	StopSequences    []string `json:"stopSequences,omitempty"`
	TopK             float64  `json:"topK,omitempty"`
	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
	CandidateCount   int      `json:"candidateCount,omitempty"`
}
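Temperature and TopP are pointers so an unset value can be distinguished from an explicit zero. A small sketch (import path assumed):

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	temp := 0.7
	cfg := gemini.ChatGenerationConfig{
		Temperature:     &temp, // pointer field: omitted entirely when nil
		StopSequences:   []string{"\n\n"},
		MaxOutputTokens: 1024,
	}
	out, _ := json.Marshal(cfg)
	// Prints {"temperature":0.7,"stopSequences":["\n\n"],"maxOutputTokens":1024}
	fmt.Println(string(out))
}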
type ChatPromptFeedback ¶
type ChatPromptFeedback struct {
SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}
type ChatRequest ¶
type ChatRequest struct {
	Contents          []*ChatContent        `json:"contents"`
	SystemInstruction *ChatContent          `json:"system_instruction,omitempty"`
	SafetySettings    []ChatSafetySettings  `json:"safety_settings,omitempty"`
	GenerationConfig  *ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools             []ChatTools           `json:"tools,omitempty"`
	ToolConfig        *ToolConfig           `json:"tool_config,omitempty"`
}
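A hypothetical sketch of building a request. ChatContent's fields are not documented on this page; the Role and Parts fields below are an assumption, mirroring the Gemini REST API's Content object, and the import path is likewise assumed:

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	req := gemini.ChatRequest{
		Contents: []*gemini.ChatContent{
			// Assumption: ChatContent has Role and Parts fields,
			// as in the Gemini REST API; not shown on this page.
			{Role: "user", Parts: []gemini.Part{{Text: "Hello"}}},
		},
		GenerationConfig: &gemini.ChatGenerationConfig{MaxOutputTokens: 256},
	}
	out, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}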
type ChatResponse ¶
type ChatResponse struct {
	Candidates     []*ChatCandidate   `json:"candidates"`
	PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
	UsageMetadata  *UsageMetadata     `json:"usageMetadata"`
	ModelVersion   string             `json:"modelVersion"`
}
func (*ChatResponse) GetResponseText ¶
func (g *ChatResponse) GetResponseText() string
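A sketch of decoding an upstream body and extracting the text. The import path and the "parts" JSON tag inside the candidate's content are assumptions (ChatContent's tags are not shown here), and how GetResponseText joins multiple parts is not documented on this page:

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	raw := []byte(`{"candidates":[{"finishReason":"STOP","content":{"parts":[{"text":"Hi there"}]},"index":0}],"modelVersion":"gemini-2.0-flash"}`)
	var resp gemini.ChatResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	// Expected: "Hi there", assuming GetResponseText reads candidate part text.
	fmt.Println(resp.GetResponseText())
}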
type ChatSafetyRating ¶
type ChatSafetySettings ¶
type ChatTools ¶
type ChatTools struct {
FunctionDeclarations any `json:"function_declarations,omitempty"`
}
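FunctionDeclarations is typed any, so the package does not constrain its shape. The map literal below follows the Gemini REST API's functionDeclarations schema and is an illustration, not a contract of this package (import path assumed):

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	tools := []gemini.ChatTools{{
		// Shape mirrors Gemini's REST functionDeclarations; this
		// package accepts any value here.
		FunctionDeclarations: []map[string]any{{
			"name":        "get_weather",
			"description": "Look up current weather for a city",
			"parameters": map[string]any{
				"type": "object",
				"properties": map[string]any{
					"city": map[string]any{"type": "string"},
				},
				"required": []string{"city"},
			},
		}},
	}}
	out, _ := json.Marshal(tools)
	fmt.Println(string(out))
}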
type CountTokensResponse ¶
type EmbeddingData ¶
type EmbeddingData struct {
Values []float64 `json:"values"`
}
type EmbeddingRequest ¶
type EmbeddingRequest struct {
	Model                string      `json:"model"`
	TaskType             string      `json:"taskType,omitempty"`
	Title                string      `json:"title,omitempty"`
	Content              ChatContent `json:"content"`
	OutputDimensionality int         `json:"outputDimensionality,omitempty"`
}
type EmbeddingResponse ¶
type EmbeddingResponse struct {
	Error      *Error          `json:"error,omitempty"`
	Embeddings []EmbeddingData `json:"embeddings"`
}
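A sketch of decoding an embedding response and reading the vectors (import path assumed; Error's fields are not shown on this page, so only its presence is checked):

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	raw := []byte(`{"embeddings":[{"values":[0.011,-0.043,0.027]}]}`)
	var resp gemini.EmbeddingResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	if resp.Error != nil {
		panic("upstream returned an error")
	}
	for i, e := range resp.Embeddings {
		fmt.Printf("embedding %d has %d dims\n", i, len(e.Values))
	}
}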
type FunctionCall ¶
type FunctionCallingConfig ¶
type FunctionResponse ¶
type InlineData ¶
type Part ¶
type Part struct {
	InlineData       *InlineData       `json:"inlineData,omitempty"`
	FunctionCall     *FunctionCall     `json:"functionCall,omitempty"`
	FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"`
	Text             string            `json:"text,omitempty"`
}
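Normally only one of a Part's fields is set. Since InlineData's fields are not documented on this page, this sketch builds only a text part (import path assumed); an image part would set InlineData instead:

package main

import (
	"encoding/json"
	"fmt"

	gemini "example.com/relay/adaptor/gemini" // assumed import path
)

func main() {
	parts := []gemini.Part{
		{Text: "Describe this image."},
	}
	out, _ := json.Marshal(parts)
	fmt.Println(string(out)) // [{"text":"Describe this image."}]
}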
type ToolConfig ¶
type ToolConfig struct {
FunctionCallingConfig FunctionCallingConfig `json:"function_calling_config"`
}
type UsageMetadata ¶