gemini

package
v0.0.0-...-d017c07 (latest)
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 21, 2025 License: Apache-2.0 Imports: 26 Imported by: 0

Documentation

Index

Constants

View Source
// VisionMaxImageNum is the maximum number of images accepted in a single
// vision (multimodal) request to a Gemini model.
const (
	VisionMaxImageNum = 16
)

Variables

View Source
// ModelList enumerates the Gemini models supported by this adaptor, with
// per-model pricing, rate limit (RPM), and capability flags (context window,
// max output tokens, tool choice, vision).
//
// NOTE(review): the unit of InputPrice/OutputPrice (per 1K tokens?) is not
// stated here — confirm against the pricing convention used elsewhere in the
// project before relying on these values.
var ModelList = []*model.ModelConfig{
	{
		Model:       "gemini-1.5-pro",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0025,
		OutputPrice: 0.01,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-1.5-flash",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.00015,
		OutputPrice: 0.0006,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-1.5-flash-8b",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.000075,
		OutputPrice: 0.0003,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-flash",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0001,
		OutputPrice: 0.0004,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-flash-lite-preview",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.000075,
		OutputPrice: 0.0003,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},
	// Experimental thinking model: no tool-choice capability flag is set,
	// unlike the other chat models in this list.
	{
		Model:       "gemini-2.0-flash-thinking-exp",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0001,
		OutputPrice: 0.0004,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(1048576),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigVision(true),
		),
	},
	{
		Model:       "gemini-2.0-pro-exp",
		Type:        relaymode.ChatCompletions,
		Owner:       model.ModelOwnerGoogle,
		InputPrice:  0.0025,
		OutputPrice: 0.01,
		RPM:         600,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2097152),
			model.WithModelConfigMaxOutputTokens(8192),
			model.WithModelConfigToolChoice(true),
			model.WithModelConfigVision(true),
		),
	},

	// Embedding model: no output price, higher RPM than the chat models.
	{
		Model:      "text-embedding-004",
		Type:       relaymode.Embeddings,
		Owner:      model.ModelOwnerGoogle,
		InputPrice: 0.0001,
		RPM:        1500,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(2048),
			model.WithModelConfigMaxOutputTokens(768),
		),
	},
}

Functions

func ConvertEmbeddingRequest

func ConvertEmbeddingRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func ConvertRequest

func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

ConvertRequest sets all Gemini safety settings to their lowest (most permissive) thresholds so that upstream safety filtering does not interfere with relayed requests.

func EmbeddingHandler

func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func Handler

func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

func StreamHandler

func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *model.ErrorWithStatusCode)

Types

type Adaptor

type Adaptor struct{}

func (*Adaptor) ConvertRequest

func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func (*Adaptor) DoRequest

func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)

func (*Adaptor) DoResponse

func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *relaymodel.Usage, err *relaymodel.ErrorWithStatusCode)

func (*Adaptor) GetBaseURL

func (a *Adaptor) GetBaseURL() string

func (*Adaptor) GetChannelName

func (a *Adaptor) GetChannelName() string

func (*Adaptor) GetModelList

func (a *Adaptor) GetModelList() []*model.ModelConfig

func (*Adaptor) GetRequestURL

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)

func (*Adaptor) SetupRequestHeader

func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error

type BatchEmbeddingRequest

// BatchEmbeddingRequest wraps multiple EmbeddingRequest values so that several
// inputs can be embedded in a single upstream call.
type BatchEmbeddingRequest struct {
	Requests []EmbeddingRequest `json:"requests"`
}

type ChatCandidate

// ChatCandidate is a single generated completion candidate in a Gemini chat
// response, including its finish reason and per-category safety ratings.
type ChatCandidate struct {
	FinishReason  string             `json:"finishReason"`
	Content       ChatContent        `json:"content"`
	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
	Index         int64              `json:"index"`
}

type ChatContent

// ChatContent is one unit of conversation content: an optional role plus the
// ordered parts (text, inline data, function calls/responses) that make it up.
type ChatContent struct {
	Role  string `json:"role,omitempty"`
	Parts []Part `json:"parts"`
}

type ChatGenerationConfig

// ChatGenerationConfig holds the sampling and output-shaping parameters sent
// to Gemini's generation endpoint. Pointer fields distinguish "unset" from an
// explicit zero value so defaults are not overridden unintentionally.
type ChatGenerationConfig struct {
	ResponseSchema   any      `json:"responseSchema,omitempty"`
	Temperature      *float64 `json:"temperature,omitempty"`
	TopP             *float64 `json:"topP,omitempty"`
	ResponseMimeType string   `json:"responseMimeType,omitempty"`
	StopSequences    []string `json:"stopSequences,omitempty"`
	TopK             float64  `json:"topK,omitempty"`
	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
	CandidateCount   int      `json:"candidateCount,omitempty"`
}

type ChatPromptFeedback

// ChatPromptFeedback carries the safety ratings Gemini assigned to the prompt
// itself (as opposed to a generated candidate).
type ChatPromptFeedback struct {
	SafetyRatings []ChatSafetyRating `json:"safetyRatings"`
}

type ChatRequest

// ChatRequest is the request body sent to Gemini's content-generation
// endpoint.
//
// NOTE(review): these tags use snake_case (system_instruction, etc.) while
// the response types in this package use camelCase — presumably the upstream
// API accepts both forms; confirm before changing.
type ChatRequest struct {
	Contents          []*ChatContent        `json:"contents"`
	SystemInstruction *ChatContent          `json:"system_instruction,omitempty"`
	SafetySettings    []ChatSafetySettings  `json:"safety_settings,omitempty"`
	GenerationConfig  *ChatGenerationConfig `json:"generation_config,omitempty"`
	Tools             []ChatTools           `json:"tools,omitempty"`
	ToolConfig        *ToolConfig           `json:"tool_config,omitempty"`
}

type ChatResponse

// ChatResponse is the top-level response body returned by Gemini's
// content-generation endpoint.
type ChatResponse struct {
	Candidates     []*ChatCandidate   `json:"candidates"`
	PromptFeedback ChatPromptFeedback `json:"promptFeedback"`
	UsageMetadata  *UsageMetadata     `json:"usageMetadata"`
	ModelVersion   string             `json:"modelVersion"`
}

func (*ChatResponse) GetResponseText

func (g *ChatResponse) GetResponseText() string

type ChatSafetyRating

// ChatSafetyRating is Gemini's assessment of content for one harm category.
type ChatSafetyRating struct {
	Category    string `json:"category"`
	Probability string `json:"probability"`
}

type ChatSafetySettings

// ChatSafetySettings sets the blocking threshold for one harm category in an
// outgoing request.
type ChatSafetySettings struct {
	Category  string `json:"category"`
	Threshold string `json:"threshold"`
}

type ChatTools

// ChatTools declares the functions the model may call. FunctionDeclarations
// is left as `any` so the caller's tool schema is passed through unmodified.
type ChatTools struct {
	FunctionDeclarations any `json:"function_declarations,omitempty"`
}

type CountTokensResponse

// CountTokensResponse is the response body of Gemini's countTokens endpoint.
// Error is non-nil when the upstream call failed.
type CountTokensResponse struct {
	Error       *Error `json:"error,omitempty"`
	TotalTokens int    `json:"totalTokens"`
}

type EmbeddingData

// EmbeddingData is a single embedding vector returned by Gemini.
type EmbeddingData struct {
	Values []float64 `json:"values"`
}

type EmbeddingRequest

// EmbeddingRequest is one embedding request: the model to use, the content to
// embed, and optional task hints and output dimensionality.
type EmbeddingRequest struct {
	Model                string      `json:"model"`
	TaskType             string      `json:"taskType,omitempty"`
	Title                string      `json:"title,omitempty"`
	Content              ChatContent `json:"content"`
	OutputDimensionality int         `json:"outputDimensionality,omitempty"`
}

type EmbeddingResponse

// EmbeddingResponse is the response body of an embedding call. Error is
// non-nil when the upstream call failed.
type EmbeddingResponse struct {
	Error      *Error          `json:"error,omitempty"`
	Embeddings []EmbeddingData `json:"embeddings"`
}

type Error

// Error is the structured error object embedded in Gemini API responses.
type Error struct {
	Message string `json:"message,omitempty"`
	Status  string `json:"status,omitempty"`
	Code    int    `json:"code,omitempty"`
}

type FunctionCall

// FunctionCall is a model-initiated tool invocation: the function name and
// its arguments as a free-form JSON object.
type FunctionCall struct {
	Args map[string]any `json:"args"`
	Name string         `json:"name"`
}

type FunctionCallingConfig

// FunctionCallingConfig controls whether and which functions the model is
// allowed to call.
type FunctionCallingConfig struct {
	Mode                 string   `json:"mode,omitempty"`
	AllowedFunctionNames []string `json:"allowed_function_names,omitempty"`
}

type FunctionResponse

// FunctionResponse carries the caller-supplied result of a previously issued
// FunctionCall back to the model.
type FunctionResponse struct {
	Name     string `json:"name"`
	Response struct {
		Name    string         `json:"name"`
		Content map[string]any `json:"content"`
	} `json:"response"`
}

type InlineData

// InlineData is embedded media content (e.g. an image) sent inline with a
// message: a MIME type plus the payload.
//
// NOTE(review): Data is presumably base64-encoded per the Gemini API — not
// verifiable from this declaration alone.
type InlineData struct {
	MimeType string `json:"mimeType"`
	Data     string `json:"data"`
}

type Part

// Part is one element of a ChatContent. Exactly one of the fields is
// typically set: inline media, a function call, a function response, or text.
type Part struct {
	InlineData       *InlineData       `json:"inlineData,omitempty"`
	FunctionCall     *FunctionCall     `json:"functionCall,omitempty"`
	FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"`
	Text             string            `json:"text,omitempty"`
}

type ToolConfig

// ToolConfig is the request-level wrapper around function-calling settings.
type ToolConfig struct {
	FunctionCallingConfig FunctionCallingConfig `json:"function_calling_config"`
}

type UsageMetadata

// UsageMetadata reports token accounting for a generation call: prompt
// tokens, candidate (output) tokens, and their total.
type UsageMetadata struct {
	PromptTokenCount     int `json:"promptTokenCount"`
	CandidatesTokenCount int `json:"candidatesTokenCount"`
	TotalTokenCount      int `json:"totalTokenCount"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL