Documentation ¶
Index ¶
- type Cohere
- type CohereClient
- type CohereOptions
- type Fake
- type FakeOptions
- type HuggingFaceHub
- func (l *HuggingFaceHub) Callbacks() []schema.Callback
- func (l *HuggingFaceHub) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
- func (l *HuggingFaceHub) InvocationParams() map[string]any
- func (l *HuggingFaceHub) Type() string
- func (l *HuggingFaceHub) Verbose() bool
- type HuggingFaceHubClient
- type HuggingFaceHubOptions
- type LLMContentHandler
- type OpenAI
- type OpenAIClient
- type OpenAIOptions
- type SagemakerEndpoint
- func (l *SagemakerEndpoint) Callbacks() []schema.Callback
- func (l *SagemakerEndpoint) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
- func (l *SagemakerEndpoint) InvocationParams() map[string]any
- func (l *SagemakerEndpoint) Type() string
- func (l *SagemakerEndpoint) Verbose() bool
- type SagemakerEndpointOptions
- type Transformer
- type VertexAI
- func (l *VertexAI) Callbacks() []schema.Callback
- func (l *VertexAI) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
- func (l *VertexAI) InvocationParams() map[string]any
- func (l *VertexAI) Type() string
- func (l *VertexAI) Verbose() bool
- type VertexAIClient
- type VertexAIOptions
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Cohere ¶
func NewCohereFromClient ¶ added in v0.0.31
func NewCohereFromClient(client CohereClient, optFns ...func(o *CohereOptions)) (*Cohere, error)
func (*Cohere) Generate ¶
func (l *Cohere) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*Cohere) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type CohereClient ¶ added in v0.0.31
type CohereClient interface {
Generate(opts cohere.GenerateOptions) (*cohere.GenerateResponse, error)
}
type CohereOptions ¶
type Fake ¶ added in v0.0.14
func (*Fake) Generate ¶ added in v0.0.14
func (l *Fake) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*Fake) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type FakeOptions ¶ added in v0.0.31
type FakeOptions struct { *schema.CallbackOptions `map:"-"` schema.Tokenizer `map:"-"` }
type HuggingFaceHub ¶ added in v0.0.14
func NewHuggingFaceHub ¶ added in v0.0.14
func NewHuggingFaceHub(apiToken string, optFns ...func(o *HuggingFaceHubOptions)) (*HuggingFaceHub, error)
func NewHuggingFaceHubFromClient ¶ added in v0.0.31
func NewHuggingFaceHubFromClient(client HuggingFaceHubClient, optFns ...func(o *HuggingFaceHubOptions)) (*HuggingFaceHub, error)
func (*HuggingFaceHub) Callbacks ¶ added in v0.0.14
func (l *HuggingFaceHub) Callbacks() []schema.Callback
Callbacks returns the registered callbacks of the model.
func (*HuggingFaceHub) Generate ¶ added in v0.0.14
func (l *HuggingFaceHub) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*HuggingFaceHub) InvocationParams ¶ added in v0.0.27
func (l *HuggingFaceHub) InvocationParams() map[string]any
InvocationParams returns the parameters used in the model invocation.
func (*HuggingFaceHub) Type ¶ added in v0.0.14
func (l *HuggingFaceHub) Type() string
Type returns the type of the model.
func (*HuggingFaceHub) Verbose ¶ added in v0.0.14
func (l *HuggingFaceHub) Verbose() bool
Verbose returns the verbosity setting of the model.
type HuggingFaceHubClient ¶ added in v0.0.31
type HuggingFaceHubClient interface { TextGeneration(ctx context.Context, req *huggingface.TextGenerationRequest) (huggingface.TextGenerationResponse, error) Text2TextGeneration(ctx context.Context, req *huggingface.Text2TextGenerationRequest) (huggingface.Text2TextGenerationResponse, error) Summarization(ctx context.Context, req *huggingface.SummarizationRequest) (huggingface.SummarizationResponse, error) SetModel(model string) }
type HuggingFaceHubOptions ¶ added in v0.0.14
type LLMContentHandler ¶
type LLMContentHandler struct {
// contains filtered or unexported fields
}
func NewLLMContentHandler ¶
func NewLLMContentHandler(contentType, accept string, transformer Transformer) *LLMContentHandler
func (*LLMContentHandler) Accept ¶
func (ch *LLMContentHandler) Accept() string
func (*LLMContentHandler) ContentType ¶
func (ch *LLMContentHandler) ContentType() string
func (*LLMContentHandler) TransformInput ¶
func (ch *LLMContentHandler) TransformInput(prompt string) ([]byte, error)
func (*LLMContentHandler) TransformOutput ¶
func (ch *LLMContentHandler) TransformOutput(output []byte) (string, error)
type OpenAI ¶
OpenAI is an implementation of the LLM interface for the OpenAI language model.
func NewOpenAI ¶
func NewOpenAI(apiKey string, optFns ...func(o *OpenAIOptions)) (*OpenAI, error)
NewOpenAI creates a new OpenAI instance with the provided API key and options.
func NewOpenAIFromClient ¶ added in v0.0.31
func NewOpenAIFromClient(client OpenAIClient, optFns ...func(o *OpenAIOptions)) (*OpenAI, error)
NewOpenAIFromClient creates a new OpenAI instance with the provided client and options.
func (*OpenAI) Generate ¶
func (l *OpenAI) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*OpenAI) InvocationParams ¶ added in v0.0.27
InvocationParams returns the parameters used in the model invocation.
type OpenAIClient ¶ added in v0.0.31
type OpenAIClient interface { // CreateCompletionStream creates a streaming completion request with the provided completion request. // It returns a completion stream for receiving streamed completion responses from the OpenAI API. // The `CompletionStream` should be closed after use. CreateCompletionStream(ctx context.Context, request openai.CompletionRequest) (stream *openai.CompletionStream, err error) // CreateCompletion sends a completion request to the OpenAI API and returns the completion response. // It blocks until the response is received from the API. CreateCompletion(ctx context.Context, request openai.CompletionRequest) (response openai.CompletionResponse, err error) }
OpenAIClient represents the interface for interacting with the OpenAI API.
type OpenAIOptions ¶
type OpenAIOptions struct { *schema.CallbackOptions `map:"-"` schema.Tokenizer `map:"-"` // ModelName is the name of the OpenAI language model to use. ModelName string `map:"model_name"` // Temperature is the sampling temperature to use during text generation. Temperatur float32 `map:"temperatur"` // MaxTokens is the maximum number of tokens to generate in the completion. MaxTokens int `map:"max_tokens"` // TopP is the total probability mass of tokens to consider at each step. TopP float32 `map:"top_p"` // PresencePenalty penalizes repeated tokens. PresencePenalty float32 `map:"presence_penalty"` // FrequencyPenalty penalizes repeated tokens according to frequency. FrequencyPenalty float32 `map:"frequency_penalty"` // N is the number of completions to generate for each prompt. N int `map:"n"` // Stream indicates whether to stream the results or not. Stream bool `map:"stream"` }
OpenAIOptions contains options for configuring the OpenAI LLM model.
type SagemakerEndpoint ¶
func NewSagemakerEndpoint ¶
func NewSagemakerEndpoint(client *sagemakerruntime.Client, endpointName string, contenHandler *LLMContentHandler, optFns ...func(o *SagemakerEndpointOptions)) (*SagemakerEndpoint, error)
func (*SagemakerEndpoint) Callbacks ¶
func (l *SagemakerEndpoint) Callbacks() []schema.Callback
Callbacks returns the registered callbacks of the model.
func (*SagemakerEndpoint) Generate ¶
func (l *SagemakerEndpoint) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*SagemakerEndpoint) InvocationParams ¶ added in v0.0.27
func (l *SagemakerEndpoint) InvocationParams() map[string]any
InvocationParams returns the parameters used in the model invocation.
func (*SagemakerEndpoint) Type ¶
func (l *SagemakerEndpoint) Type() string
Type returns the type of the model.
func (*SagemakerEndpoint) Verbose ¶
func (l *SagemakerEndpoint) Verbose() bool
Verbose returns the verbosity setting of the model.
type SagemakerEndpointOptions ¶
type SagemakerEndpointOptions struct { *schema.CallbackOptions `map:"-"` schema.Tokenizer `map:"-"` }
type Transformer ¶
type Transformer interface { // Transforms the input to a format that model can accept // as the request Body. Should return bytes or seekable file // like object in the format specified in the content_type // request header. TransformInput(prompt string) ([]byte, error) // Transforms the output from the model to string that // the LLM class expects. TransformOutput(output []byte) (string, error) }
type VertexAI ¶ added in v0.0.31
VertexAI represents the VertexAI language model.
func NewVertexAI ¶ added in v0.0.31
func NewVertexAI(client VertexAIClient, endpoint string, optFns ...func(o *VertexAIOptions)) (*VertexAI, error)
NewVertexAI creates a new VertexAI instance with the provided client and endpoint.
func (*VertexAI) Callbacks ¶ added in v0.0.31
Callbacks returns the registered callbacks of the model.
func (*VertexAI) Generate ¶ added in v0.0.31
func (l *VertexAI) Generate(ctx context.Context, prompt string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)
Generate generates text based on the provided prompt and options.
func (*VertexAI) InvocationParams ¶ added in v0.0.31
InvocationParams returns the parameters used in the model invocation.
type VertexAIClient ¶ added in v0.0.31
type VertexAIClient interface { // Predict sends a prediction request to the Vertex AI service. // It takes a context, predict request, and optional call options. // It returns the predict response or an error if the prediction fails. Predict(ctx context.Context, req *aiplatformpb.PredictRequest, opts ...gax.CallOption) (*aiplatformpb.PredictResponse, error) }
VertexAIClient represents the interface for interacting with Vertex AI.
type VertexAIOptions ¶ added in v0.0.31
type VertexAIOptions struct { *schema.CallbackOptions `map:"-"` schema.Tokenizer `map:"-"` // Temperature is the sampling temperature to use during text generation. Temperatur float32 `map:"temperatur"` // MaxOutputTokens determines the maximum amount of text output from one prompt. MaxOutputTokens int `map:"max_output_tokens"` // TopP is the total probability mass of tokens to consider at each step. TopP float32 `map:"top_p"` // TopK determines how the model selects tokens for output. TopK int `map:"top_k"` }
VertexAIOptions contains options for configuring the VertexAI language model.