Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type CohereOptions ¶
type CohereOptions struct { *schema.CallbackOptions Model string Temperatur float32 }
type HuggingFaceHub ¶ added in v0.0.14
func NewHuggingFaceHub ¶ added in v0.0.14
func NewHuggingFaceHub(apiToken string, optFns ...func(o *HuggingFaceHubOptions)) (*HuggingFaceHub, error)
func (*HuggingFaceHub) Callbacks ¶ added in v0.0.14
func (l *HuggingFaceHub) Callbacks() []schema.Callback
func (*HuggingFaceHub) Generate ¶ added in v0.0.14
func (l *HuggingFaceHub) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.LLMResult, error)
func (*HuggingFaceHub) Type ¶ added in v0.0.14
func (l *HuggingFaceHub) Type() string
func (*HuggingFaceHub) Verbose ¶ added in v0.0.14
func (l *HuggingFaceHub) Verbose() bool
type HuggingFaceHubOptions ¶ added in v0.0.14
type HuggingFaceHubOptions struct { *schema.CallbackOptions // Model name to use. RepoID string Task string }
type LLMContentHandler ¶
type LLMContentHandler struct {
// contains filtered or unexported fields
}
func NewLLMContentHandler ¶
func NewLLMContentHandler(contentType, accept string, transformer Transformer) *LLMContentHandler
func (*LLMContentHandler) Accept ¶
func (ch *LLMContentHandler) Accept() string
func (*LLMContentHandler) ContentType ¶
func (ch *LLMContentHandler) ContentType() string
func (*LLMContentHandler) TransformInput ¶
func (ch *LLMContentHandler) TransformInput(prompt string) ([]byte, error)
func (*LLMContentHandler) TransformOutput ¶
func (ch *LLMContentHandler) TransformOutput(output []byte) (string, error)
type OpenAIOptions ¶
type OpenAIOptions struct { *schema.CallbackOptions // Model name to use. ModelName string // Sampling temperature to use. Temperatur float32 // The maximum number of tokens to generate in the completion. // -1 returns as many tokens as possible given the prompt and // the model's maximal context size. MaxTokens int // Total probability mass of tokens to consider at each step. TopP float32 // Penalizes repeated tokens. PresencePenalty float32 // Penalizes repeated tokens according to frequency. FrequencyPenalty float32 // How many completions to generate for each prompt. N int // Batch size to use when passing multiple documents to generate. BatchSize int // Whether to stream the results or not. Stream bool }
type SagemakerEndpoint ¶
func NewSagemakerEndpoint ¶
func NewSagemakerEndpoint(client *sagemakerruntime.Client, endpointName string, contenHandler *LLMContentHandler) (*SagemakerEndpoint, error)
func (*SagemakerEndpoint) Callbacks ¶
func (l *SagemakerEndpoint) Callbacks() []schema.Callback
func (*SagemakerEndpoint) Generate ¶
func (l *SagemakerEndpoint) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.LLMResult, error)
func (*SagemakerEndpoint) Type ¶
func (l *SagemakerEndpoint) Type() string
func (*SagemakerEndpoint) Verbose ¶
func (l *SagemakerEndpoint) Verbose() bool
type SagemakerEndpointOptions ¶
type SagemakerEndpointOptions struct {
*schema.CallbackOptions
}
type Transformer ¶
type Transformer interface { // Transforms the input to a format that the model can accept // as the request Body. Should return bytes or a seekable // file-like object in the format specified in the content_type // request header. TransformInput(prompt string) ([]byte, error) // Transforms the output from the model to a string that // the LLM class expects. TransformOutput(output []byte) (string, error) }
Click to show internal directories.
Click to hide internal directories.