llm

package
v0.0.26
Published: Jun 28, 2023 License: MIT Imports: 13 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type Cohere

type Cohere struct {
	schema.Tokenizer
	// contains filtered or unexported fields
}

func NewCohere

func NewCohere(apiKey string, optFns ...func(o *CohereOptions)) (*Cohere, error)

func (*Cohere) Callbacks

func (l *Cohere) Callbacks() []schema.Callback

func (*Cohere) Generate

func (l *Cohere) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)

func (*Cohere) Type

func (l *Cohere) Type() string

func (*Cohere) Verbose

func (l *Cohere) Verbose() bool

type CohereOptions

type CohereOptions struct {
	*schema.CallbackOptions
	Tokenizer  schema.Tokenizer
	Model      string
	Temperatur float32
}
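
A minimal usage sketch for the Cohere backend, assuming a Cohere API key in the environment. The import path is a placeholder and the model name is illustrative; field names follow the CohereOptions struct above.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	// Placeholder import path; replace with this module's actual path.
	"example.com/yourmodule/llm"
)

func main() {
	// NewCohere takes the API key plus optional functional options.
	cohere, err := llm.NewCohere(os.Getenv("COHERE_API_KEY"), func(o *llm.CohereOptions) {
		o.Model = "command" // assumed Cohere generation model
		o.Temperatur = 0.7  // field name as declared in CohereOptions
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := cohere.Generate(context.Background(), []string{"Tell me a joke."})
	if err != nil {
		log.Fatal(err)
	}

	// ModelResult fields are defined in the schema package; print generically here.
	fmt.Printf("%+v\n", res)
}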

type Fake added in v0.0.14

type Fake struct {
	schema.Tokenizer
	// contains filtered or unexported fields
}

func NewFake added in v0.0.14

func NewFake(response string) *Fake

func (*Fake) Callbacks added in v0.0.14

func (l *Fake) Callbacks() []schema.Callback

func (*Fake) Generate added in v0.0.14

func (l *Fake) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)

func (*Fake) Type added in v0.0.14

func (l *Fake) Type() string

func (*Fake) Verbose added in v0.0.14

func (l *Fake) Verbose() bool
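
Because NewFake returns a canned response and never calls an external API, it is convenient in tests. A minimal sketch (import path is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	// Placeholder import path; replace with this module's actual path.
	"example.com/yourmodule/llm"
)

func main() {
	// The fake LLM always returns the fixed response passed to NewFake.
	fake := llm.NewFake("This is a canned answer.")

	res, err := fake.Generate(context.Background(), []string{"Any prompt"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%+v\n", res)
}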

type HuggingFaceHub added in v0.0.14

type HuggingFaceHub struct {
	schema.Tokenizer
	// contains filtered or unexported fields
}

func NewHuggingFaceHub added in v0.0.14

func NewHuggingFaceHub(apiToken string, optFns ...func(o *HuggingFaceHubOptions)) (*HuggingFaceHub, error)

func (*HuggingFaceHub) Callbacks added in v0.0.14

func (l *HuggingFaceHub) Callbacks() []schema.Callback

func (*HuggingFaceHub) Generate added in v0.0.14

func (l *HuggingFaceHub) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)

func (*HuggingFaceHub) Type added in v0.0.14

func (l *HuggingFaceHub) Type() string

func (*HuggingFaceHub) Verbose added in v0.0.14

func (l *HuggingFaceHub) Verbose() bool

type HuggingFaceHubOptions added in v0.0.14

type HuggingFaceHubOptions struct {
	*schema.CallbackOptions
	Tokenizer schema.Tokenizer
	huggingface.HTTPClient
	// Model name to use.
	Model string
	Task  string
}
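
A minimal sketch for the Hugging Face Hub backend, assuming an API token in the environment. The model id and task value are illustrative assumptions, and the import path is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	// Placeholder import path; replace with this module's actual path.
	"example.com/yourmodule/llm"
)

func main() {
	hf, err := llm.NewHuggingFaceHub(os.Getenv("HUGGINGFACEHUB_API_TOKEN"), func(o *llm.HuggingFaceHubOptions) {
		o.Model = "gpt2"           // assumed model id on the Hub
		o.Task = "text-generation" // assumed task name
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := hf.Generate(context.Background(), []string{"Once upon a time"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%+v\n", res)
}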

type LLMContentHandler

type LLMContentHandler struct {
	// contains filtered or unexported fields
}

func NewLLMContentHandler

func NewLLMContentHandler(contentType, accept string, transformer Transformer) *LLMContentHandler

func (*LLMContentHandler) Accept

func (ch *LLMContentHandler) Accept() string

func (*LLMContentHandler) ContentType

func (ch *LLMContentHandler) ContentType() string

func (*LLMContentHandler) TransformInput

func (ch *LLMContentHandler) TransformInput(prompt string) ([]byte, error)

func (*LLMContentHandler) TransformOutput

func (ch *LLMContentHandler) TransformOutput(output []byte) (string, error)

type OpenAI

type OpenAI struct {
	schema.Tokenizer
	// contains filtered or unexported fields
}

func NewOpenAI

func NewOpenAI(apiKey string, optFns ...func(o *OpenAIOptions)) (*OpenAI, error)

func (*OpenAI) Callbacks

func (l *OpenAI) Callbacks() []schema.Callback

func (*OpenAI) Generate

func (l *OpenAI) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)

func (*OpenAI) Type

func (l *OpenAI) Type() string

func (*OpenAI) Verbose

func (l *OpenAI) Verbose() bool

type OpenAIOptions

type OpenAIOptions struct {
	*schema.CallbackOptions
	schema.Tokenizer
	// Model name to use.
	ModelName string
	// Sampling temperature to use.
	Temperatur float32
	// The maximum number of tokens to generate in the completion.
	// -1 returns as many tokens as possible given the prompt and
	// the model's maximal context size.
	MaxTokens int
	// Total probability mass of tokens to consider at each step.
	TopP float32
	// Penalizes repeated tokens.
	PresencePenalty float32
	// Penalizes repeated tokens according to frequency.
	FrequencyPenalty float32
	// How many completions to generate for each prompt.
	N int
	// Batch size to use when passing multiple documents to generate.
	BatchSize int
	// Whether to stream the results or not.
	Stream bool
}
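
A minimal sketch for the OpenAI backend. The option values are illustrative; field names follow the OpenAIOptions struct above and the import path is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	// Placeholder import path; replace with this module's actual path.
	"example.com/yourmodule/llm"
)

func main() {
	openai, err := llm.NewOpenAI(os.Getenv("OPENAI_API_KEY"), func(o *llm.OpenAIOptions) {
		o.ModelName = "text-davinci-003" // assumed completion model name
		o.Temperatur = 0.2               // field name as declared in OpenAIOptions
		o.MaxTokens = 256
	})
	if err != nil {
		log.Fatal(err)
	}

	res, err := openai.Generate(context.Background(), []string{"Write a haiku about Go."})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%+v\n", res)
}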

type SagemakerEndpoint

type SagemakerEndpoint struct {
	schema.Tokenizer
	// contains filtered or unexported fields
}

func NewSagemakerEndpoint

func NewSagemakerEndpoint(client *sagemakerruntime.Client, endpointName string, contenHandler *LLMContentHandler, optFns ...func(o *SagemakerEndpointOptions)) (*SagemakerEndpoint, error)

func (*SagemakerEndpoint) Callbacks

func (l *SagemakerEndpoint) Callbacks() []schema.Callback

func (*SagemakerEndpoint) Generate

func (l *SagemakerEndpoint) Generate(ctx context.Context, prompts []string, optFns ...func(o *schema.GenerateOptions)) (*schema.ModelResult, error)

func (*SagemakerEndpoint) Type

func (l *SagemakerEndpoint) Type() string

func (*SagemakerEndpoint) Verbose

func (l *SagemakerEndpoint) Verbose() bool

type SagemakerEndpointOptions

type SagemakerEndpointOptions struct {
	*schema.CallbackOptions
	Tokenizer schema.Tokenizer
}

type Transformer

type Transformer interface {
	// Transforms the prompt into the request body that the model
	// accepts, encoded in the format named by the content_type
	// request header.
	TransformInput(prompt string) ([]byte, error)

	// Transforms the raw model output into the string that the
	// LLM expects.
	TransformOutput(output []byte) (string, error)
}
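
To tie the pieces together, here is a sketch of a custom Transformer wired into an LLMContentHandler and a SagemakerEndpoint. The JSON request and response shapes are assumptions that depend on the deployed model container, and the endpoint name and module import path are placeholders; the AWS SDK calls are standard aws-sdk-go-v2 usage.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sagemakerruntime"

	// Placeholder import path; replace with this module's actual path.
	"example.com/yourmodule/llm"
)

// jsonTransformer implements the Transformer interface for a model
// container that speaks a simple {"inputs": ...} / {"generated_text": ...}
// JSON protocol. The payload shape is an assumption; adjust it to the
// container you deployed.
type jsonTransformer struct{}

func (t jsonTransformer) TransformInput(prompt string) ([]byte, error) {
	return json.Marshal(map[string]string{"inputs": prompt})
}

func (t jsonTransformer) TransformOutput(output []byte) (string, error) {
	var body struct {
		GeneratedText string `json:"generated_text"`
	}
	if err := json.Unmarshal(output, &body); err != nil {
		return "", err
	}
	return body.GeneratedText, nil
}

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sagemakerruntime.NewFromConfig(cfg)

	handler := llm.NewLLMContentHandler("application/json", "application/json", jsonTransformer{})

	// "my-llm-endpoint" is a placeholder for the deployed endpoint name.
	endpoint, err := llm.NewSagemakerEndpoint(client, "my-llm-endpoint", handler)
	if err != nil {
		log.Fatal(err)
	}

	res, err := endpoint.Generate(ctx, []string{"Summarize the benefits of unit tests."})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%+v\n", res)
}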
