Documentation
¶
Index ¶
- type OllamaProvider
- func (p *OllamaProvider) EstimateTokens(ctx context.Context, message string) (int, error)
- func (p *OllamaProvider) GenerateResponse(ctx context.Context, messages []message.Message) (*message.Message, error)
- func (p *OllamaProvider) GenerateStream(ctx context.Context, messages []message.Message, opts *core.InferenceOptions) (<-chan *message.Message, <-chan error)
- func (p *OllamaProvider) GenerateStreamWithTools(ctx context.Context, messages []message.Message, tools []tool.Tool, ...) (<-chan *message.Message, <-chan error)
- func (p *OllamaProvider) GenerateWithTools(ctx context.Context, messages []message.Message, tools []tool.Tool) (*message.Message, error)
- func (p *OllamaProvider) GetCapabilities(ctx context.Context) (*core.ProviderCapabilities, error)
- func (p *OllamaProvider) GetModelList(ctx context.Context) ([]string, error)
- func (p *OllamaProvider) ValidatePrompt(ctx context.Context, messages []message.Message) error
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type OllamaProvider ¶
type OllamaProvider struct {
// contains filtered or unexported fields
}
OllamaProvider implements the LLMProvider interface for Ollama
func NewOllamaProvider ¶
NewOllamaProvider creates a new Ollama provider
TODO: handle the base URL more robustly (trailing slashes, etc.) and construct the actual URL from the baseURL, port, etc.
func (*OllamaProvider) EstimateTokens ¶
func (p *OllamaProvider) EstimateTokens(ctx context.Context, message string) (int, error)
EstimateTokens estimates the number of tokens in a message
func (*OllamaProvider) GenerateResponse ¶
func (p *OllamaProvider) GenerateResponse(ctx context.Context, messages []message.Message) (*message.Message, error)
GenerateResponse implements the LLMProvider interface for basic responses
func (*OllamaProvider) GenerateStream ¶
func (p *OllamaProvider) GenerateStream(ctx context.Context, messages []message.Message, opts *core.InferenceOptions) (<-chan *message.Message, <-chan error)
GenerateStream streams the response token by token
func (*OllamaProvider) GenerateStreamWithTools ¶
func (p *OllamaProvider) GenerateStreamWithTools(ctx context.Context, messages []message.Message, tools []tool.Tool, opts *core.InferenceOptions) (<-chan *message.Message, <-chan error)
GenerateStreamWithTools streams a response with tools token by token
func (*OllamaProvider) GenerateWithTools ¶
func (p *OllamaProvider) GenerateWithTools(ctx context.Context, messages []message.Message, tools []tool.Tool) (*message.Message, error)
GenerateWithTools implements the LLMProvider interface for tool-using responses
TODO: automatically generate a system message descriptor for the supplied tools
func (*OllamaProvider) GetCapabilities ¶
func (p *OllamaProvider) GetCapabilities(ctx context.Context) (*core.ProviderCapabilities, error)
GetCapabilities returns the capabilities supported by this provider
func (*OllamaProvider) GetModelList ¶
func (p *OllamaProvider) GetModelList(ctx context.Context) ([]string, error)
GetModelList returns available models for this provider
func (*OllamaProvider) ValidatePrompt ¶
func (p *OllamaProvider) ValidatePrompt(ctx context.Context, messages []message.Message) error
ValidatePrompt checks if a prompt is valid for the provider