Documentation ¶
Index ¶
- Constants
- Variables
- func AppPath(relativePath string) string
- func AppRoot() string
- func CalculateNumCtx(text string, sessionTokens int) (response int)
- func CountryName(langTag string) string
- func CountryNameOf(countryLang, outputLang string) string
- func Download(url string) (data []byte, err error)
- func GetDirSensyPrompts() string
- func IsDocumentPrompt(name string) bool
- func Lang(lang string) *nlp_detect.LanguageISO
- func LangCode(lang string) string
- func LangCodeDetect(text string) string
- func LangDetect(text string) *nlp_detect.LanguageISO
- func LangName(lang string) string
- func LangNameDefOut(lang string) string
- func LangNameOut(langIn, langOut string) string
- func RenderMustache(text string, context ...interface{}) (string, error)
- func TempRoot() string
- type ActionType
- type KnowledgeModeType
- type LLMDriverCreatorOptions
- type LLMDriverOptions
- func (instance *LLMDriverOptions) GetAccessKey() (response string)
- func (instance *LLMDriverOptions) GetSecretKey() (response string)
- func (instance *LLMDriverOptions) Map() map[string]interface{}
- func (instance *LLMDriverOptions) Parse(m map[string]any) *LLMDriverOptions
- func (instance *LLMDriverOptions) String() string
- type LLMRequest
- func (instance *LLMRequest) Add(args ...interface{}) *LLMRequest
- func (instance *LLMRequest) AddUserMessage(arg interface{}) *LLMRequest
- func (instance *LLMRequest) CalcMaxTokens() int
- func (instance *LLMRequest) CalcMaxTokensMin(min int) int
- func (instance *LLMRequest) CalcPrompt() (response string)
- func (instance *LLMRequest) CalcPromptSystem() (response string)
- func (instance *LLMRequest) CalculateMaxTokens() int
- func (instance *LLMRequest) Errors() (response []string)
- func (instance *LLMRequest) GetOptions() (response *RequestOptions)
- func (instance *LLMRequest) GetPrompt() (response string)
- func (instance *LLMRequest) GetPromptSystem() (response string)
- func (instance *LLMRequest) Map() (m map[string]interface{})
- func (instance *LLMRequest) Messages() (response []*RequestMessage)
- func (instance *LLMRequest) MessagesLength() (response int)
- func (instance *LLMRequest) Parse(m map[string]interface{}) *LLMRequest
- func (instance *LLMRequest) Refresh() *LLMRequest
- func (instance *LLMRequest) SetOptionMaxTokens(value int) *LLMRequest
- func (instance *LLMRequest) SetOptionMaxTokensWithLimit(value, limit int) *LLMRequest
- func (instance *LLMRequest) SetOptions(options *RequestOptions) *LLMRequest
- func (instance *LLMRequest) SetPrompt(text string) *LLMRequest
- func (instance *LLMRequest) SetPromptSystem(text string) *LLMRequest
- func (instance *LLMRequest) String() string
- type LLMRequestContext
- type LLMRequestEmbeddings
- type LLMResponse
- func (instance *LLMResponse) GetMetaProjectId() (response string)
- func (instance *LLMResponse) GetMetadata() (response map[string]interface{})
- func (instance *LLMResponse) IsJSONResponse() bool
- func (instance *LLMResponse) JSONCount() int
- func (instance *LLMResponse) JSONData() (response []map[string]interface{})
- func (instance *LLMResponse) JSONForEach(iterator func(jsonItem map[string]any)) *LLMResponse
- func (instance *LLMResponse) Map() (response map[string]interface{})
- func (instance *LLMResponse) SetMetaProjectId(value string) *LLMResponse
- func (instance *LLMResponse) SetRequest(request *LLMRequest) *LLMResponse
- func (instance *LLMResponse) SetResponse(rawResponse map[string]interface{}) *LLMResponse
- func (instance *LLMResponse) SetTextResponse(value string) *LLMResponse
- func (instance *LLMResponse) String() string
- type OptionsAgent
- type RequestMessage
- func (instance *RequestMessage) Clone() (response *RequestMessage)
- func (instance *RequestMessage) Length() int
- func (instance *RequestMessage) Map() (response map[string]interface{})
- func (instance *RequestMessage) SetContent(value string) *RequestMessage
- func (instance *RequestMessage) String() string
- type RequestMessageList
- func (instance *RequestMessageList) Add(item *RequestMessage) *RequestMessageList
- func (instance *RequestMessageList) Count() (response int)
- func (instance *RequestMessageList) ForEach(f func(item *RequestMessage) any)
- func (instance *RequestMessageList) GetSystem() (response *RequestMessage)
- func (instance *RequestMessageList) GetUser() (response *RequestMessage)
- func (instance *RequestMessageList) Length() (response int)
- func (instance *RequestMessageList) Map() (response []map[string]interface{})
- func (instance *RequestMessageList) SetSystem(text string) *RequestMessageList
- func (instance *RequestMessageList) SetUser(text string, images ...string) *RequestMessageList
- func (instance *RequestMessageList) String() string
- type RequestOptions
- type RequestPayload
- func (instance *RequestPayload) Format(text string) (response string)
- func (instance *RequestPayload) Map() (response map[string]interface{})
- func (instance *RequestPayload) Parse(value any) bool
- func (instance *RequestPayload) RAGContextAppendItems(items ...any) *RequestPayload
- func (instance *RequestPayload) RAGContextSetItems(items ...any) *RequestPayload
- func (instance *RequestPayload) SetLang(value string) *RequestPayload
- func (instance *RequestPayload) String() string
- type StreamHandler
Constants ¶
const (
	RoleSystem    string = "system"
	RoleUser             = "user"
	RoleAssistant        = "assistant"
)
const (
	DriverOllama   = "ollama"
	DriverChatGPT  = "chatgpt"
	DriverClaude   = "claude"
	DriverDalle    = "dalle"
	DriverUnsplash = "unsplash"
)
const (
	PromptDefNameUpload   = "upload"   // special system prompt used only after a session upload
	PromptDefNameEntities = "entities" // system prompt used by tools or user actions to achieve a special task
	PromptDefNameContext  = "context"
)
const ContentOptionSyntaxHTML = "html"
const ContentOptionSyntaxMarkdown = "markdown"
const ContentOptionSyntaxTextPlain = "text"
const DefLang = "en" // default LLM language
const DefModel = "llama3.2"
const DirSensyOutputs = "./sensy-outputs" // all sensy outputs go here
const DirSensyPrompts = "./sensy-prompts" // source folder for prompts (only if AI is used)
const PrefixLiteral = "literal:"
const PrefixPrompt = "prompt:"
const PromptDefLang = DefLang
const PromptSeparator = "\n---\n"
Variables ¶
var PromptDefDocumentNames = []string{
	PromptDefNameEntities,
}
PromptDefDocumentNames contains the names of prompts that require a "document" payload in order to be performed.
Functions ¶
func AppPath ¶
func AppPath(relativePath string) string
func AppRoot ¶
func AppRoot() string
func CalculateNumCtx ¶
func CalculateNumCtx(text string, sessionTokens int) (response int)
CalculateNumCtx computes a numeric context size based on the input text length and the session token count. The minimum response value is 2048.
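Example ¶
A minimal sketch in example-test form; the input values are illustrative, and only the documented 2048 floor is assumed.
func ExampleCalculateNumCtx() {
	numCtx := CalculateNumCtx("How do I reset my password?", 512)
	fmt.Println(numCtx >= 2048) // always true: the result never falls below the documented minimum
}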
func CountryName ¶
func CountryName(langTag string) string
func CountryNameOf ¶
func CountryNameOf(countryLang, outputLang string) string
func Download ¶
func Download(url string) (data []byte, err error)
func GetDirSensyPrompts ¶
func GetDirSensyPrompts() string
func IsDocumentPrompt ¶
func IsDocumentPrompt(name string) bool
IsDocumentPrompt checks whether the provided name is in the list of prompt names that require a "document" payload.
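Example ¶
A sketch in example-test form, grounded in the current PromptDefDocumentNames list (see Variables above).
func ExampleIsDocumentPrompt() {
	fmt.Println(IsDocumentPrompt(PromptDefNameEntities)) // true: listed in PromptDefDocumentNames
	fmt.Println(IsDocumentPrompt(PromptDefNameContext))  // false: not in the list
	// Output:
	// true
	// false
}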
func Lang ¶
func Lang(lang string) *nlp_detect.LanguageISO
func LangCode ¶
func LangCode(lang string) string
func LangCodeDetect ¶
func LangCodeDetect(text string) string
func LangDetect ¶
func LangDetect(text string) *nlp_detect.LanguageISO
func LangName ¶
func LangName(lang string) string
func LangNameDefOut ¶
func LangNameDefOut(lang string) string
func LangNameOut ¶
func LangNameOut(langIn, langOut string) string
func RenderMustache ¶
func RenderMustache(text string, context ...interface{}) (string, error)
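Example ¶
A sketch in example-test form, assuming standard mustache-style {{placeholder}} syntax and a map context; the accepted context shapes are not documented on this page.
func ExampleRenderMustache() {
	out, err := RenderMustache("Hello, {{name}}!", map[string]interface{}{"name": "World"})
	if err != nil {
		fmt.Println("render error:", err)
		return
	}
	fmt.Println(out) // "Hello, World!" under the assumptions above
}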
func TempRoot ¶
func TempRoot() string
Types ¶
type ActionType ¶ added in v0.3.36
type ActionType string
const (
	ActionChat     ActionType = "chat"
	ActionGenerate ActionType = "generate"
)
type KnowledgeModeType ¶
type KnowledgeModeType string
const (
	ModeKnowledge KnowledgeModeType = "knowledge"
	ModeSession   KnowledgeModeType = "session"
	ModeNotebook                    = "notebook"
)
type LLMDriverCreatorOptions ¶ added in v0.3.35
type LLMDriverCreatorOptions struct {
	Uid          string   `json:"uid"`
	PlanUid      string   `json:"plan-uid"`
	Lang         string   `json:"lang"`
	TextQuery    string   `json:"text-query"`
	TitleQuery   string   `json:"title-query"`
	CreateImage  bool     `json:"create-image"`
	ImageQuery   string   `json:"image-query"`
	ImageParams  []string `json:"image-params"`
	Tags         []string `json:"tags"`
	Categories   []string `json:"categories"`
	CreateText   bool     `json:"create-text"`
	SourceSyntax string   `json:"source-syntax"` // markdown, text
	TargetSyntax string   `json:"target-syntax"` // html
}
func NewLLMDriverCreatorOptions ¶ added in v0.3.35
func NewLLMDriverCreatorOptions(args ...interface{}) (instance *LLMDriverCreatorOptions)
func (*LLMDriverCreatorOptions) HasSyntax ¶ added in v0.3.35
func (instance *LLMDriverCreatorOptions) HasSyntax() bool
func (*LLMDriverCreatorOptions) Map ¶ added in v0.3.35
func (instance *LLMDriverCreatorOptions) Map() map[string]interface{}
func (*LLMDriverCreatorOptions) String ¶ added in v0.3.35
func (instance *LLMDriverCreatorOptions) String() string
type LLMDriverOptions ¶ added in v0.3.35
type LLMDriverOptions struct {
	Uid            string                   `json:"uid"`         // (only for store drivers) store uid
	DriverName     string                   `json:"driver-name"` // name of the driver
	Model          string                   `json:"model"`       // name of the model
	AccessKey      string                   `json:"access-key"`  // access token
	SecretKey      string                   `json:"secret-key,omitempty"`
	OptionsCreator *LLMDriverCreatorOptions `json:"create-options,omitempty"`
	OptionsModel   *RequestOptions          `json:"model-options"` // specific AI model options
}
func NewLLMDriverOptions ¶ added in v0.3.35
func NewLLMDriverOptions(args ...interface{}) (instance *LLMDriverOptions)
func (*LLMDriverOptions) GetAccessKey ¶ added in v0.3.35
func (instance *LLMDriverOptions) GetAccessKey() (response string)
func (*LLMDriverOptions) GetSecretKey ¶ added in v0.3.35
func (instance *LLMDriverOptions) GetSecretKey() (response string)
func (*LLMDriverOptions) Map ¶ added in v0.3.37
func (instance *LLMDriverOptions) Map() map[string]interface{}
func (*LLMDriverOptions) Parse ¶ added in v0.3.37
func (instance *LLMDriverOptions) Parse(m map[string]any) *LLMDriverOptions
func (*LLMDriverOptions) String ¶ added in v0.3.37
func (instance *LLMDriverOptions) String() string
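Example ¶
A sketch of a Map/Parse round trip in example-test form; it assumes Map and Parse mirror each other, and the field values are illustrative.
func ExampleLLMDriverOptions() {
	opts := NewLLMDriverOptions()
	opts.DriverName = DriverOllama
	opts.Model = DefModel
	m := opts.Map()                            // serialize to a plain map
	clone := NewLLMDriverOptions().Parse(m)    // rebuild an equivalent instance from the map
	fmt.Println(clone.DriverName, clone.Model) // expected: ollama llama3.2
}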
type LLMRequest ¶ added in v0.3.35
type LLMRequest struct {
	// required
	Version string          `json:"version"` // application version
	Driver  string          `json:"driver"`  // name of driver
	Model   string          `json:"model"`   // ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
	Payload *RequestPayload `json:"payload"` // payload used to format prompts

	// optional
	SkillName  string     `json:"skill-name,omitempty"`  // (optional) used only in agents with a skill controller
	PromptName string     `json:"prompt-name,omitempty"` // (optional) used only in agents with a skill controller
	Action     ActionType `json:"action,omitempty"`      // (optional) chat, generate
	Format     string     `json:"format,omitempty"`      // (optional) json
	SessionId  string     `json:"session-id,omitempty"`  // (optional) ID of the session if any
	Context    []int      `json:"context,omitempty"`     // (optional) previous context if any
	Images     []string   `json:"images,omitempty"`      // (optional) list of base64 encoded images

	// not exposed
	Stream StreamHandler `json:"-"` // Stream: only when stream is allowed
	// contains filtered or unexported fields
}
func NewLLMRequest ¶ added in v0.3.35
func NewLLMRequest(messages ...interface{}) (instance *LLMRequest)
func (*LLMRequest) Add ¶ added in v0.3.35
func (instance *LLMRequest) Add(args ...interface{}) *LLMRequest
func (*LLMRequest) AddUserMessage ¶ added in v0.3.37
func (instance *LLMRequest) AddUserMessage(arg interface{}) *LLMRequest
func (*LLMRequest) CalcMaxTokens ¶ added in v0.3.37
func (instance *LLMRequest) CalcMaxTokens() int
func (*LLMRequest) CalcMaxTokensMin ¶ added in v0.3.37
func (instance *LLMRequest) CalcMaxTokensMin(min int) int
func (*LLMRequest) CalcPrompt ¶ added in v0.3.37
func (instance *LLMRequest) CalcPrompt() (response string)
func (*LLMRequest) CalcPromptSystem ¶ added in v0.3.37
func (instance *LLMRequest) CalcPromptSystem() (response string)
func (*LLMRequest) CalculateMaxTokens ¶ added in v0.3.38
func (instance *LLMRequest) CalculateMaxTokens() int
func (*LLMRequest) Errors ¶ added in v0.3.35
func (instance *LLMRequest) Errors() (response []string)
func (*LLMRequest) GetOptions ¶ added in v0.3.35
func (instance *LLMRequest) GetOptions() (response *RequestOptions)
func (*LLMRequest) GetPrompt ¶ added in v0.3.37
func (instance *LLMRequest) GetPrompt() (response string)
func (*LLMRequest) GetPromptSystem ¶ added in v0.3.37
func (instance *LLMRequest) GetPromptSystem() (response string)
func (*LLMRequest) Map ¶ added in v0.3.35
func (instance *LLMRequest) Map() (m map[string]interface{})
func (*LLMRequest) Messages ¶ added in v0.3.35
func (instance *LLMRequest) Messages() (response []*RequestMessage)
Messages returns a list of request messages from the LLMRequest instance, with their content optionally formatted.
func (*LLMRequest) MessagesLength ¶ added in v0.3.35
func (instance *LLMRequest) MessagesLength() (response int)
func (*LLMRequest) Parse ¶ added in v0.3.35
func (instance *LLMRequest) Parse(m map[string]interface{}) *LLMRequest
func (*LLMRequest) Refresh ¶ added in v0.3.35
func (instance *LLMRequest) Refresh() *LLMRequest
func (*LLMRequest) SetOptionMaxTokens ¶ added in v0.3.37
func (instance *LLMRequest) SetOptionMaxTokens(value int) *LLMRequest
func (*LLMRequest) SetOptionMaxTokensWithLimit ¶ added in v0.3.38
func (instance *LLMRequest) SetOptionMaxTokensWithLimit(value, limit int) *LLMRequest
func (*LLMRequest) SetOptions ¶ added in v0.3.35
func (instance *LLMRequest) SetOptions(options *RequestOptions) *LLMRequest
func (*LLMRequest) SetPrompt ¶ added in v0.3.38
func (instance *LLMRequest) SetPrompt(text string) *LLMRequest
SetPrompt sets the user prompt text in the LLMRequest messages, provided the instance and its messages are not nil and the text is non-empty. The text may be a "prompt wrapper" that is formatted with the request payload. NOTE: the request payload may contain a "user-query" field that is merged into the prompt wrapper.
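Example ¶
A sketch of the flow described above, in example-test form; the {{user-query}} placeholder mirrors the payload's JSON field name and is an assumption about the wrapper syntax.
func ExampleLLMRequest_SetPrompt() {
	req := NewLLMRequest()
	req.Driver = DriverOllama
	req.Model = DefModel
	req.Payload = NewRequestPayload()
	req.Payload.UserQuery = "Summarize the attached notes."
	req.SetPromptSystem("You are a concise assistant.")
	req.SetPrompt("{{user-query}}")   // prompt wrapper, formatted with the request payload
	fmt.Println(req.MessagesLength()) // expected 2 (system + user), assuming one message per role
}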
func (*LLMRequest) SetPromptSystem ¶ added in v0.3.37
func (instance *LLMRequest) SetPromptSystem(text string) *LLMRequest
func (*LLMRequest) String ¶ added in v0.3.35
func (instance *LLMRequest) String() string
type LLMRequestContext ¶ added in v0.3.39
type LLMRequestContext struct {
	Version string `json:"version"` // application version
	Driver  string `json:"driver"`  // name of driver (usually ollama)
	Model   string `json:"model"`   // ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
	Prompt  string `json:"prompt"`
	// contains filtered or unexported fields
}
func NewLLMRequestContext ¶ added in v0.3.39
func NewLLMRequestContext() (instance *LLMRequestContext)
func (*LLMRequestContext) GetOptions ¶ added in v0.3.39
func (instance *LLMRequestContext) GetOptions() (response *RequestOptions)
func (*LLMRequestContext) Map ¶ added in v0.3.39
func (instance *LLMRequestContext) Map() (m map[string]interface{})
func (*LLMRequestContext) String ¶ added in v0.3.39
func (instance *LLMRequestContext) String() string
type LLMRequestEmbeddings ¶ added in v0.3.38
type LLMRequestEmbeddings struct {
	Version string `json:"version"` // application version
	Driver  string `json:"driver"`  // name of driver
	Model   string `json:"model"`   // ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
	Prompt  string `json:"prompt"`
	// contains filtered or unexported fields
}
func NewLLMRequestEmbeddings ¶ added in v0.3.38
func NewLLMRequestEmbeddings() (instance *LLMRequestEmbeddings)
func (*LLMRequestEmbeddings) GetOptions ¶ added in v0.3.38
func (instance *LLMRequestEmbeddings) GetOptions() (response *RequestOptions)
func (*LLMRequestEmbeddings) Map ¶ added in v0.3.38
func (instance *LLMRequestEmbeddings) Map() (m map[string]interface{})
func (*LLMRequestEmbeddings) String ¶ added in v0.3.38
func (instance *LLMRequestEmbeddings) String() string
type LLMResponse ¶ added in v0.3.35
type LLMResponse struct {
	Version       string                 `json:"version"`
	Driver        string                 `json:"driver"`
	Model         string                 `json:"model"`
	SessionId     string                 `json:"session-id"`
	Request       map[string]interface{} `json:"request"`        // entire original request (only for debug)
	RequestSystem string                 `json:"request-system"` // original request system prompt (only for debug)
	RequestUser   string                 `json:"request-user"`   // original request user prompt (only for debug)
	Done          bool                   `json:"done"`           // used only for stream
	Elapsed       int                    `json:"elapsed"`        // milliseconds
	UserLang      string                 `json:"user-lang"`      // language used by user

	RespRaw        map[string]interface{}   `json:"resp-raw"`         // native JSON response
	RespContext    []int                    `json:"resp-context"`     // context generated after response
	RespText       string                   `json:"resp-text"`        // raw text response
	RespJsonObject map[string]interface{}   `json:"resp-json-object"` // JSON response
	RespJsonArray  []map[string]interface{} `json:"resp-json-array"`  // JSON response
	RespKnowledge  []map[string]interface{} `json:"resp-knowledge"`   // knowledge used in RAG
	RespSession    map[string]interface{}   `json:"resp-session"`     // session info

	Metadata map[string]interface{} `json:"metadata"` // used for custom fields. This is an external payload containing fields like "project-id", "sku", etc...
	// contains filtered or unexported fields
}
func NewLLMResponse ¶ added in v0.3.35
func NewLLMResponse(request *LLMRequest) (instance *LLMResponse)
func (*LLMResponse) GetMetaProjectId ¶ added in v0.3.35
func (instance *LLMResponse) GetMetaProjectId() (response string)
GetMetaProjectId (only for NOTEBOOK) retrieves the "project-id" value from the Metadata field of the LLMResponse instance. Returns an empty string if no value is found or if the instance is nil.
func (*LLMResponse) GetMetadata ¶ added in v0.3.35
func (instance *LLMResponse) GetMetadata() (response map[string]interface{})
GetMetadata retrieves the Metadata field of the LLMResponse instance, initializing it as an empty map if nil.
func (*LLMResponse) IsJSONResponse ¶ added in v0.3.37
func (instance *LLMResponse) IsJSONResponse() bool
func (*LLMResponse) JSONCount ¶ added in v0.3.37
func (instance *LLMResponse) JSONCount() int
func (*LLMResponse) JSONData ¶ added in v0.3.37
func (instance *LLMResponse) JSONData() (response []map[string]interface{})
func (*LLMResponse) JSONForEach ¶ added in v0.3.37
func (instance *LLMResponse) JSONForEach(iterator func(jsonItem map[string]any)) *LLMResponse
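Example ¶
A consumption sketch in example-test form; it assumes SetTextResponse parses JSON text into the response's JSON fields, which is not explicitly documented here.
func ExampleLLMResponse_JSONForEach() {
	resp := NewLLMResponse(NewLLMRequest())
	resp.SetTextResponse(`[{"title":"First"},{"title":"Second"}]`)
	if resp.IsJSONResponse() {
		resp.JSONForEach(func(jsonItem map[string]any) {
			fmt.Println(jsonItem["title"]) // First, then Second
		})
	}
}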
func (*LLMResponse) Map ¶ added in v0.3.35
func (instance *LLMResponse) Map() (response map[string]interface{})
func (*LLMResponse) SetMetaProjectId ¶ added in v0.3.35
func (instance *LLMResponse) SetMetaProjectId(value string) *LLMResponse
SetMetaProjectId (only for NOTEBOOK) sets the "project-id" value within the Metadata field of the LLMResponse instance and returns the instance.
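Example ¶
A set/get sketch in example-test form, following the documented behavior; the id is illustrative.
func ExampleLLMResponse_SetMetaProjectId() {
	resp := NewLLMResponse(NewLLMRequest())
	resp.SetMetaProjectId("prj-123")     // stored under "project-id" in Metadata
	fmt.Println(resp.GetMetaProjectId()) // prj-123
}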
func (*LLMResponse) SetRequest ¶ added in v0.3.35
func (instance *LLMResponse) SetRequest(request *LLMRequest) *LLMResponse
SetRequest associates an LLMRequest with the LLMResponse instance and updates relevant fields like Model and UserLang.
func (*LLMResponse) SetResponse ¶ added in v0.3.35
func (instance *LLMResponse) SetResponse(rawResponse map[string]interface{}) *LLMResponse
SetResponse updates the LLMResponse instance with the provided raw response data and marks the operation as complete.
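Example ¶
A driver-side sketch in example-test form; the raw map's shape is illustrative, not a documented schema.
func ExampleLLMResponse_SetResponse() {
	req := NewLLMRequest()
	resp := NewLLMResponse(req)
	resp.SetResponse(map[string]interface{}{"response": "Hello!"}) // native driver reply
	fmt.Println(resp.Done) // true: SetResponse marks the operation as complete
}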
func (*LLMResponse) SetTextResponse ¶ added in v0.3.37
func (instance *LLMResponse) SetTextResponse(value string) *LLMResponse
func (*LLMResponse) String ¶ added in v0.3.35
func (instance *LLMResponse) String() string
type OptionsAgent ¶ added in v0.3.38
type OptionsAgent struct {
	Name              string              `json:"name"`
	AutoCalcMaxTokens bool                `json:"auto-calc-max-tokens"`
	LimitMaxTokens    int                 `json:"limit-max-tokens"` // maximum number of tokens allowed in this agent
	Drivers           []*LLMDriverOptions `json:"drivers"`
}
func NewOptionsAgent ¶ added in v0.3.38
func NewOptionsAgent() (instance *OptionsAgent)
func (*OptionsAgent) AddDriver ¶ added in v0.3.38
func (instance *OptionsAgent) AddDriver(item *LLMDriverOptions)
func (*OptionsAgent) ContainsDriver ¶ added in v0.3.38
func (instance *OptionsAgent) ContainsDriver(uid string) bool
func (*OptionsAgent) Map ¶ added in v0.3.38
func (instance *OptionsAgent) Map() map[string]interface{}
func (*OptionsAgent) String ¶ added in v0.3.38
func (instance *OptionsAgent) String() string
type RequestMessage ¶ added in v0.3.37
type RequestMessage struct {
	Role    string   `json:"role"`              // the role of the message, either system, user or assistant
	Content string   `json:"content,omitempty"` // the content of the message
	Images  []string `json:"images,omitempty"`  // (optional) a list of base64-encoded images (for multimodal models such as llava)
}
RequestMessage is a standard Ollama chat message.
func NewAIMessageAssistant ¶ added in v0.3.35
func NewAIMessageAssistant() *RequestMessage
func NewAIMessageSystem ¶ added in v0.3.35
func NewAIMessageSystem() *RequestMessage
func NewAIMessageUser ¶ added in v0.3.35
func NewAIMessageUser() *RequestMessage
func (*RequestMessage) Clone ¶ added in v0.3.37
func (instance *RequestMessage) Clone() (response *RequestMessage)
func (*RequestMessage) Length ¶ added in v0.3.37
func (instance *RequestMessage) Length() int
func (*RequestMessage) Map ¶ added in v0.3.37
func (instance *RequestMessage) Map() (response map[string]interface{})
func (*RequestMessage) SetContent ¶ added in v0.3.37
func (instance *RequestMessage) SetContent(value string) *RequestMessage
func (*RequestMessage) String ¶ added in v0.3.37
func (instance *RequestMessage) String() string
type RequestMessageList ¶ added in v0.3.37
type RequestMessageList struct {
// contains filtered or unexported fields
}
func NewMessageList ¶ added in v0.3.35
func NewMessageList(args ...interface{}) (instance *RequestMessageList)
func (*RequestMessageList) Add ¶ added in v0.3.37
func (instance *RequestMessageList) Add(item *RequestMessage) *RequestMessageList
func (*RequestMessageList) Count ¶ added in v0.3.37
func (instance *RequestMessageList) Count() (response int)
Count returns the number of non-nil elements in the RequestMessageList.
func (*RequestMessageList) ForEach ¶ added in v0.3.37
func (instance *RequestMessageList) ForEach(f func(item *RequestMessage) any)
func (*RequestMessageList) GetSystem ¶ added in v0.3.37
func (instance *RequestMessageList) GetSystem() (response *RequestMessage)
func (*RequestMessageList) GetUser ¶ added in v0.3.37
func (instance *RequestMessageList) GetUser() (response *RequestMessage)
func (*RequestMessageList) Length ¶ added in v0.3.37
func (instance *RequestMessageList) Length() (response int)
Length calculates the total length of the content across all non-nil RequestMessage objects in the list.
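Example ¶
A sketch in example-test form contrasting Count (number of messages) with Length (total content length); it assumes SetSystem and SetUser each add one message when none exists.
func ExampleRequestMessageList_Length() {
	list := NewMessageList()
	list.SetSystem("You are a helpful assistant.")
	list.SetUser("What is the capital of France?")
	fmt.Println(list.Count())  // expected 2: non-nil messages in the list
	fmt.Println(list.Length()) // total content length across both messages
}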
func (*RequestMessageList) Map ¶ added in v0.3.37
func (instance *RequestMessageList) Map() (response []map[string]interface{})
func (*RequestMessageList) SetSystem ¶ added in v0.3.37
func (instance *RequestMessageList) SetSystem(text string) *RequestMessageList
func (*RequestMessageList) SetUser ¶ added in v0.3.37
func (instance *RequestMessageList) SetUser(text string, images ...string) *RequestMessageList
func (*RequestMessageList) String ¶ added in v0.3.37
func (instance *RequestMessageList) String() string
type RequestOptions ¶ added in v0.3.37
type RequestOptions struct {
	// optionals, useful
	N           int     `json:"n,omitempty"`           // Default 1. How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
	MaxTokens   int     `json:"max_tokens,omitempty"`  // The maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.
	Temperature float32 `json:"temperature,omitempty"` // Default 1. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.
	TopP        float32 `json:"top_p,omitempty"`       // Default 1. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
	User        string  `json:"user,omitempty"`        // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.

	// optionals
	FrequencyPenalty int                    `json:"frequency_penalty,omitempty"` // Default 0. Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	LogitBias        map[string]interface{} `json:"logit_bias,omitempty"`        // Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	LogProbs         bool                   `json:"logprobs,omitempty"`          // Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model.
	TopLogProbs      int                    `json:"top_logprobs,omitempty"`      // An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
	PresencePenalty  float32                `json:"presence_penalty,omitempty"`  // Default 0. Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	Seed             string                 `json:"seed,omitempty"`              // This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
	Stop             string                 `json:"stop,omitempty"`              // string / array / null. Up to 4 sequences where the API will stop generating further tokens.
	Stream           bool                   `json:"stream,omitempty"`            // If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
	Tools            []interface{}          `json:"tools,omitempty"`             // A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
	ToolChoice       interface{}            `json:"tool_choice,omitempty"`       // Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {"type": "function", "function": {"name": "my_function"}} forces the model to call that function. none is the default when no functions are present; auto is the default if functions are present.
}
func NewModelOptions ¶ added in v0.3.35
func NewModelOptions() (instance *RequestOptions)
func (*RequestOptions) Map ¶ added in v0.3.37
func (instance *RequestOptions) Map() map[string]interface{}
func (*RequestOptions) String ¶ added in v0.3.37
func (instance *RequestOptions) String() string
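Example ¶
A tuning sketch in example-test form; the values are illustrative and follow the field comments above (alter temperature or top_p, not both).
func ExampleRequestOptions() {
	opts := NewModelOptions()
	opts.Temperature = 0.2 // lower temperature: more focused, deterministic output
	opts.MaxTokens = 1024  // cap on generated tokens
	fmt.Println(opts.String())
}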
type RequestPayload ¶ added in v0.3.37
type RequestPayload struct {
	Lang              string `json:"lang"`       // user lang
	UserQuery         string `json:"user-query"` // request of the user
	PromptLang        string `json:"prompt-lang"`
	FromLang          string `json:"from-lang"`
	ToLang            string `json:"to-lang"`
	Today             string `json:"today"`
	Now               string `json:"now"`
	LangCountryPrompt string `json:"lang-country-prompt"`
	RAGContext        any    `json:"context,omitempty"` // (optional) some context for prompts that need a limited knowledge for response (ex: RAG)
}
func NewRequestPayload ¶ added in v0.3.37
func NewRequestPayload(args ...any) (instance *RequestPayload)
func (*RequestPayload) Format ¶ added in v0.3.37
func (instance *RequestPayload) Format(text string) (response string)
func (*RequestPayload) Map ¶ added in v0.3.37
func (instance *RequestPayload) Map() (response map[string]interface{})
func (*RequestPayload) Parse ¶ added in v0.3.37
func (instance *RequestPayload) Parse(value any) bool
func (*RequestPayload) RAGContextAppendItems ¶ added in v0.3.38
func (instance *RequestPayload) RAGContextAppendItems(items ...any) *RequestPayload
func (*RequestPayload) RAGContextSetItems ¶ added in v0.3.38
func (instance *RequestPayload) RAGContextSetItems(items ...any) *RequestPayload
func (*RequestPayload) SetLang ¶ added in v0.3.37
func (instance *RequestPayload) SetLang(value string) *RequestPayload
func (*RequestPayload) String ¶ added in v0.3.37
func (instance *RequestPayload) String() string
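Example ¶
A formatting sketch in example-test form; the {{lang}} and {{user-query}} placeholders mirror the payload's JSON field names and are an assumption about the template syntax.
func ExampleRequestPayload_Format() {
	payload := NewRequestPayload()
	payload.SetLang("en")
	payload.UserQuery = "Translate this sentence."
	prompt := payload.Format("Reply in {{lang}}. Task: {{user-query}}")
	fmt.Println(prompt)
}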
type StreamHandler ¶ added in v0.3.36
type StreamHandler func(data []byte)
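Example ¶
A minimal handler sketch in example-test form; per the LLMRequest field comment, Stream is used only when streaming is allowed.
func ExampleStreamHandler() {
	var handler StreamHandler = func(data []byte) {
		fmt.Print(string(data)) // echo each partial chunk as it arrives
	}
	req := NewLLMRequest()
	req.Stream = handler // the driver invokes the handler with partial deltas
	_ = req
}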
Source Files ¶
- constants.go
- constants_prompt.go
- model_request.go
- model_request_context.go
- model_request_embeddings.go
- model_request_message.go
- model_request_options.go
- model_request_payload.go
- model_response.go
- options_llm_agent.go
- options_llm_driver.go
- options_llm_driver_creator.go
- utils.go
- utils_lang.go
- utils_prompts.go