Documentation ¶
Index ¶
- Constants
- func ExecuteTextChat(inputStruct ai.TextChatInput, client httpclient.IClient, job *base.Job, ...) (*structpb.Struct, error)
- func Init(bc base.Component) *component
- func NewClient(setup *structpb.Struct, logger *zap.Logger) *httpclient.Client
- type ChatModelRequester
- type IChatModelRequester
- type O1ModelRequester
- type SupportJSONOutputModelRequester
Constants ¶
View Source
const (
TextChatTask = "TASK_CHAT"
)
Variables ¶
This section is empty.
Functions ¶
func ExecuteTextChat ¶
Types ¶
type ChatModelRequester ¶
type ChatModelRequester struct { Input ai.TextChatInput Client httpclient.IClient }
func (*ChatModelRequester) SendChatRequest ¶
type IChatModelRequester ¶
type IChatModelRequester interface {
SendChatRequest(*base.Job, context.Context) (*structpb.Struct, error)
}
func ModelRequesterFactory ¶
func ModelRequesterFactory(input ai.TextChatInput, client httpclient.IClient) IChatModelRequester
type O1ModelRequester ¶
type O1ModelRequester struct { Input ai.TextChatInput Client httpclient.IClient }
O1ModelRequester handles requests for the o1-preview and o1-mini models.
func (*O1ModelRequester) SendChatRequest ¶
func (r *O1ModelRequester) SendChatRequest(_ *base.Job, _ context.Context) (*structpb.Struct, error)
When these models gain streaming support, the job and ctx parameters will be used; until then they are ignored.
type SupportJSONOutputModelRequester ¶
type SupportJSONOutputModelRequester struct { Input ai.TextChatInput Client httpclient.IClient }
https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format Compatible with GPT-4o, GPT-4o mini, GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.
func (*SupportJSONOutputModelRequester) SendChatRequest ¶
Click to show internal directories.
Click to hide internal directories.