Documentation ¶
Index ¶
Constants ¶
const OpenAIBackend = "https://api.openai.com/v1"
Variables ¶
var (
	// ErrResultTruncated is returned when the OpenAI API returned a truncated
	// result. The reason for the truncation will be appended to the error
	// string.
	ErrResultTruncated = errors.New("result was truncated")

	// ErrNoResults is returned if the OpenAI API returned an empty result. This
	// should not generally happen.
	ErrNoResults = errors.New("no results return from API")

	// ErrUnsupportedModel is returned if the SetModel method is provided with
	// an unsupported model.
	ErrUnsupportedModel = errors.New("unsupported model")

	// ErrUnexpectedStatus is returned when the OpenAI API returned a response
	// with an unexpected status code.
	ErrUnexpectedStatus = errors.New("OpenAI returned unexpected response")

	// ErrRequestFailed is returned when the OpenAI API returned an error for
	// the request.
	ErrRequestFailed = errors.New("request failed")
)
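These are sentinel errors, so they can be tested with errors.Is. A minimal sketch, assuming the package is imported as libaiac (the actual import path may differ) and that a client and ctx already exist (see NewClient below):

res, err := client.GenerateCode(ctx, libaiac.ModelGPT4, "generate a Dockerfile for a Go web server")
switch {
case err == nil:
	fmt.Println(res.Code)
case errors.Is(err, libaiac.ErrResultTruncated):
	// the model ran out of tokens; consider retrying with a larger-context
	// model such as ModelGPT432K
	log.Fatal(err)
case errors.Is(err, libaiac.ErrUnexpectedStatus), errors.Is(err, libaiac.ErrRequestFailed):
	// the API rejected or failed the request; the error string contains details
	log.Fatal(err)
default:
	// other failure, e.g. network error or context cancellation
	log.Fatal(err)
}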
var (
	// ModelGPT35Turbo represents the gpt-3.5-turbo model used by ChatGPT.
	ModelGPT35Turbo = Model{"gpt-3.5-turbo", 4096, ModelTypeChat}

	// ModelGPT35Turbo0301 represents the gpt-3.5-turbo-0301 model, a March 1st
	// 2023 snapshot of gpt-3.5-turbo.
	ModelGPT35Turbo0301 = Model{"gpt-3.5-turbo-0301", 4096, ModelTypeChat}

	// ModelGPT4 represents the gpt-4 model.
	ModelGPT4 = Model{"gpt-4", 8192, ModelTypeChat}

	// ModelGPT40314 represents the gpt-4-0314 model, a March 14th 2023 snapshot
	// of the gpt-4 model.
	ModelGPT40314 = Model{"gpt-4-0314", 8192, ModelTypeChat}

	// ModelGPT432K represents the gpt-4-32k model, which is the same as gpt-4,
	// but with 4x the context length.
	ModelGPT432K = Model{"gpt-4-32k", 32768, ModelTypeChat}

	// ModelGPT432K0314 represents the gpt-4-32k-0314 model, a March 14th 2023
	// snapshot of the gpt-4-32k model.
	ModelGPT432K0314 = Model{"gpt-4-32k-0314", 32768, ModelTypeChat}

	// ModelTextDaVinci3 represents the text-davinci-003 language generation
	// model.
	ModelTextDaVinci3 = Model{"text-davinci-003", 4097, ModelTypeCompletion}

	// ModelTextDaVinci2 represents the text-davinci-002 language generation
	// model.
	ModelTextDaVinci2 = Model{"text-davinci-002", 4097, ModelTypeCompletion}

	// SupportedModels is a list of all language models supported by aiac.
	SupportedModels = []Model{
		ModelGPT35Turbo,
		ModelGPT35Turbo0301,
		ModelGPT4,
		ModelGPT40314,
		ModelGPT432K,
		ModelGPT432K0314,
		ModelTextDaVinci3,
		ModelTextDaVinci2,
	}
)
var Version = "development"
Version contains aiac's version string
Functions ¶
func ExtractCode ¶
ExtractCode receives the full output string from the OpenAI API and attempts to extract a code block from it. OpenAI code blocks are generally Markdown blocks surrounded by the ``` string on both sides. If successful, the code string will be returned together with a true value, otherwise an empty string is returned together with a false value.
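For example, assuming the signature is func ExtractCode(output string) (string, bool) (the parameter name is not shown in this documentation), extraction from a Markdown-wrapped response might look like this:

output := "Here you go:\n```\nFROM golang:1.21\nCOPY . /app\n```\nLet me know if you need anything else."

code, ok := libaiac.ExtractCode(output)
if !ok {
	// no fenced code block was found; fall back to the full output
	code = output
}
fmt.Println(code) // prints only the contents of the fenced block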
Types ¶
type Client ¶
type Client struct {
	*requests.HTTPClient
	// contains filtered or unexported fields
}
Client is a structure used to continuously generate IaC code via OpenAI/ChatGPT
func NewClient ¶
func NewClient(opts *NewClientOptions) *Client
NewClient creates a new instance of the Client struct, with the provided input options. Neither the OpenAI API nor ChatGPT are yet contacted at this point.
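A minimal sketch of constructing a client, again assuming the package is imported as libaiac:

client := libaiac.NewClient(&libaiac.NewClientOptions{
	ApiKey: os.Getenv("OPENAI_API_KEY"),
	// URL and APIVersion are optional; when left empty, the OpenAIBackend
	// constant is used and no API version is sent.
})
// No request is made yet; the API is only contacted when generating code.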
func (*Client) Chat ¶
func (client *Client) Chat(model Model) *Conversation
Chat initiates a conversation with an OpenAI chat model. A conversation maintains context, allowing you to send further instructions that modify the output of previous requests, just like using the ChatGPT website.
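A sketch of starting a conversation (no request is sent until Send is called); client and ctx are assumed to exist as in the previous examples:

conv := client.Chat(libaiac.ModelGPT4)

res, err := conv.Send(ctx, "generate a Kubernetes Deployment for an nginx image")
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Code)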
func (*Client) Complete ¶
func (client *Client) Complete(
	ctx context.Context,
	model Model,
	prompt string,
) (res Response, err error)
Complete sends a request to OpenAI's Completions API using the provided model and prompt, and returns the response.
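A sketch of a one-shot completion with a text-completion model, under the same assumptions as the previous examples:

res, err := client.Complete(
	ctx,
	libaiac.ModelTextDaVinci3,
	"generate a .gitignore file for a Go project",
)
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.FullOutput)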
func (*Client) GenerateCode ¶
func (client *Client) GenerateCode(
	ctx context.Context,
	model Model,
	prompt string,
	msgs ...Message,
) (res Response, err error)
GenerateCode sends the provided prompt to the OpenAI API and returns a Response object. It is a convenience wrapper around client.Complete (for text completion models) and client.Chat.Send (for chat models).
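Because GenerateCode dispatches to the appropriate API based on the model, the same call works for chat and completion models alike. A sketch:

for _, model := range []libaiac.Model{libaiac.ModelGPT35Turbo, libaiac.ModelTextDaVinci3} {
	res, err := client.GenerateCode(ctx, model, "generate an Ansible playbook that installs nginx")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Code)
}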
type Conversation ¶
type Conversation struct {
// contains filtered or unexported fields
}
Conversation is a struct used to converse with an OpenAI chat model. It keeps track of all messages sent and received in order to maintain context, just like using ChatGPT.
func (*Conversation) Send ¶
func (conv *Conversation) Send(ctx context.Context, prompt string, msgs ...Message) (
	res Response,
	err error,
)
Send sends the provided message to the API and returns a Response object. To maintain context, all previous messages (whether from you to the API or vice-versa) are sent as well, allowing you to ask the API to modify the code it already generated.
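For example, a follow-up prompt can refine code generated earlier in the same conversation, since Send resubmits the prior messages for context. A sketch, continuing the conversation created with Chat above:

res, err := conv.Send(ctx, "generate Terraform for an AWS EC2 instance")
if err != nil {
	log.Fatal(err)
}

// The follow-up message does not need to repeat the original prompt;
// the previous request and response are sent along with it.
res, err = conv.Send(ctx, "add a security group that only allows inbound traffic on port 443")
if err != nil {
	log.Fatal(err)
}
fmt.Println(res.Code)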
type NewClientOptions ¶
type NewClientOptions struct {
	// ApiKey is the OpenAI API key to use for requests. This is required.
	ApiKey string

	// URL is the URL to use for API requests. This is optional and defaults
	// to the OpenAI backend (OpenAIBackend).
	URL string

	// APIVersion is the version of the OpenAI API to use. This is optional
	// and is left unspecified by default.
	APIVersion string
}
type Response ¶
type Response struct {
	// FullOutput is the complete output returned by the API. This is generally
	// a Markdown-formatted message that contains the generated code, plus
	// explanations, if any.
	FullOutput string

	// Code is the extracted code section from the complete output. If code was
	// not found or extraction otherwise failed, this will be the same as
	// FullOutput.
	Code string

	// APIKeyUsed is the API key used when making the request.
	APIKeyUsed string

	// TokensUsed is the number of tokens utilized by the request. This is
	// the "usage.total_tokens" value returned from the API.
	TokensUsed int64
}
Response is the struct returned from methods generating code via the OpenAI API.
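A sketch of consuming a Response, under the same assumptions as the earlier examples:

res, err := client.GenerateCode(ctx, libaiac.ModelGPT4, "generate a Dockerfile for a Python 3.11 app")
if err != nil {
	log.Fatal(err)
}

// res.Code holds only the extracted code block (or the full output if
// extraction failed), so it can be written straight to a file.
if err := os.WriteFile("Dockerfile", []byte(res.Code), 0o644); err != nil {
	log.Fatal(err)
}
fmt.Printf("generated with %d tokens\n", res.TokensUsed)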