openai

package module
v1.0.6 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 6, 2024 License: Apache-2.0 Imports: 18 Imported by: 0

README

Go OpenAI

Go Reference Go Report Card codecov

This library provides unofficial Go clients for the OpenAI API. We support:

  • ChatGPT 4o, o1
  • GPT-3, GPT-4
  • DALL·E 2, DALL·E 3
  • Whisper

Installation

go get gitee.com/ledao/go-openai

Currently, go-openai requires Go version 1.18 or greater.

Usage

ChatGPT example usage:
package main

import (
	"context"
	"fmt"
	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Build a client with the API key and send a single chat request.
	client := openai.NewClient("your token")

	req := openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	// Print the first (and only) choice returned by the model.
	fmt.Println(resp.Choices[0].Message.Content)
}

Getting an OpenAI API Key:
  1. Visit the OpenAI website at https://platform.openai.com/account/api-keys.
  2. If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In".
  3. Once logged in, navigate to your API key management page.
  4. Click on "Create new secret key".
  5. Enter a name for your new key, then click "Create secret key".
  6. Your new API key will be displayed. Use this key to interact with the OpenAI API.

Note: Your API key is sensitive information. Do not share it with anyone.

Other examples:
ChatGPT streaming completion
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Stream a chat completion and print each delta as it arrives.
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	}
	stream, err := c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("ChatCompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	fmt.Printf("Stream response: ")
	for {
		response, err := stream.Recv()
		// io.EOF signals that the server finished the stream normally.
		if errors.Is(err, io.EOF) {
			fmt.Println("\nStream finished")
			return
		}

		if err != nil {
			fmt.Printf("\nStream error: %v\n", err)
			return
		}

		// BUG FIX: the delta text is data, not a format string. Passing it
		// as Printf's format would mangle any '%' the model emits (and is
		// flagged by go vet's printf check); Print writes it verbatim.
		fmt.Print(response.Choices[0].Delta.Content)
	}
}
GPT-3 completion
package main

import (
	"context"
	"fmt"
	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Request a short legacy-style text completion.
	client := openai.NewClient("your token")

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	}

	resp, err := client.CreateCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("Completion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Text)
}
GPT-3 streaming completion
package main

import (
	"errors"
	"context"
	"fmt"
	"io"
	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Stream a legacy completion and print every chunk as it arrives.
	client := openai.NewClient("your token")

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	}

	stream, err := client.CreateCompletionStream(context.Background(), req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		// io.EOF marks the normal end of the stream.
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}
		fmt.Printf("Stream response: %v\n", response)
	}
}
Audio Speech-To-Text
package main

import (
	"context"
	"fmt"

	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Transcribe a local audio file with Whisper.
	client := openai.NewClient("your token")

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	}

	resp, err := client.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}

	fmt.Println(resp.Text)
}
Audio Captions
package main

import (
	"context"
	"fmt"
	"os"

	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Transcribe the audio file named on the command line and save the
	// result next to it as a .srt subtitle file.
	//
	// BUG FIX: the original indexed os.Args[1] unconditionally and panicked
	// when no argument was given; fail with a usage message instead.
	if len(os.Args) < 2 {
		fmt.Println("usage: captions <audio-file>")
		return
	}

	c := openai.NewClient(os.Getenv("OPENAI_KEY"))

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT, // SubRip captions instead of plain text
	}
	resp, err := c.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	f, err := os.Create(os.Args[1] + ".srt")
	if err != nil {
		fmt.Printf("Could not open file: %v\n", err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(resp.Text); err != nil {
		fmt.Printf("Error writing to file: %v\n", err)
		return
	}
}
DALL-E 2 image generation
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	openai "gitee.com/ledao/go-openai"
	"image/png"
	"os"
)

func main() {
	// Generate images twice: first returned as a URL, then as base64 data
	// that is decoded and written to disk as example.png.
	c := openai.NewClient("your token")
	ctx := context.Background()

	// Sample image by link. Local names follow Go's initialism convention
	// (URL fully capitalized) rather than the original reqUrl/respUrl.
	reqURL := openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	}

	respURL, err := c.CreateImage(ctx, reqURL)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}
	fmt.Println(respURL.Data[0].URL)

	// Example image as base64.
	reqBase64 := openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	}

	respBase64, err := c.CreateImage(ctx, reqBase64)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}

	// Decode the base64 payload into raw PNG bytes.
	imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
	if err != nil {
		fmt.Printf("Base64 decode error: %v\n", err)
		return
	}

	imgData, err := png.Decode(bytes.NewReader(imgBytes))
	if err != nil {
		fmt.Printf("PNG decode error: %v\n", err)
		return
	}

	file, err := os.Create("example.png")
	if err != nil {
		fmt.Printf("File creation error: %v\n", err)
		return
	}
	defer file.Close()

	if err := png.Encode(file, imgData); err != nil {
		fmt.Printf("PNG encode error: %v\n", err)
		return
	}

	fmt.Println("The image was saved as example.png")
}

Configuring proxy
// Route all API traffic through an HTTP proxy by installing a custom
// http.Client on the client config. (Renamed proxyUrl -> proxyURL per
// Go's initialism naming convention.)
config := openai.DefaultConfig("token")
proxyURL, err := url.Parse("http://localhost:{port}")
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyURL),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

c := openai.NewClientWithConfig(config)

See also: https://pkg.go.dev/gitee.com/ledao/go-openai#ClientConfig

ChatGPT support context
package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"gitee.com/ledao/go-openai"
)

func main() {
	// Interactive chat loop: every user line and every assistant reply is
	// appended to messages, so each request carries the full conversation.
	client := openai.NewClient("your token")
	messages := make([]openai.ChatCompletionMessage, 0)
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Conversation")
	fmt.Println("---------------------")

	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// BUG FIX: strip the trailing newline AND the carriage return that
		// precedes it on Windows; the old Replace(text, "\n", "", -1) left
		// a stray "\r" in the message content.
		text = strings.TrimRight(text, "\r\n")
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleUser,
			Content: text,
		})

		resp, err := client.CreateChatCompletion(
			context.Background(),
			openai.ChatCompletionRequest{
				Model:    openai.GPT3Dot5Turbo,
				Messages: messages,
			},
		)

		// On error, keep the loop alive so the user can try again.
		if err != nil {
			fmt.Printf("ChatCompletion error: %v\n", err)
			continue
		}

		// Record the assistant's reply so the next turn has context.
		content := resp.Choices[0].Message.Content
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: content,
		})
		fmt.Println(content)
	}
}
Azure OpenAI ChatGPT
package main

import (
	"context"
	"fmt"

	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Point the client at an Azure OpenAI deployment instead of api.openai.com.
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping := map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }
	client := openai.NewClientWithConfig(config)

	req := openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello Azure OpenAI!"},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Embedding Semantic Similarity
package main

import (
	"context"
	"log"
	openai "gitee.com/ledao/go-openai"

)

func main() {
	// Embed a query and a target text, then compare the two vectors.
	client := openai.NewClient("your-token")
	ctx := context.Background()

	// embed requests a single embedding, aborting with failMsg on error.
	embed := func(text, failMsg string) openai.Embedding {
		resp, err := client.CreateEmbeddings(ctx, openai.EmbeddingRequest{
			Input: []string{text},
			Model: openai.AdaEmbeddingV2,
		})
		if err != nil {
			log.Fatal(failMsg, err)
		}
		return resp.Data[0]
	}

	queryEmbedding := embed("How many chucks would a woodchuck chuck", "Error creating query embedding:")
	targetEmbedding := embed("How many chucks would a woodchuck chuck if the woodchuck could chuck wood", "Error creating target embedding:")

	// Compare the two embeddings via their dot product.
	similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
	if err != nil {
		log.Fatal("Error calculating dot product:", err)
	}

	log.Printf("The similarity score between the query and the target is %f", similarity)
}

Azure OpenAI Embeddings
package main

import (
	"context"
	"fmt"

	openai "gitee.com/ledao/go-openai"
)

func main() {
	// Vectorize a string through an Azure OpenAI embeddings deployment.
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	config.APIVersion = "2023-05-15" // optional update to latest API version

	//If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	//config.AzureModelMapperFunc = func(model string) string {
	//    azureModelMapping := map[string]string{
	//        "gpt-3.5-turbo":"your gpt-3.5-turbo deployment name",
	//    }
	//    return azureModelMapping[model]
	//}

	client := openai.NewClientWithConfig(config)

	resp, err := client.CreateEmbeddings(
		context.Background(),
		openai.EmbeddingRequest{
			Input: []string{"Text to vectorize"},
			Model: openai.AdaEmbeddingV2,
		})
	if err != nil {
		fmt.Printf("CreateEmbeddings error: %v\n", err)
		return
	}

	vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
	fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}
JSON Schema for function calling

It is now possible for chat completion to choose to call a function for more information (see the OpenAI function-calling developer documentation).

In order to describe the type of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library, however we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.

The developer documents give this JSON schema definition as an example:

{
  "name":"get_current_weather",
  "description":"Get the current weather in a given location",
  "parameters":{
    "type":"object",
    "properties":{
        "location":{
          "type":"string",
          "description":"The city and state, e.g. San Francisco, CA"
        },
        "unit":{
          "type":"string",
          "enum":[
              "celsius",
              "fahrenheit"
          ]
        }
    },
    "required":[
        "location"
    ]
  }
}

Using the jsonschema package, this schema could be created using structs as such:

FunctionDefinition{
  Name: "get_current_weather",
  Parameters: jsonschema.Definition{
    Type: jsonschema.Object,
    Properties: map[string]jsonschema.Definition{
      "location": {
        Type: jsonschema.String,
        Description: "The city and state, e.g. San Francisco, CA",
      },
      "unit": {
        Type: jsonschema.String,
        Enum: []string{"celsius", "fahrenheit"},
      },
    },
    Required: []string{"location"},
  },
}

The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).

Error handling

OpenAI maintains clear documentation on how to handle API errors.

example:

e := &openai.APIError{}
if errors.As(err, &e) {
  switch e.HTTPStatusCode {
    case 401:
      // invalid auth or key (do not retry)
    case 429:
      // rate limiting or engine overload (wait and retry) 
    case 500:
      // openai server error (retry)
    default:
      // unhandled
  }
}

Fine Tune Model
package main

import (
	"context"
	"fmt"
	"gitee.com/ledao/go-openai"
)

func main() {
	// End-to-end fine-tuning flow: upload a JSONL training file, create a
	// fine-tuning job, then fetch the job to inspect its status/model name.
	client := openai.NewClient("your token")
	ctx := context.Background()

	// Completion-style models train from a .jsonl file of lines like:
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	//
	// Chat models train from lines like:
	// {"messages": [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}
	//
	// You can use the openai CLI tool to validate the data.
	// For more info - https://platform.openai.com/docs/guides/fine-tuning

	trainingFile, err := client.CreateFile(ctx, openai.FileRequest{
		FilePath: "training_prepared.jsonl",
		Purpose:  "fine-tune",
	})
	if err != nil {
		fmt.Printf("Upload JSONL file error: %v\n", err)
		return
	}

	// Create the fine-tuning job. This often takes minutes, but can take
	// hours if there are many jobs in the queue or your dataset is large.
	job, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
		TrainingFile: trainingFile.ID,
		Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
	})
	if err != nil {
		fmt.Printf("Creating new fine tune model error: %v\n", err)
		return
	}

	// Re-fetch the job to check its status and fine-tuned model name.
	job, err = client.RetrieveFineTuningJob(ctx, job.ID)
	if err != nil {
		fmt.Printf("Getting fine tune model error: %v\n", err)
		return
	}
	fmt.Println(job.FineTunedModel)

	// Once the job status is `succeeded`, use job.FineTunedModel as the
	// Model in a Completion Request or Chat Completion Request, e.g.:
	//
	// resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
	//	 Model:  job.FineTunedModel,
	//	 Prompt: "your prompt",
	// })
	// if err != nil {
	//	 fmt.Printf("Create completion error %v\n", err)
	//	 return
	// }
	//
	// fmt.Println(resp.Choices[0].Text)
}
Structured Outputs
package main

import (
	"context"
	"fmt"
	"log"

	"gitee.com/ledao/go-openai"
	"gitee.com/ledao/go-openai/jsonschema"
)

func main() {
	// Ask for a reply that must conform to a JSON schema generated from a
	// Go struct, then unmarshal the reply back into that struct.
	client := openai.NewClient("your token")

	type Result struct {
		Steps []struct {
			Explanation string `json:"explanation"`
			Output      string `json:"output"`
		} `json:"steps"`
		FinalAnswer string `json:"final_answer"`
	}

	var result Result
	schema, err := jsonschema.GenerateSchemaForType(result)
	if err != nil {
		log.Fatalf("GenerateSchemaForType error: %v", err)
	}

	req := openai.ChatCompletionRequest{
		Model: openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleSystem,
				Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
			},
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "how can I solve 8x + 7 = -23",
			},
		},
		// Constrain the model's output to the generated schema.
		ResponseFormat: &openai.ChatCompletionResponseFormat{
			Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
			JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
				Name:   "math_reasoning",
				Schema: schema,
				Strict: true,
			},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		log.Fatalf("CreateChatCompletion error: %v", err)
	}

	if err := schema.Unmarshal(resp.Choices[0].Message.Content, &result); err != nil {
		log.Fatalf("Unmarshal schema error: %v", err)
	}
	fmt.Println(result)
}
See the `examples/` folder for more.

Frequently Asked Questions

Why don't we get the same answer when specifying a temperature field of 0 and asking the same question?

Even when specifying a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play.

  1. Go OpenAI Behavior: When you specify a temperature field of 0 in Go OpenAI, the omitempty tag causes that field to be removed from the request. Consequently, the OpenAI API applies the default value of 1.
  2. Token Count for Input/Output: If there's a large number of tokens in the input and output, setting the temperature to 0 can still result in non-deterministic behavior. In particular, when using around 32k tokens, the likelihood of non-deterministic behavior becomes highest even with a temperature of 0.

Due to the factors mentioned above, different answers may be returned even for the same question.

Workarounds:

  1. As of November 2023, use the new seed parameter in conjunction with the system_fingerprint response field, alongside Temperature management.
  2. Try using math.SmallestNonzeroFloat32: By specifying math.SmallestNonzeroFloat32 in the temperature field instead of 0, you can mimic the behavior of setting it to 0.
  3. Limiting Token Count: By limiting the number of tokens in the input and output and especially avoiding large requests close to 32k tokens, you can reduce the risk of non-deterministic behavior.

By adopting these strategies, you can expect more consistent results.

Related Issues:
omitempty option of request struct will generate incorrect request when parameter is 0.

Does Go OpenAI provide a method to count tokens?

No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there's a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.

For counting tokens, a third-party tokenizer library may be helpful; see also:

Related Issues:
Is it possible to join the implementation of GPT3 Tokenizer

Contributing

By following Contributing Guidelines, we hope to ensure that your contributions are made smoothly and efficiently.

Thank you

We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project:

To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!

Documentation

Overview

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
resp, err := client.CreateChatCompletion(
	context.Background(),
	openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Hello!",
			},
		},
	},
)
if err != nil {
	fmt.Printf("ChatCompletion error: %v\n", err)
	return
}

fmt.Println(resp.Choices[0].Message.Content)
Output:

Example (Chatbot)
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

req := openai.ChatCompletionRequest{
	Model: openai.GPT3Dot5Turbo,
	Messages: []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleSystem,
			Content: "you are a helpful chatbot",
		},
	},
}
fmt.Println("Conversation")
fmt.Println("---------------------")
fmt.Print("> ")
s := bufio.NewScanner(os.Stdin)
for s.Scan() {
	req.Messages = append(req.Messages, openai.ChatCompletionMessage{
		Role:    openai.ChatMessageRoleUser,
		Content: s.Text(),
	})
	resp, err := client.CreateChatCompletion(context.Background(), req)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		continue
	}
	fmt.Printf("%s\n\n", resp.Choices[0].Message.Content)
	req.Messages = append(req.Messages, resp.Choices[0].Message)
	fmt.Print("> ")
}
Output:

Index

Examples

Constants

View Source
const (
	ChatMessageRoleSystem    = "system"
	ChatMessageRoleUser      = "user"
	ChatMessageRoleAssistant = "assistant"
	ChatMessageRoleFunction  = "function"
	ChatMessageRoleTool      = "tool"
)

Chat message role defined by the OpenAI API.

View Source
const (
	O1Mini                = "o1-mini"
	O1Mini20240912        = "o1-mini-2024-09-12"
	O1Preview             = "o1-preview"
	O1Preview20240912     = "o1-preview-2024-09-12"
	GPT432K0613           = "gpt-4-32k-0613"
	GPT432K0314           = "gpt-4-32k-0314"
	GPT432K               = "gpt-4-32k"
	GPT40613              = "gpt-4-0613"
	GPT40314              = "gpt-4-0314"
	GPT4o                 = "gpt-4o"
	GPT4o20240513         = "gpt-4o-2024-05-13"
	GPT4o20240806         = "gpt-4o-2024-08-06"
	GPT4oLatest           = "chatgpt-4o-latest"
	GPT4oMini             = "gpt-4o-mini"
	GPT4oMini20240718     = "gpt-4o-mini-2024-07-18"
	GPT4Turbo             = "gpt-4-turbo"
	GPT4Turbo20240409     = "gpt-4-turbo-2024-04-09"
	GPT4Turbo0125         = "gpt-4-0125-preview"
	GPT4Turbo1106         = "gpt-4-1106-preview"
	GPT4TurboPreview      = "gpt-4-turbo-preview"
	GPT4VisionPreview     = "gpt-4-vision-preview"
	GPT4                  = "gpt-4"
	GPT3Dot5Turbo0125     = "gpt-3.5-turbo-0125"
	GPT3Dot5Turbo1106     = "gpt-3.5-turbo-1106"
	GPT3Dot5Turbo0613     = "gpt-3.5-turbo-0613"
	GPT3Dot5Turbo0301     = "gpt-3.5-turbo-0301"
	GPT3Dot5Turbo16K      = "gpt-3.5-turbo-16k"
	GPT3Dot5Turbo16K0613  = "gpt-3.5-turbo-16k-0613"
	GPT3Dot5Turbo         = "gpt-3.5-turbo"
	GPT3Dot5TurboInstruct = "gpt-3.5-turbo-instruct"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextDavinci003 = "text-davinci-003"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextDavinci002 = "text-davinci-002"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextCurie001 = "text-curie-001"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextBabbage001 = "text-babbage-001"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextAda001 = "text-ada-001"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3TextDavinci001 = "text-davinci-001"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3DavinciInstructBeta = "davinci-instruct-beta"
	// Deprecated: Model is shutdown. Use davinci-002 instead.
	GPT3Davinci    = "davinci"
	GPT3Davinci002 = "davinci-002"
	// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
	GPT3CurieInstructBeta = "curie-instruct-beta"
	GPT3Curie             = "curie"
	GPT3Curie002          = "curie-002"
	// Deprecated: Model is shutdown. Use babbage-002 instead.
	GPT3Ada    = "ada"
	GPT3Ada002 = "ada-002"
	// Deprecated: Model is shutdown. Use babbage-002 instead.
	GPT3Babbage    = "babbage"
	GPT3Babbage002 = "babbage-002"
)

GPT3 defines the models provided by OpenAI for generating text completions. GPT3 models are designed for text-based tasks; for code-specific tasks, refer to the Codex series of models.

View Source
const (
	CodexCodeDavinci002 = "code-davinci-002"
	CodexCodeCushman001 = "code-cushman-001"
	CodexCodeDavinci001 = "code-davinci-001"
)

Codex Defines the models provided by OpenAI. These models are designed for code-specific tasks, and use a different tokenizer which optimizes for whitespace.

View Source
const (
	CreateImageSize256x256   = "256x256"
	CreateImageSize512x512   = "512x512"
	CreateImageSize1024x1024 = "1024x1024"
	// dall-e-3 supported only.
	CreateImageSize1792x1024 = "1792x1024"
	CreateImageSize1024x1792 = "1024x1792"
)

Image sizes defined by the OpenAI API.

View Source
const (
	CreateImageResponseFormatURL     = "url"
	CreateImageResponseFormatB64JSON = "b64_json"
)
View Source
const (
	CreateImageModelDallE2 = "dall-e-2"
	CreateImageModelDallE3 = "dall-e-3"
)
View Source
const (
	CreateImageQualityHD       = "hd"
	CreateImageQualityStandard = "standard"
)
View Source
const (
	CreateImageStyleVivid   = "vivid"
	CreateImageStyleNatural = "natural"
)
View Source
const (
	ModerationOmniLatest   = "omni-moderation-latest"
	ModerationOmni20240926 = "omni-moderation-2024-09-26"
	ModerationTextStable   = "text-moderation-stable"
	ModerationTextLatest   = "text-moderation-latest"
	// Deprecated: use ModerationTextStable and ModerationTextLatest instead.
	ModerationText001 = "text-moderation-001"
)

The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use text-moderation-stable, we will provide advanced notice before updating the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.

View Source
const (
	// TruncationStrategyAuto messages in the middle of the thread will be dropped to fit the context length of the model.
	TruncationStrategyAuto = TruncationStrategy("auto")
	// TruncationStrategyLastMessages the thread will be truncated to the n most recent messages in the thread.
	TruncationStrategyLastMessages = TruncationStrategy("last_messages")
)
View Source
const AzureAPIKeyHeader = "api-key"
View Source
const (
	Whisper1 = "whisper-1"
)

Whisper Defines the models provided by OpenAI to use when processing audio with OpenAI.

Variables

View Source
var (
	ErrChatCompletionInvalidModel       = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
	ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream")              //nolint:lll
	ErrContentFieldsMisused             = errors.New("can't use both Content and MultiContent properties simultaneously")
)
View Source
var (
	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens")                               //nolint:lll
	ErrCompletionUnsupportedModel              = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
	ErrCompletionStreamNotSupported            = errors.New("streaming is not supported with this method, please use CreateCompletionStream")                      //nolint:lll
	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string")                              //nolint:lll
)
View Source
var (
	ErrO1BetaLimitationsMessageTypes = errors.New("this model has beta-limitations, user and assistant messages only, system messages are not supported")                                  //nolint:lll
	ErrO1BetaLimitationsStreaming    = errors.New("this model has beta-limitations, streaming not supported")                                                                              //nolint:lll
	ErrO1BetaLimitationsTools        = errors.New("this model has beta-limitations, tools, function calling, and response format parameters are not supported")                            //nolint:lll
	ErrO1BetaLimitationsLogprobs     = errors.New("this model has beta-limitations, logprobs not supported")                                                                               //nolint:lll
	ErrO1BetaLimitationsOther        = errors.New("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") //nolint:lll
)
View Source
var (
	ErrModerationInvalidModel = errors.New("this model is not supported with moderation, please use text-moderation-stable or text-moderation-latest instead") //nolint:lll
)
View Source
var (
	ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages")
)
View Source
var ErrVectorLengthMismatch = errors.New("vector length mismatch")
View Source
var O1SeriesModels = map[string]struct{}{
	O1Mini:            {},
	O1Mini20240912:    {},
	O1Preview:         {},
	O1Preview20240912: {},
}

O1SeriesModels List of new Series of OpenAI models. Some old api attributes not supported.

Functions

This section is empty.

Types

type APIError

type APIError struct {
	Code           any         `json:"code,omitempty"`
	Message        string      `json:"message"`
	Param          *string     `json:"param,omitempty"`
	Type           string      `json:"type"`
	HTTPStatus     string      `json:"-"`
	HTTPStatusCode int         `json:"-"`
	InnerError     *InnerError `json:"innererror,omitempty"`
}

APIError provides error information returned by the OpenAI API. InnerError struct is only valid for Azure OpenAI Service.

Example

OpenAI maintains clear documentation on how to handle API errors.

see: https://platform.openai.com/docs/guides/error-codes/api-errors

var err error // Assume this is the error you are checking.
e := &openai.APIError{}
if errors.As(err, &e) {
	switch e.HTTPStatusCode {
	case 401:
	// invalid auth or key (do not retry)
	case 429:
	// rate limiting or engine overload (wait and retry)
	case 500:
	// openai server error (retry)
	default:
		// unhandled
	}
}
Output:

func (*APIError) Error

func (e *APIError) Error() string

func (*APIError) UnmarshalJSON

func (e *APIError) UnmarshalJSON(data []byte) (err error)

type APIType

type APIType string
const (
	APITypeOpenAI          APIType = "OPEN_AI"
	APITypeAzure           APIType = "AZURE"
	APITypeAzureAD         APIType = "AZURE_AD"
	APITypeCloudflareAzure APIType = "CLOUDFLARE_AZURE"
)

type Assistant

type Assistant struct {
	ID             string                 `json:"id"`
	Object         string                 `json:"object"`
	CreatedAt      int64                  `json:"created_at"`
	Name           *string                `json:"name,omitempty"`
	Description    *string                `json:"description,omitempty"`
	Model          string                 `json:"model"`
	Instructions   *string                `json:"instructions,omitempty"`
	Tools          []AssistantTool        `json:"tools"`
	ToolResources  *AssistantToolResource `json:"tool_resources,omitempty"`
	FileIDs        []string               `json:"file_ids,omitempty"` // Deprecated in v2
	Metadata       map[string]any         `json:"metadata,omitempty"`
	Temperature    *float32               `json:"temperature,omitempty"`
	TopP           *float32               `json:"top_p,omitempty"`
	ResponseFormat any                    `json:"response_format,omitempty"`
	// contains filtered or unexported fields
}

func (*Assistant) GetRateLimitHeaders

func (h *Assistant) GetRateLimitHeaders() RateLimitHeaders

func (*Assistant) Header

func (h *Assistant) Header() http.Header

func (*Assistant) SetHeader

func (h *Assistant) SetHeader(header http.Header)

type AssistantDeleteResponse

type AssistantDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

func (*AssistantDeleteResponse) GetRateLimitHeaders

func (h *AssistantDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (*AssistantDeleteResponse) Header

func (h *AssistantDeleteResponse) Header() http.Header

func (*AssistantDeleteResponse) SetHeader

func (h *AssistantDeleteResponse) SetHeader(header http.Header)

type AssistantFile

type AssistantFile struct {
	ID          string `json:"id"`
	Object      string `json:"object"`
	CreatedAt   int64  `json:"created_at"`
	AssistantID string `json:"assistant_id"`
	// contains filtered or unexported fields
}

func (*AssistantFile) GetRateLimitHeaders

func (h *AssistantFile) GetRateLimitHeaders() RateLimitHeaders

func (*AssistantFile) Header

func (h *AssistantFile) Header() http.Header

func (*AssistantFile) SetHeader

func (h *AssistantFile) SetHeader(header http.Header)

type AssistantFileRequest

type AssistantFileRequest struct {
	FileID string `json:"file_id"`
}

type AssistantFilesList

type AssistantFilesList struct {
	AssistantFiles []AssistantFile `json:"data"`
	// contains filtered or unexported fields
}

func (*AssistantFilesList) GetRateLimitHeaders

func (h *AssistantFilesList) GetRateLimitHeaders() RateLimitHeaders

func (*AssistantFilesList) Header

func (h *AssistantFilesList) Header() http.Header

func (*AssistantFilesList) SetHeader

func (h *AssistantFilesList) SetHeader(header http.Header)

type AssistantRequest

type AssistantRequest struct {
	Model          string                 `json:"model"`
	Name           *string                `json:"name,omitempty"`
	Description    *string                `json:"description,omitempty"`
	Instructions   *string                `json:"instructions,omitempty"`
	Tools          []AssistantTool        `json:"-"`
	FileIDs        []string               `json:"file_ids,omitempty"`
	Metadata       map[string]any         `json:"metadata,omitempty"`
	ToolResources  *AssistantToolResource `json:"tool_resources,omitempty"`
	ResponseFormat any                    `json:"response_format,omitempty"`
	Temperature    *float32               `json:"temperature,omitempty"`
	TopP           *float32               `json:"top_p,omitempty"`
}

AssistantRequest provides the assistant request parameters. When modifying the tools the API functions as the following: If Tools is undefined, no changes are made to the Assistant's tools. If Tools is empty slice it will effectively delete all of the Assistant's tools. If Tools is populated, it will replace all of the existing Assistant's tools with the provided tools.

func (AssistantRequest) MarshalJSON

func (a AssistantRequest) MarshalJSON() ([]byte, error)

MarshalJSON provides a custom marshaller for the assistant request to handle the API use cases If Tools is nil, the field is omitted from the JSON. If Tools is an empty slice, it's included in the JSON as an empty array ([]). If Tools is populated, it's included in the JSON with the elements.

type AssistantTool

type AssistantTool struct {
	Type     AssistantToolType   `json:"type"`
	Function *FunctionDefinition `json:"function,omitempty"`
}

type AssistantToolCodeInterpreter

type AssistantToolCodeInterpreter struct {
	FileIDs []string `json:"file_ids"`
}

type AssistantToolFileSearch

type AssistantToolFileSearch struct {
	VectorStoreIDs []string `json:"vector_store_ids"`
}

type AssistantToolResource

type AssistantToolResource struct {
	FileSearch      *AssistantToolFileSearch      `json:"file_search,omitempty"`
	CodeInterpreter *AssistantToolCodeInterpreter `json:"code_interpreter,omitempty"`
}

type AssistantToolType

type AssistantToolType string
const (
	AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter"
	AssistantToolTypeRetrieval       AssistantToolType = "retrieval"
	AssistantToolTypeFunction        AssistantToolType = "function"
	AssistantToolTypeFileSearch      AssistantToolType = "file_search"
)

type AssistantsList

type AssistantsList struct {
	Assistants []Assistant `json:"data"`
	LastID     *string     `json:"last_id"`
	FirstID    *string     `json:"first_id"`
	HasMore    bool        `json:"has_more"`
	// contains filtered or unexported fields
}

AssistantsList is a list of assistants.

func (*AssistantsList) GetRateLimitHeaders

func (h *AssistantsList) GetRateLimitHeaders() RateLimitHeaders

func (*AssistantsList) Header

func (h *AssistantsList) Header() http.Header

func (*AssistantsList) SetHeader

func (h *AssistantsList) SetHeader(header http.Header)

type AudioRequest

type AudioRequest struct {
	Model string

	// FilePath is either an existing file in your filesystem or a filename representing the contents of Reader.
	FilePath string

	// Reader is an optional io.Reader when you do not want to use an existing file.
	Reader io.Reader

	Prompt                 string
	Temperature            float32
	Language               string // Only for transcription.
	Format                 AudioResponseFormat
	TimestampGranularities []TranscriptionTimestampGranularity // Only for transcription.
}

AudioRequest represents a request structure for audio API.

func (AudioRequest) HasJSONResponse

func (r AudioRequest) HasJSONResponse() bool

HasJSONResponse returns true if the response format is JSON.

type AudioResponse

type AudioResponse struct {
	Task     string  `json:"task"`
	Language string  `json:"language"`
	Duration float64 `json:"duration"`
	Segments []struct {
		ID               int     `json:"id"`
		Seek             int     `json:"seek"`
		Start            float64 `json:"start"`
		End              float64 `json:"end"`
		Text             string  `json:"text"`
		Tokens           []int   `json:"tokens"`
		Temperature      float64 `json:"temperature"`
		AvgLogprob       float64 `json:"avg_logprob"`
		CompressionRatio float64 `json:"compression_ratio"`
		NoSpeechProb     float64 `json:"no_speech_prob"`
		Transient        bool    `json:"transient"`
	} `json:"segments"`
	Words []struct {
		Word  string  `json:"word"`
		Start float64 `json:"start"`
		End   float64 `json:"end"`
	} `json:"words"`
	Text string `json:"text"`
	// contains filtered or unexported fields
}

AudioResponse represents a response structure for audio API.

func (*AudioResponse) GetRateLimitHeaders

func (h *AudioResponse) GetRateLimitHeaders() RateLimitHeaders

func (*AudioResponse) Header

func (h *AudioResponse) Header() http.Header

func (*AudioResponse) SetHeader

func (h *AudioResponse) SetHeader(header http.Header)

type AudioResponseFormat

type AudioResponseFormat string

Response formats; Whisper uses AudioResponseFormatJSON by default.

const (
	AudioResponseFormatJSON        AudioResponseFormat = "json"
	AudioResponseFormatText        AudioResponseFormat = "text"
	AudioResponseFormatSRT         AudioResponseFormat = "srt"
	AudioResponseFormatVerboseJSON AudioResponseFormat = "verbose_json"
	AudioResponseFormatVTT         AudioResponseFormat = "vtt"
)

type Base64Embedding

type Base64Embedding struct {
	Object    string       `json:"object"`
	Embedding base64String `json:"embedding"`
	Index     int          `json:"index"`
}

Base64Embedding is a container for base64 encoded embeddings.

type Batch

type Batch struct {
	ID       string        `json:"id"`
	Object   string        `json:"object"`
	Endpoint BatchEndpoint `json:"endpoint"`
	Errors   *struct {
		Object string `json:"object,omitempty"`
		Data   []struct {
			Code    string  `json:"code,omitempty"`
			Message string  `json:"message,omitempty"`
			Param   *string `json:"param,omitempty"`
			Line    *int    `json:"line,omitempty"`
		} `json:"data"`
	} `json:"errors"`
	InputFileID      string             `json:"input_file_id"`
	CompletionWindow string             `json:"completion_window"`
	Status           string             `json:"status"`
	OutputFileID     *string            `json:"output_file_id"`
	ErrorFileID      *string            `json:"error_file_id"`
	CreatedAt        int                `json:"created_at"`
	InProgressAt     *int               `json:"in_progress_at"`
	ExpiresAt        *int               `json:"expires_at"`
	FinalizingAt     *int               `json:"finalizing_at"`
	CompletedAt      *int               `json:"completed_at"`
	FailedAt         *int               `json:"failed_at"`
	ExpiredAt        *int               `json:"expired_at"`
	CancellingAt     *int               `json:"cancelling_at"`
	CancelledAt      *int               `json:"cancelled_at"`
	RequestCounts    BatchRequestCounts `json:"request_counts"`
	Metadata         map[string]any     `json:"metadata"`
}

type BatchChatCompletionRequest

type BatchChatCompletionRequest struct {
	CustomID string                `json:"custom_id"`
	Body     ChatCompletionRequest `json:"body"`
	Method   string                `json:"method"`
	URL      BatchEndpoint         `json:"url"`
}

func (BatchChatCompletionRequest) MarshalBatchLineItem

func (r BatchChatCompletionRequest) MarshalBatchLineItem() []byte

type BatchCompletionRequest

type BatchCompletionRequest struct {
	CustomID string            `json:"custom_id"`
	Body     CompletionRequest `json:"body"`
	Method   string            `json:"method"`
	URL      BatchEndpoint     `json:"url"`
}

func (BatchCompletionRequest) MarshalBatchLineItem

func (r BatchCompletionRequest) MarshalBatchLineItem() []byte

type BatchEmbeddingRequest

type BatchEmbeddingRequest struct {
	CustomID string           `json:"custom_id"`
	Body     EmbeddingRequest `json:"body"`
	Method   string           `json:"method"`
	URL      BatchEndpoint    `json:"url"`
}

func (BatchEmbeddingRequest) MarshalBatchLineItem

func (r BatchEmbeddingRequest) MarshalBatchLineItem() []byte

type BatchEndpoint

type BatchEndpoint string
const (
	BatchEndpointChatCompletions BatchEndpoint = "/v1/chat/completions"
	BatchEndpointCompletions     BatchEndpoint = "/v1/completions"
	BatchEndpointEmbeddings      BatchEndpoint = "/v1/embeddings"
)

type BatchLineItem

type BatchLineItem interface {
	MarshalBatchLineItem() []byte
}

type BatchRequestCounts

type BatchRequestCounts struct {
	Total     int `json:"total"`
	Completed int `json:"completed"`
	Failed    int `json:"failed"`
}

type BatchResponse

type BatchResponse struct {
	Batch
	// contains filtered or unexported fields
}

func (*BatchResponse) GetRateLimitHeaders

func (h *BatchResponse) GetRateLimitHeaders() RateLimitHeaders

func (*BatchResponse) Header

func (h *BatchResponse) Header() http.Header

func (*BatchResponse) SetHeader

func (h *BatchResponse) SetHeader(header http.Header)

type ChatCompletionChoice

type ChatCompletionChoice struct {
	Index   int                   `json:"index"`
	Message ChatCompletionMessage `json:"message"`
	// FinishReason
	// stop: API returned complete message,
	// or a message terminated by one of the stop sequences provided via the stop parameter
	// length: Incomplete model output due to max_tokens parameter or token limit
	// function_call: The model decided to call a function
	// content_filter: Omitted content due to a flag from our content filters
	// null: API response still in progress or incomplete
	FinishReason         FinishReason         `json:"finish_reason"`
	LogProbs             *LogProbs            `json:"logprobs,omitempty"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type ChatCompletionMessage

type ChatCompletionMessage struct {
	Role         string `json:"role"`
	Content      string `json:"content"`
	Refusal      string `json:"refusal,omitempty"`
	MultiContent []ChatMessagePart

	// This property isn't in the official documentation, but it's in
	// the documentation for the official library for python:
	// - https://github.com/openai/openai-python/blob/main/chatml.md
	// - https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	Name string `json:"name,omitempty"`

	FunctionCall *FunctionCall `json:"function_call,omitempty"`

	// For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`

	// For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
	ToolCallID string `json:"tool_call_id,omitempty"`
}

func (ChatCompletionMessage) MarshalJSON

func (m ChatCompletionMessage) MarshalJSON() ([]byte, error)

func (*ChatCompletionMessage) UnmarshalJSON

func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) error

type ChatCompletionRequest

type ChatCompletionRequest struct {
	Model    string                  `json:"model"`
	Messages []ChatCompletionMessage `json:"messages"`
	// MaxTokens The maximum number of tokens that can be generated in the chat completion.
	// This value can be used to control costs for text generated via API.
	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`
	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
	MaxCompletionTokens int                           `json:"max_completion_tokens,omitempty"`
	Temperature         float32                       `json:"temperature,omitempty"`
	TopP                float32                       `json:"top_p,omitempty"`
	N                   int                           `json:"n,omitempty"`
	Stream              bool                          `json:"stream,omitempty"`
	Stop                []string                      `json:"stop,omitempty"`
	PresencePenalty     float32                       `json:"presence_penalty,omitempty"`
	ResponseFormat      *ChatCompletionResponseFormat `json:"response_format,omitempty"`
	Seed                *int                          `json:"seed,omitempty"`
	FrequencyPenalty    float32                       `json:"frequency_penalty,omitempty"`
	// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`
	// LogProbs indicates whether to return log probabilities of the output tokens or not.
	// If true, returns the log probabilities of each output token returned in the content of message.
	// This option is currently not available on the gpt-4-vision-preview model.
	LogProbs bool `json:"logprobs,omitempty"`
	// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
	// token position, each with an associated log probability.
	// logprobs must be set to true if this parameter is used.
	TopLogProbs int    `json:"top_logprobs,omitempty"`
	User        string `json:"user,omitempty"`
	// Deprecated: use Tools instead.
	Functions []FunctionDefinition `json:"functions,omitempty"`
	// Deprecated: use ToolChoice instead.
	FunctionCall any    `json:"function_call,omitempty"`
	Tools        []Tool `json:"tools,omitempty"`
	// This can be either a string or an ToolChoice object.
	ToolChoice any `json:"tool_choice,omitempty"`
	// Options for streaming response. Only set this when you set stream: true.
	StreamOptions *StreamOptions `json:"stream_options,omitempty"`
	// Disable the default behavior of parallel tool calls by setting it: false.
	ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
	// Store can be set to true to store the output of this completion request for use in distillations and evals.
	// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
	Store bool `json:"store,omitempty"`
	// Metadata to store with the completion.
	Metadata map[string]string `json:"metadata,omitempty"`
}

ChatCompletionRequest represents a request structure for chat completion API.

type ChatCompletionResponse

type ChatCompletionResponse struct {
	ID                  string                 `json:"id"`
	Object              string                 `json:"object"`
	Created             int64                  `json:"created"`
	Model               string                 `json:"model"`
	Choices             []ChatCompletionChoice `json:"choices"`
	Usage               Usage                  `json:"usage"`
	SystemFingerprint   string                 `json:"system_fingerprint"`
	PromptFilterResults []PromptFilterResult   `json:"prompt_filter_results,omitempty"`
	// contains filtered or unexported fields
}

ChatCompletionResponse represents a response structure for chat completion API.

func (*ChatCompletionResponse) GetRateLimitHeaders

func (h *ChatCompletionResponse) GetRateLimitHeaders() RateLimitHeaders

func (*ChatCompletionResponse) Header

func (h *ChatCompletionResponse) Header() http.Header

func (*ChatCompletionResponse) SetHeader

func (h *ChatCompletionResponse) SetHeader(header http.Header)

type ChatCompletionResponseFormat

type ChatCompletionResponseFormat struct {
	Type       ChatCompletionResponseFormatType        `json:"type,omitempty"`
	JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"`
}

type ChatCompletionResponseFormatJSONSchema

type ChatCompletionResponseFormatJSONSchema struct {
	Name        string         `json:"name"`
	Description string         `json:"description,omitempty"`
	Schema      json.Marshaler `json:"schema"`
	Strict      bool           `json:"strict"`
}

type ChatCompletionResponseFormatType

type ChatCompletionResponseFormatType string
const (
	ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
	ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema"
	ChatCompletionResponseFormatTypeText       ChatCompletionResponseFormatType = "text"
)

type ChatCompletionStream

type ChatCompletionStream struct {
	// contains filtered or unexported fields
}

ChatCompletionStream Note: Perhaps it is more elegant to abstract Stream using generics.

func (ChatCompletionStream) Close

func (stream ChatCompletionStream) Close() error

func (ChatCompletionStream) Recv

func (stream ChatCompletionStream) Recv() (response ChatCompletionStreamResponse, err error)

type ChatCompletionStreamChoice

type ChatCompletionStreamChoice struct {
	Index                int                                 `json:"index"`
	Delta                ChatCompletionStreamChoiceDelta     `json:"delta"`
	Logprobs             *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
	FinishReason         FinishReason                        `json:"finish_reason"`
	ContentFilterResults ContentFilterResults                `json:"content_filter_results,omitempty"`
}

type ChatCompletionStreamChoiceDelta

type ChatCompletionStreamChoiceDelta struct {
	Content      string        `json:"content,omitempty"`
	Role         string        `json:"role,omitempty"`
	FunctionCall *FunctionCall `json:"function_call,omitempty"`
	ToolCalls    []ToolCall    `json:"tool_calls,omitempty"`
	Refusal      string        `json:"refusal,omitempty"`
}

type ChatCompletionStreamChoiceLogprobs

type ChatCompletionStreamChoiceLogprobs struct {
	Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
	Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
}

type ChatCompletionStreamResponse

type ChatCompletionStreamResponse struct {
	ID                  string                       `json:"id"`
	Object              string                       `json:"object"`
	Created             int64                        `json:"created"`
	Model               string                       `json:"model"`
	Choices             []ChatCompletionStreamChoice `json:"choices"`
	SystemFingerprint   string                       `json:"system_fingerprint"`
	PromptAnnotations   []PromptAnnotation           `json:"prompt_annotations,omitempty"`
	PromptFilterResults []PromptFilterResult         `json:"prompt_filter_results,omitempty"`
	// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
	// When present, it contains a null value except for the last chunk which contains the token usage statistics
	// for the entire request.
	Usage *Usage `json:"usage,omitempty"`
}

type ChatCompletionTokenLogprob

type ChatCompletionTokenLogprob struct {
	Token       string                                 `json:"token"`
	Bytes       []int64                                `json:"bytes,omitempty"`
	Logprob     float64                                `json:"logprob,omitempty"`
	TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
}

type ChatCompletionTokenLogprobTopLogprob

type ChatCompletionTokenLogprobTopLogprob struct {
	Token   string  `json:"token"`
	Bytes   []int64 `json:"bytes"`
	Logprob float64 `json:"logprob"`
}

type ChatMessageImageURL

type ChatMessageImageURL struct {
	URL    string         `json:"url,omitempty"`
	Detail ImageURLDetail `json:"detail,omitempty"`
}

type ChatMessagePart

type ChatMessagePart struct {
	Type     ChatMessagePartType  `json:"type,omitempty"`
	Text     string               `json:"text,omitempty"`
	ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
}

type ChatMessagePartType

type ChatMessagePartType string
const (
	ChatMessagePartTypeText     ChatMessagePartType = "text"
	ChatMessagePartTypeImageURL ChatMessagePartType = "image_url"
)

type ChunkingStrategy

type ChunkingStrategy struct {
	Type   ChunkingStrategyType    `json:"type"`
	Static *StaticChunkingStrategy `json:"static,omitempty"`
}

type ChunkingStrategyType

type ChunkingStrategyType string
const (
	ChunkingStrategyTypeAuto   ChunkingStrategyType = "auto"
	ChunkingStrategyTypeStatic ChunkingStrategyType = "static"
)

type Client

type Client struct {
	// contains filtered or unexported fields
}

Client is OpenAI GPT-3 API client.

func NewClient

func NewClient(authToken string) *Client

NewClient creates new OpenAI API client.

func NewClientWithConfig

func NewClientWithConfig(config ClientConfig) *Client

NewClientWithConfig creates new OpenAI API client for specified config.

func NewOrgClient deprecated

func NewOrgClient(authToken, org string) *Client

NewOrgClient creates new OpenAI API client for specified Organization ID.

Deprecated: Please use NewClientWithConfig.

func (*Client) CancelBatch

func (c *Client) CancelBatch(
	ctx context.Context,
	batchID string,
) (response BatchResponse, err error)

CancelBatch — API call to Cancel batch.

func (*Client) CancelFineTune

func (c *Client) CancelFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error)

CancelFineTune cancels a fine-tune job. Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*Client) CancelFineTuningJob

func (c *Client) CancelFineTuningJob(ctx context.Context, fineTuningJobID string) (response FineTuningJob, err error)

CancelFineTuningJob cancels a fine-tuning job.

func (*Client) CancelRun

func (c *Client) CancelRun(
	ctx context.Context,
	threadID string,
	runID string) (response Run, err error)

CancelRun cancels a run.

func (*Client) CancelVectorStoreFileBatch

func (c *Client) CancelVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
) (response VectorStoreFileBatch, err error)

CancelVectorStoreFileBatch cancels a vector store file batch.

func (*Client) CreateAssistant

func (c *Client) CreateAssistant(ctx context.Context, request AssistantRequest) (response Assistant, err error)

CreateAssistant creates a new assistant.

func (*Client) CreateAssistantFile

func (c *Client) CreateAssistantFile(
	ctx context.Context,
	assistantID string,
	request AssistantFileRequest,
) (response AssistantFile, err error)

CreateAssistantFile creates a new assistant file.

func (*Client) CreateBatch

func (c *Client) CreateBatch(
	ctx context.Context,
	request CreateBatchRequest,
) (response BatchResponse, err error)

CreateBatch — API call to Create batch.

func (*Client) CreateBatchWithUploadFile

func (c *Client) CreateBatchWithUploadFile(
	ctx context.Context,
	request CreateBatchWithUploadFileRequest,
) (response BatchResponse, err error)

CreateBatchWithUploadFile — API call to Create batch with upload file.

func (*Client) CreateChatCompletion

func (c *Client) CreateChatCompletion(
	ctx context.Context,
	request ChatCompletionRequest,
) (response ChatCompletionResponse, err error)

CreateChatCompletion — API call to Create a completion for the chat message.

func (*Client) CreateChatCompletionStream

func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request ChatCompletionRequest,
) (stream *ChatCompletionStream, err error)

CreateChatCompletionStream — API call to create a chat completion w/ streaming support. It sets whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

stream, err := client.CreateChatCompletionStream(
	context.Background(),
	openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	},
)
if err != nil {
	fmt.Printf("ChatCompletionStream error: %v\n", err)
	return
}
defer stream.Close()

fmt.Print("Stream response: ")
for {
	var response openai.ChatCompletionStreamResponse
	response, err = stream.Recv()
	if errors.Is(err, io.EOF) {
		fmt.Println("\nStream finished")
		return
	}

	if err != nil {
		fmt.Printf("\nStream error: %v\n", err)
		return
	}

	fmt.Println(response.Choices[0].Delta.Content)
}
Output:

func (*Client) CreateCompletion

func (c *Client) CreateCompletion(
	ctx context.Context,
	request CompletionRequest,
) (response CompletionResponse, err error)

CreateCompletion — API call to create a completion. This is the main endpoint of the API. Returns new text as well as, if requested, the probabilities over each alternative token at each position.

If using a fine-tuned model, simply provide the model's ID in the CompletionRequest object, and the server will use the model's parameters to generate the completion.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
resp, err := client.CreateCompletion(
	context.Background(),
	openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	},
)
if err != nil {
	fmt.Printf("Completion error: %v\n", err)
	return
}
fmt.Println(resp.Choices[0].Text)
Output:

func (*Client) CreateCompletionStream

func (c *Client) CreateCompletionStream(
	ctx context.Context,
	request CompletionRequest,
) (stream *CompletionStream, err error)

CreateCompletionStream — API call to create a completion w/ streaming support. It sets whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
stream, err := client.CreateCompletionStream(
	context.Background(),
	openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	},
)
if err != nil {
	fmt.Printf("CompletionStream error: %v\n", err)
	return
}
defer stream.Close()

for {
	var response openai.CompletionResponse
	response, err = stream.Recv()
	if errors.Is(err, io.EOF) {
		fmt.Println("Stream finished")
		return
	}

	if err != nil {
		fmt.Printf("Stream error: %v\n", err)
		return
	}

	fmt.Printf("Stream response: %#v\n", response)
}
Output:

func (*Client) CreateEditImage

func (c *Client) CreateEditImage(ctx context.Context, request ImageEditRequest) (response ImageResponse, err error)

CreateEditImage - API call to edit an image based on a prompt and an existing image. This is part of the DALL-E API.

func (*Client) CreateEmbeddings

func (c *Client) CreateEmbeddings(
	ctx context.Context,
	conv EmbeddingRequestConverter,
) (res EmbeddingResponse, err error)

CreateEmbeddings returns an EmbeddingResponse which will contain an Embedding for every item in |body.Input|. https://beta.openai.com/docs/api-reference/embeddings/create

Body should be of type EmbeddingRequestStrings for embedding strings or EmbeddingRequestTokens for embedding groups of text already converted to tokens.

func (*Client) CreateFile

func (c *Client) CreateFile(ctx context.Context, request FileRequest) (file File, err error)

CreateFile uploads a jsonl file to GPT3. FilePath must be a local file path.

func (*Client) CreateFileBytes

func (c *Client) CreateFileBytes(ctx context.Context, request FileBytesRequest) (file File, err error)

CreateFileBytes uploads bytes directly to OpenAI without requiring a local file.

func (*Client) CreateFineTune deprecated

func (c *Client) CreateFineTune(ctx context.Context, request FineTuneRequest) (response FineTune, err error)

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*Client) CreateFineTuningJob

func (c *Client) CreateFineTuningJob(
	ctx context.Context,
	request FineTuningJobRequest,
) (response FineTuningJob, err error)

CreateFineTuningJob create a fine tuning job.

func (*Client) CreateImage

func (c *Client) CreateImage(ctx context.Context, request ImageRequest) (response ImageResponse, err error)

CreateImage - API call to create an image. This is the main endpoint of the DALL-E API.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

respURL, err := client.CreateImage(
	context.Background(),
	openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	},
)
if err != nil {
	fmt.Printf("Image creation error: %v\n", err)
	return
}
fmt.Println(respURL.Data[0].URL)
Output:

Example (Base64)
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

resp, err := client.CreateImage(
	context.Background(),
	openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize512x512,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	},
)
if err != nil {
	fmt.Printf("Image creation error: %v\n", err)
	return
}

b, err := base64.StdEncoding.DecodeString(resp.Data[0].B64JSON)
if err != nil {
	fmt.Printf("Base64 decode error: %v\n", err)
	return
}

f, err := os.Create("example.png")
if err != nil {
	fmt.Printf("File creation error: %v\n", err)
	return
}
defer f.Close()

_, err = f.Write(b)
if err != nil {
	fmt.Printf("File write error: %v\n", err)
	return
}

fmt.Println("The image was saved as example.png")
Output:

func (*Client) CreateMessage

func (c *Client) CreateMessage(ctx context.Context, threadID string, request MessageRequest) (msg Message, err error)

CreateMessage creates a new message.

func (*Client) CreateRun

func (c *Client) CreateRun(
	ctx context.Context,
	threadID string,
	request RunRequest,
) (response Run, err error)

CreateRun creates a new run.

func (*Client) CreateSpeech

func (c *Client) CreateSpeech(ctx context.Context, request CreateSpeechRequest) (response RawResponse, err error)

func (*Client) CreateThread

func (c *Client) CreateThread(ctx context.Context, request ThreadRequest) (response Thread, err error)

CreateThread creates a new thread.

func (*Client) CreateThreadAndRun

func (c *Client) CreateThreadAndRun(
	ctx context.Context,
	request CreateThreadAndRunRequest) (response Run, err error)

CreateThreadAndRun creates a new thread and immediately starts a run on it.

func (*Client) CreateTranscription

func (c *Client) CreateTranscription(
	ctx context.Context,
	request AudioRequest,
) (response AudioResponse, err error)

CreateTranscription — API call to create a transcription. Returns transcribed text.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
resp, err := client.CreateTranscription(
	context.Background(),
	openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	},
)
if err != nil {
	fmt.Printf("Transcription error: %v\n", err)
	return
}
fmt.Println(resp.Text)
Output:

Example (Captions)
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

resp, err := client.CreateTranscription(
	context.Background(),
	openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT,
	},
)
if err != nil {
	fmt.Printf("Transcription error: %v\n", err)
	return
}
f, err := os.Create(os.Args[1] + ".srt")
if err != nil {
	fmt.Printf("Could not open file: %v\n", err)
	return
}
defer f.Close()
if _, err = f.WriteString(resp.Text); err != nil {
	fmt.Printf("Error writing to file: %v\n", err)
	return
}
Output:

func (*Client) CreateTranslation

func (c *Client) CreateTranslation(
	ctx context.Context,
	request AudioRequest,
) (response AudioResponse, err error)

CreateTranslation — API call to translate audio into English.

Example
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
resp, err := client.CreateTranslation(
	context.Background(),
	openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	},
)
if err != nil {
	fmt.Printf("Translation error: %v\n", err)
	return
}
fmt.Println(resp.Text)
Output:

func (*Client) CreateVariImage

func (c *Client) CreateVariImage(ctx context.Context, request ImageVariRequest) (response ImageResponse, err error)

CreateVariImage - API call to create an image variation. This is the main endpoint of the DALL-E API. Use abbreviations(vari for variation) because ci-lint has a single-line length limit ...

func (*Client) CreateVectorStore

func (c *Client) CreateVectorStore(ctx context.Context, request VectorStoreRequest) (response VectorStore, err error)

CreateVectorStore creates a new vector store.

func (*Client) CreateVectorStoreFile

func (c *Client) CreateVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreFileRequest,
) (response VectorStoreFile, err error)

CreateVectorStoreFile creates a new vector store file.

func (*Client) CreateVectorStoreFileBatch

func (c *Client) CreateVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreFileBatchRequest,
) (response VectorStoreFileBatch, err error)

CreateVectorStoreFileBatch creates a new vector store file batch.

func (*Client) DeleteAssistant

func (c *Client) DeleteAssistant(
	ctx context.Context,
	assistantID string,
) (response AssistantDeleteResponse, err error)

DeleteAssistant deletes an assistant.

func (*Client) DeleteAssistantFile

func (c *Client) DeleteAssistantFile(
	ctx context.Context,
	assistantID string,
	fileID string,
) (err error)

DeleteAssistantFile deletes an existing file.

func (*Client) DeleteFile

func (c *Client) DeleteFile(ctx context.Context, fileID string) (err error)

DeleteFile deletes an existing file.

func (*Client) DeleteFineTune deprecated

func (c *Client) DeleteFineTune(ctx context.Context, fineTuneID string) (response FineTuneDeleteResponse, err error)

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (*Client) DeleteFineTuneModel

func (c *Client) DeleteFineTuneModel(ctx context.Context, modelID string) (
	response FineTuneModelDeleteResponse, err error)

DeleteFineTuneModel Deletes a fine-tune model. You must have the Owner role in your organization to delete a model.

func (*Client) DeleteMessage

func (c *Client) DeleteMessage(
	ctx context.Context,
	threadID, messageID string,
) (status MessageDeletionStatus, err error)

DeleteMessage deletes a message.

func (*Client) DeleteThread

func (c *Client) DeleteThread(
	ctx context.Context,
	threadID string,
) (response ThreadDeleteResponse, err error)

DeleteThread deletes a thread.

func (*Client) DeleteVectorStore

func (c *Client) DeleteVectorStore(
	ctx context.Context,
	vectorStoreID string,
) (response VectorStoreDeleteResponse, err error)

DeleteVectorStore deletes a vector store.

func (*Client) DeleteVectorStoreFile

func (c *Client) DeleteVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	fileID string,
) (err error)

DeleteVectorStoreFile deletes an existing file.

func (*Client) Edits

func (c *Client) Edits(ctx context.Context, request EditsRequest) (response EditsResponse, err error)

Edits Perform an API call to the Edits endpoint.

Deprecated: Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001)

will need to migrate to GPT-3.5 Turbo by January 4, 2024. You can use CreateChatCompletion or CreateChatCompletionStream instead.

func (*Client) GetEngine

func (c *Client) GetEngine(
	ctx context.Context,
	engineID string,
) (engine Engine, err error)

GetEngine Retrieves an engine instance, providing basic information about the engine such as the owner and availability.

func (*Client) GetFile

func (c *Client) GetFile(ctx context.Context, fileID string) (file File, err error)

GetFile Retrieves a file instance, providing basic information about the file such as the file name and purpose.

func (*Client) GetFileContent

func (c *Client) GetFileContent(ctx context.Context, fileID string) (content RawResponse, err error)

func (*Client) GetFineTune deprecated

func (c *Client) GetFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error)

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (*Client) GetModel

func (c *Client) GetModel(ctx context.Context, modelID string) (model Model, err error)

GetModel Retrieves a model instance, providing basic information about the model such as the owner and permissioning.

func (*Client) ListAssistantFiles

func (c *Client) ListAssistantFiles(
	ctx context.Context,
	assistantID string,
	limit *int,
	order *string,
	after *string,
	before *string,
) (response AssistantFilesList, err error)

ListAssistantFiles Lists the currently available files for an assistant.

func (*Client) ListAssistants

func (c *Client) ListAssistants(
	ctx context.Context,
	limit *int,
	order *string,
	after *string,
	before *string,
) (response AssistantsList, err error)

ListAssistants Lists the currently available assistants.

func (*Client) ListBatch

func (c *Client) ListBatch(ctx context.Context, after *string, limit *int) (response ListBatchResponse, err error)

ListBatch API call to List batch.

func (*Client) ListEngines

func (c *Client) ListEngines(ctx context.Context) (engines EnginesList, err error)

ListEngines Lists the currently available engines, and provides basic information about each option such as the owner and availability.

func (*Client) ListFiles

func (c *Client) ListFiles(ctx context.Context) (files FilesList, err error)

ListFiles Lists the currently available files, and provides basic information about each file such as the file name and purpose.

func (*Client) ListFineTuneEvents deprecated

func (c *Client) ListFineTuneEvents(ctx context.Context, fineTuneID string) (response FineTuneEventList, err error)

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (*Client) ListFineTunes deprecated

func (c *Client) ListFineTunes(ctx context.Context) (response FineTuneList, err error)

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (*Client) ListFineTuningJobEvents

func (c *Client) ListFineTuningJobEvents(
	ctx context.Context,
	fineTuningJobID string,
	setters ...ListFineTuningJobEventsParameter,
) (response FineTuningJobEventList, err error)

ListFineTuningJobEvents lists fine tuning job events.

func (*Client) ListMessage

func (c *Client) ListMessage(ctx context.Context, threadID string,
	limit *int,
	order *string,
	after *string,
	before *string,
	runID *string,
) (messages MessagesList, err error)

ListMessage fetches all messages in the thread.

func (*Client) ListMessageFiles

func (c *Client) ListMessageFiles(
	ctx context.Context,
	threadID, messageID string,
) (files MessageFilesList, err error)

ListMessageFiles fetches all files attached to a message.

func (*Client) ListModels

func (c *Client) ListModels(ctx context.Context) (models ModelsList, err error)

ListModels Lists the currently available models, and provides basic information about each model such as the model id and parent.

func (*Client) ListRunSteps

func (c *Client) ListRunSteps(
	ctx context.Context,
	threadID string,
	runID string,
	pagination Pagination,
) (response RunStepList, err error)

ListRunSteps lists run steps.

func (*Client) ListRuns

func (c *Client) ListRuns(
	ctx context.Context,
	threadID string,
	pagination Pagination,
) (response RunList, err error)

ListRuns lists runs.

func (*Client) ListVectorStoreFiles

func (c *Client) ListVectorStoreFiles(
	ctx context.Context,
	vectorStoreID string,
	pagination Pagination,
) (response VectorStoreFilesList, err error)

ListVectorStoreFiles Lists the currently available files for a vector store.

func (*Client) ListVectorStoreFilesInBatch

func (c *Client) ListVectorStoreFilesInBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
	pagination Pagination,
) (response VectorStoreFilesList, err error)

ListVectorStoreFilesInBatch Lists the currently available files for a vector store file batch.

func (*Client) ListVectorStores

func (c *Client) ListVectorStores(
	ctx context.Context,
	pagination Pagination,
) (response VectorStoresList, err error)

ListVectorStores Lists the currently available vector stores.

func (*Client) Moderations

func (c *Client) Moderations(ctx context.Context, request ModerationRequest) (response ModerationResponse, err error)

Moderations — perform a moderation API call over a string. Input can be an array or slice, but a string will reduce the complexity.

func (*Client) ModifyAssistant

func (c *Client) ModifyAssistant(
	ctx context.Context,
	assistantID string,
	request AssistantRequest,
) (response Assistant, err error)

ModifyAssistant modifies an assistant.

func (*Client) ModifyMessage

func (c *Client) ModifyMessage(
	ctx context.Context,
	threadID, messageID string,
	metadata map[string]string,
) (msg Message, err error)

ModifyMessage modifies a message.

func (*Client) ModifyRun

func (c *Client) ModifyRun(
	ctx context.Context,
	threadID string,
	runID string,
	request RunModifyRequest,
) (response Run, err error)

ModifyRun modifies a run.

func (*Client) ModifyThread

func (c *Client) ModifyThread(
	ctx context.Context,
	threadID string,
	request ModifyThreadRequest,
) (response Thread, err error)

ModifyThread modifies a thread.

func (*Client) ModifyVectorStore

func (c *Client) ModifyVectorStore(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreRequest,
) (response VectorStore, err error)

ModifyVectorStore modifies a vector store.

func (*Client) RetrieveAssistant

func (c *Client) RetrieveAssistant(
	ctx context.Context,
	assistantID string,
) (response Assistant, err error)

RetrieveAssistant retrieves an assistant.

func (*Client) RetrieveAssistantFile

func (c *Client) RetrieveAssistantFile(
	ctx context.Context,
	assistantID string,
	fileID string,
) (response AssistantFile, err error)

RetrieveAssistantFile retrieves an assistant file.

func (*Client) RetrieveBatch

func (c *Client) RetrieveBatch(
	ctx context.Context,
	batchID string,
) (response BatchResponse, err error)

RetrieveBatch — API call to Retrieve batch.

func (*Client) RetrieveFineTuningJob

func (c *Client) RetrieveFineTuningJob(
	ctx context.Context,
	fineTuningJobID string,
) (response FineTuningJob, err error)

RetrieveFineTuningJob retrieves a fine tuning job.

func (*Client) RetrieveMessage

func (c *Client) RetrieveMessage(
	ctx context.Context,
	threadID, messageID string,
) (msg Message, err error)

RetrieveMessage retrieves a Message.

func (*Client) RetrieveMessageFile

func (c *Client) RetrieveMessageFile(
	ctx context.Context,
	threadID, messageID, fileID string,
) (file MessageFile, err error)

RetrieveMessageFile fetches a message file.

func (*Client) RetrieveRun

func (c *Client) RetrieveRun(
	ctx context.Context,
	threadID string,
	runID string,
) (response Run, err error)

RetrieveRun retrieves a run.

func (*Client) RetrieveRunStep

func (c *Client) RetrieveRunStep(
	ctx context.Context,
	threadID string,
	runID string,
	stepID string,
) (response RunStep, err error)

RetrieveRunStep retrieves a run step.

func (*Client) RetrieveThread

func (c *Client) RetrieveThread(ctx context.Context, threadID string) (response Thread, err error)

RetrieveThread retrieves a thread.

func (*Client) RetrieveVectorStore

func (c *Client) RetrieveVectorStore(
	ctx context.Context,
	vectorStoreID string,
) (response VectorStore, err error)

RetrieveVectorStore retrieves a vector store.

func (*Client) RetrieveVectorStoreFile

func (c *Client) RetrieveVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	fileID string,
) (response VectorStoreFile, err error)

RetrieveVectorStoreFile retrieves a vector store file.

func (*Client) RetrieveVectorStoreFileBatch

func (c *Client) RetrieveVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
) (response VectorStoreFileBatch, err error)

RetrieveVectorStoreFileBatch retrieves a vector store file batch.

func (*Client) SubmitToolOutputs

func (c *Client) SubmitToolOutputs(
	ctx context.Context,
	threadID string,
	runID string,
	request SubmitToolOutputsRequest) (response Run, err error)

SubmitToolOutputs submits tool outputs.

func (*Client) UploadBatchFile

func (c *Client) UploadBatchFile(ctx context.Context, request UploadBatchFileRequest) (File, error)

UploadBatchFile — upload batch file.

type ClientConfig

type ClientConfig struct {
	BaseURL              string
	OrgID                string
	APIType              APIType
	APIVersion           string // required when APIType is APITypeAzure or APITypeAzureAD
	AssistantVersion     string
	AzureModelMapperFunc func(model string) string // replace model to azure deployment name func
	HTTPClient           HTTPDoer

	EmptyMessagesLimit uint
	// contains filtered or unexported fields
}

ClientConfig is a configuration of a client.

Example (ClientWithProxy)
config := openai.DefaultConfig(os.Getenv("OPENAI_API_KEY"))
port := os.Getenv("OPENAI_PROXY_PORT")
proxyURL, err := url.Parse(fmt.Sprintf("http://localhost:%s", port))
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyURL),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

client := openai.NewClientWithConfig(config)

client.CreateChatCompletion( //nolint:errcheck // outside of the scope of this example.
	context.Background(),
	openai.ChatCompletionRequest{
		// etc...
	},
)
Output:

func DefaultAzureConfig

func DefaultAzureConfig(apiKey, baseURL string) ClientConfig
Example
azureKey := os.Getenv("AZURE_OPENAI_API_KEY")       // Your azure API key
azureEndpoint := os.Getenv("AZURE_OPENAI_ENDPOINT") // Your azure OpenAI endpoint
config := openai.DefaultAzureConfig(azureKey, azureEndpoint)
client := openai.NewClientWithConfig(config)
resp, err := client.CreateChatCompletion(
	context.Background(),
	openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Hello Azure OpenAI!",
			},
		},
	},
)
if err != nil {
	fmt.Printf("ChatCompletion error: %v\n", err)
	return
}

fmt.Println(resp.Choices[0].Message.Content)
Output:

func DefaultConfig

func DefaultConfig(authToken string) ClientConfig

func (ClientConfig) GetAzureDeploymentByModel

func (c ClientConfig) GetAzureDeploymentByModel(model string) string

func (ClientConfig) String

func (ClientConfig) String() string

type CodeInterpreterToolResources

type CodeInterpreterToolResources struct {
	FileIDs []string `json:"file_ids,omitempty"`
}

type CodeInterpreterToolResourcesRequest

type CodeInterpreterToolResourcesRequest struct {
	FileIDs []string `json:"file_ids,omitempty"`
}

type CompletionChoice

type CompletionChoice struct {
	Text         string        `json:"text"`
	Index        int           `json:"index"`
	FinishReason string        `json:"finish_reason"`
	LogProbs     LogprobResult `json:"logprobs"`
}

CompletionChoice represents one of possible completions.

type CompletionRequest

type CompletionRequest struct {
	Model            string  `json:"model"`
	Prompt           any     `json:"prompt,omitempty"`
	BestOf           int     `json:"best_of,omitempty"`
	Echo             bool    `json:"echo,omitempty"`
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
	// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
	// refs: https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`
	// Store can be set to true to store the output of this completion request for use in distillations and evals.
	// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
	Store bool `json:"store,omitempty"`
	// Metadata to store with the completion.
	Metadata        map[string]string `json:"metadata,omitempty"`
	LogProbs        int               `json:"logprobs,omitempty"`
	MaxTokens       int               `json:"max_tokens,omitempty"`
	N               int               `json:"n,omitempty"`
	PresencePenalty float32           `json:"presence_penalty,omitempty"`
	Seed            *int              `json:"seed,omitempty"`
	Stop            []string          `json:"stop,omitempty"`
	Stream          bool              `json:"stream,omitempty"`
	Suffix          string            `json:"suffix,omitempty"`
	Temperature     float32           `json:"temperature,omitempty"`
	TopP            float32           `json:"top_p,omitempty"`
	User            string            `json:"user,omitempty"`
}

CompletionRequest represents a request structure for completion API.

type CompletionResponse

type CompletionResponse struct {
	ID      string             `json:"id"`
	Object  string             `json:"object"`
	Created int64              `json:"created"`
	Model   string             `json:"model"`
	Choices []CompletionChoice `json:"choices"`
	Usage   Usage              `json:"usage"`
	// contains filtered or unexported fields
}

CompletionResponse represents a response structure for completion API.

func (*CompletionResponse) GetRateLimitHeaders

func (h *CompletionResponse) GetRateLimitHeaders() RateLimitHeaders

func (*CompletionResponse) Header

func (h *CompletionResponse) Header() http.Header

func (*CompletionResponse) SetHeader

func (h *CompletionResponse) SetHeader(header http.Header)

type CompletionStream

type CompletionStream struct {
	// contains filtered or unexported fields
}

func (CompletionStream) Close

func (stream CompletionStream) Close() error

func (CompletionStream) Recv

func (stream CompletionStream) Recv() (response T, err error)

type CompletionTokensDetails

type CompletionTokensDetails struct {
	AudioTokens     int `json:"audio_tokens"`
	ReasoningTokens int `json:"reasoning_tokens"`
}

CompletionTokensDetails Breakdown of tokens used in a completion.

type ContentFilterResults

type ContentFilterResults struct {
	Hate      Hate      `json:"hate,omitempty"`
	SelfHarm  SelfHarm  `json:"self_harm,omitempty"`
	Sexual    Sexual    `json:"sexual,omitempty"`
	Violence  Violence  `json:"violence,omitempty"`
	JailBreak JailBreak `json:"jailbreak,omitempty"`
	Profanity Profanity `json:"profanity,omitempty"`
}

type CreateBatchRequest

type CreateBatchRequest struct {
	InputFileID      string         `json:"input_file_id"`
	Endpoint         BatchEndpoint  `json:"endpoint"`
	CompletionWindow string         `json:"completion_window"`
	Metadata         map[string]any `json:"metadata"`
}

type CreateBatchWithUploadFileRequest

type CreateBatchWithUploadFileRequest struct {
	Endpoint         BatchEndpoint  `json:"endpoint"`
	CompletionWindow string         `json:"completion_window"`
	Metadata         map[string]any `json:"metadata"`
	UploadBatchFileRequest
}

type CreateSpeechRequest

type CreateSpeechRequest struct {
	Model          SpeechModel          `json:"model"`
	Input          string               `json:"input"`
	Voice          SpeechVoice          `json:"voice"`
	ResponseFormat SpeechResponseFormat `json:"response_format,omitempty"` // Optional, default to mp3
	Speed          float64              `json:"speed,omitempty"`           // Optional, default to 1.0
}

type CreateThreadAndRunRequest

type CreateThreadAndRunRequest struct {
	RunRequest
	Thread ThreadRequest `json:"thread"`
}

type EditsChoice

type EditsChoice struct {
	Text  string `json:"text"`
	Index int    `json:"index"`
}

EditsChoice represents one of possible edits.

type EditsRequest

type EditsRequest struct {
	Model       *string `json:"model,omitempty"`
	Input       string  `json:"input,omitempty"`
	Instruction string  `json:"instruction,omitempty"`
	N           int     `json:"n,omitempty"`
	Temperature float32 `json:"temperature,omitempty"`
	TopP        float32 `json:"top_p,omitempty"`
}

EditsRequest represents a request structure for Edits API.

type EditsResponse

type EditsResponse struct {
	Object  string        `json:"object"`
	Created int64         `json:"created"`
	Usage   Usage         `json:"usage"`
	Choices []EditsChoice `json:"choices"`
	// contains filtered or unexported fields
}

EditsResponse represents a response structure for Edits API.

func (*EditsResponse) GetRateLimitHeaders

func (h *EditsResponse) GetRateLimitHeaders() RateLimitHeaders

func (*EditsResponse) Header

func (h *EditsResponse) Header() http.Header

func (*EditsResponse) SetHeader

func (h *EditsResponse) SetHeader(header http.Header)

type Embedding

type Embedding struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Each embedding is a vector of floating point numbers, such that the distance between two embeddings in the vector space is correlated with semantic similarity between two inputs in the original format. For example, if two texts are similar, then their vector representations should also be similar.

func (*Embedding) DotProduct

func (e *Embedding) DotProduct(other *Embedding) (float32, error)

DotProduct calculates the dot product of the embedding vector with another embedding vector. Both vectors must have the same length; otherwise, an ErrVectorLengthMismatch is returned. The method returns the calculated dot product as a float32 value.

type EmbeddingEncodingFormat

type EmbeddingEncodingFormat string

EmbeddingEncodingFormat is the format of the embeddings data. Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. If not specified OpenAI will use "float".

const (
	EmbeddingEncodingFormatFloat  EmbeddingEncodingFormat = "float"
	EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64"
)

type EmbeddingModel

type EmbeddingModel string

EmbeddingModel enumerates the models which can be used to generate Embedding vectors.

const (
	// Deprecated: The following block is shut down. Use text-embedding-ada-002 instead.
	AdaSimilarity         EmbeddingModel = "text-similarity-ada-001"
	BabbageSimilarity     EmbeddingModel = "text-similarity-babbage-001"
	CurieSimilarity       EmbeddingModel = "text-similarity-curie-001"
	DavinciSimilarity     EmbeddingModel = "text-similarity-davinci-001"
	AdaSearchDocument     EmbeddingModel = "text-search-ada-doc-001"
	AdaSearchQuery        EmbeddingModel = "text-search-ada-query-001"
	BabbageSearchDocument EmbeddingModel = "text-search-babbage-doc-001"
	BabbageSearchQuery    EmbeddingModel = "text-search-babbage-query-001"
	CurieSearchDocument   EmbeddingModel = "text-search-curie-doc-001"
	CurieSearchQuery      EmbeddingModel = "text-search-curie-query-001"
	DavinciSearchDocument EmbeddingModel = "text-search-davinci-doc-001"
	DavinciSearchQuery    EmbeddingModel = "text-search-davinci-query-001"
	AdaCodeSearchCode     EmbeddingModel = "code-search-ada-code-001"
	AdaCodeSearchText     EmbeddingModel = "code-search-ada-text-001"
	BabbageCodeSearchCode EmbeddingModel = "code-search-babbage-code-001"
	BabbageCodeSearchText EmbeddingModel = "code-search-babbage-text-001"

	AdaEmbeddingV2  EmbeddingModel = "text-embedding-ada-002"
	SmallEmbedding3 EmbeddingModel = "text-embedding-3-small"
	LargeEmbedding3 EmbeddingModel = "text-embedding-3-large"
)

type EmbeddingRequest

type EmbeddingRequest struct {
	Input          any                     `json:"input"`
	Model          EmbeddingModel          `json:"model"`
	User           string                  `json:"user"`
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

func (EmbeddingRequest) Convert

func (r EmbeddingRequest) Convert() EmbeddingRequest

type EmbeddingRequestConverter

type EmbeddingRequestConverter interface {
	// Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens
	Convert() EmbeddingRequest
}

type EmbeddingRequestStrings

type EmbeddingRequestStrings struct {
	// Input is a slice of strings for which you want to generate an Embedding vector.
	// Each input must not exceed 8192 tokens in length.
	// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
	// have observed inferior results when newlines are present.
	// E.g.
	//	"The food was delicious and the waiter..."
	Input []string `json:"input"`
	// ID of the model to use. You can use the List models API to see all of your available models,
	// or see our Model overview for descriptions of them.
	Model EmbeddingModel `json:"model"`
	// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
	User string `json:"user"`
	// EmbeddingEncodingFormat is the format of the embeddings data.
	// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
	// If not specified OpenAI will use "float".
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.

func (EmbeddingRequestStrings) Convert

type EmbeddingRequestTokens

type EmbeddingRequestTokens struct {
	// Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector.
	// Each input must not exceed 8192 tokens in length.
	// OpenAPI suggests replacing newlines (\n) in your input with a single space, as they
	// have observed inferior results when newlines are present.
	// E.g.
	//	"The food was delicious and the waiter..."
	Input [][]int `json:"input"`
	// ID of the model to use. You can use the List models API to see all of your available models,
	// or see our Model overview for descriptions of them.
	Model EmbeddingModel `json:"model"`
	// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
	User string `json:"user"`
	// EmbeddingEncodingFormat is the format of the embeddings data.
	// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
	// If not specified OpenAI will use "float".
	EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
	// Dimensions The number of dimensions the resulting output embeddings should have.
	// Only supported in text-embedding-3 and later models.
	Dimensions int `json:"dimensions,omitempty"`
}

func (EmbeddingRequestTokens) Convert

type EmbeddingResponse

type EmbeddingResponse struct {
	Object string         `json:"object"`
	Data   []Embedding    `json:"data"`
	Model  EmbeddingModel `json:"model"`
	Usage  Usage          `json:"usage"`
	// contains filtered or unexported fields
}

EmbeddingResponse is the response from a Create embeddings request.

func (*EmbeddingResponse) GetRateLimitHeaders

func (h *EmbeddingResponse) GetRateLimitHeaders() RateLimitHeaders

func (*EmbeddingResponse) Header

func (h *EmbeddingResponse) Header() http.Header

func (*EmbeddingResponse) SetHeader

func (h *EmbeddingResponse) SetHeader(header http.Header)

type EmbeddingResponseBase64

type EmbeddingResponseBase64 struct {
	Object string            `json:"object"`
	Data   []Base64Embedding `json:"data"`
	Model  EmbeddingModel    `json:"model"`
	Usage  Usage             `json:"usage"`
	// contains filtered or unexported fields
}

EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.

func (*EmbeddingResponseBase64) GetRateLimitHeaders

func (h *EmbeddingResponseBase64) GetRateLimitHeaders() RateLimitHeaders

func (*EmbeddingResponseBase64) Header

func (h *EmbeddingResponseBase64) Header() http.Header

func (*EmbeddingResponseBase64) SetHeader

func (h *EmbeddingResponseBase64) SetHeader(header http.Header)

func (*EmbeddingResponseBase64) ToEmbeddingResponse

func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error)

ToEmbeddingResponse converts an EmbeddingResponseBase64 to an EmbeddingResponse.

type Engine

type Engine struct {
	ID     string `json:"id"`
	Object string `json:"object"`
	Owner  string `json:"owner"`
	Ready  bool   `json:"ready"`
	// contains filtered or unexported fields
}

Engine struct represents an engine from the OpenAI API.

func (*Engine) GetRateLimitHeaders

func (h *Engine) GetRateLimitHeaders() RateLimitHeaders

func (*Engine) Header

func (h *Engine) Header() http.Header

func (*Engine) SetHeader

func (h *Engine) SetHeader(header http.Header)

type EnginesList

type EnginesList struct {
	Engines []Engine `json:"data"`
	// contains filtered or unexported fields
}

EnginesList is a list of engines.

func (*EnginesList) GetRateLimitHeaders

func (h *EnginesList) GetRateLimitHeaders() RateLimitHeaders

func (*EnginesList) Header

func (h *EnginesList) Header() http.Header

func (*EnginesList) SetHeader

func (h *EnginesList) SetHeader(header http.Header)

type ErrorResponse

type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

type File

type File struct {
	Bytes         int    `json:"bytes"`
	CreatedAt     int64  `json:"created_at"`
	ID            string `json:"id"`
	FileName      string `json:"filename"`
	Object        string `json:"object"`
	Status        string `json:"status"`
	Purpose       string `json:"purpose"`
	StatusDetails string `json:"status_details"`
	// contains filtered or unexported fields
}

File struct represents an OpenAI file.

func (*File) GetRateLimitHeaders

func (h *File) GetRateLimitHeaders() RateLimitHeaders

func (*File) Header

func (h *File) Header() http.Header

func (*File) SetHeader

func (h *File) SetHeader(header http.Header)

type FileBytesRequest

type FileBytesRequest struct {
	// the name of the uploaded file in OpenAI
	Name string
	// the bytes of the file
	Bytes []byte
	// the purpose of the file
	Purpose PurposeType
}

FileBytesRequest represents a file upload request.

type FileRequest

type FileRequest struct {
	FileName string `json:"file"`
	FilePath string `json:"-"`
	Purpose  string `json:"purpose"`
}

type FileSearchToolResources

type FileSearchToolResources struct {
	VectorStoreIDs []string `json:"vector_store_ids,omitempty"`
}

type FileSearchToolResourcesRequest

type FileSearchToolResourcesRequest struct {
	VectorStoreIDs []string                   `json:"vector_store_ids,omitempty"`
	VectorStores   []VectorStoreToolResources `json:"vector_stores,omitempty"`
}

type FilesList

type FilesList struct {
	Files []File `json:"data"`
	// contains filtered or unexported fields
}

FilesList is a list of files that belong to the user or organization.

func (*FilesList) GetRateLimitHeaders

func (h *FilesList) GetRateLimitHeaders() RateLimitHeaders

func (*FilesList) Header

func (h *FilesList) Header() http.Header

func (*FilesList) SetHeader

func (h *FilesList) SetHeader(header http.Header)

type FineTune deprecated

type FineTune struct {
	ID                string              `json:"id"`
	Object            string              `json:"object"`
	Model             string              `json:"model"`
	CreatedAt         int64               `json:"created_at"`
	FineTuneEventList []FineTuneEvent     `json:"events,omitempty"`
	FineTunedModel    string              `json:"fine_tuned_model"`
	HyperParams       FineTuneHyperParams `json:"hyperparams"`
	OrganizationID    string              `json:"organization_id"`
	ResultFiles       []File              `json:"result_files"`
	Status            string              `json:"status"`
	ValidationFiles   []File              `json:"validation_files"`
	TrainingFiles     []File              `json:"training_files"`
	UpdatedAt         int64               `json:"updated_at"`
	// contains filtered or unexported fields
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*FineTune) GetRateLimitHeaders

func (h *FineTune) GetRateLimitHeaders() RateLimitHeaders

func (*FineTune) Header

func (h *FineTune) Header() http.Header

func (*FineTune) SetHeader

func (h *FineTune) SetHeader(header http.Header)

type FineTuneDeleteResponse deprecated

type FineTuneDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*FineTuneDeleteResponse) GetRateLimitHeaders

func (h *FineTuneDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuneDeleteResponse) Header

func (h *FineTuneDeleteResponse) Header() http.Header

func (*FineTuneDeleteResponse) SetHeader

func (h *FineTuneDeleteResponse) SetHeader(header http.Header)

type FineTuneEvent deprecated

type FineTuneEvent struct {
	Object    string `json:"object"`
	CreatedAt int64  `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

type FineTuneEventList deprecated

type FineTuneEventList struct {
	Object string          `json:"object"`
	Data   []FineTuneEvent `json:"data"`
	// contains filtered or unexported fields
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*FineTuneEventList) GetRateLimitHeaders

func (h *FineTuneEventList) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuneEventList) Header

func (h *FineTuneEventList) Header() http.Header

func (*FineTuneEventList) SetHeader

func (h *FineTuneEventList) SetHeader(header http.Header)

type FineTuneHyperParams deprecated

type FineTuneHyperParams struct {
	BatchSize              int     `json:"batch_size"`
	LearningRateMultiplier float64 `json:"learning_rate_multiplier"`
	Epochs                 int     `json:"n_epochs"`
	PromptLossWeight       float64 `json:"prompt_loss_weight"`
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

type FineTuneList deprecated

type FineTuneList struct {
	Object string     `json:"object"`
	Data   []FineTune `json:"data"`
	// contains filtered or unexported fields
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

func (*FineTuneList) GetRateLimitHeaders

func (h *FineTuneList) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuneList) Header

func (h *FineTuneList) Header() http.Header

func (*FineTuneList) SetHeader

func (h *FineTuneList) SetHeader(header http.Header)

type FineTuneModelDeleteResponse

type FineTuneModelDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

FineTuneModelDeleteResponse represents the deletion status of a fine-tuned model.

func (*FineTuneModelDeleteResponse) GetRateLimitHeaders

func (h *FineTuneModelDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuneModelDeleteResponse) Header

func (h *FineTuneModelDeleteResponse) Header() http.Header

func (*FineTuneModelDeleteResponse) SetHeader

func (h *FineTuneModelDeleteResponse) SetHeader(header http.Header)

type FineTuneRequest deprecated

type FineTuneRequest struct {
	TrainingFile                 string    `json:"training_file"`
	ValidationFile               string    `json:"validation_file,omitempty"`
	Model                        string    `json:"model,omitempty"`
	Epochs                       int       `json:"n_epochs,omitempty"`
	BatchSize                    int       `json:"batch_size,omitempty"`
	LearningRateMultiplier       float32   `json:"learning_rate_multiplier,omitempty"`
	PromptLossRate               float32   `json:"prompt_loss_rate,omitempty"`
	ComputeClassificationMetrics bool      `json:"compute_classification_metrics,omitempty"`
	ClassificationClasses        int       `json:"classification_n_classes,omitempty"`
	ClassificationPositiveClass  string    `json:"classification_positive_class,omitempty"`
	ClassificationBetas          []float32 `json:"classification_betas,omitempty"`
	Suffix                       string    `json:"suffix,omitempty"`
}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends migrating to the new fine-tuning API implemented in fine_tuning_job.go.

type FineTuningJob

type FineTuningJob struct {
	ID              string          `json:"id"`
	Object          string          `json:"object"`
	CreatedAt       int64           `json:"created_at"`
	FinishedAt      int64           `json:"finished_at"`
	Model           string          `json:"model"`
	FineTunedModel  string          `json:"fine_tuned_model,omitempty"`
	OrganizationID  string          `json:"organization_id"`
	Status          string          `json:"status"`
	Hyperparameters Hyperparameters `json:"hyperparameters"`
	TrainingFile    string          `json:"training_file"`
	ValidationFile  string          `json:"validation_file,omitempty"`
	ResultFiles     []string        `json:"result_files"`
	TrainedTokens   int             `json:"trained_tokens"`
	// contains filtered or unexported fields
}

func (*FineTuningJob) GetRateLimitHeaders

func (h *FineTuningJob) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuningJob) Header

func (h *FineTuningJob) Header() http.Header

func (*FineTuningJob) SetHeader

func (h *FineTuningJob) SetHeader(header http.Header)

type FineTuningJobEvent

type FineTuningJobEvent struct {
	Object    string `json:"object"`
	ID        string `json:"id"`
	CreatedAt int    `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
	Data      any    `json:"data"`
	Type      string `json:"type"`
}

type FineTuningJobEventList

type FineTuningJobEventList struct {
	Object  string          `json:"object"`
	Data    []FineTuneEvent `json:"data"`
	HasMore bool            `json:"has_more"`
	// contains filtered or unexported fields
}

func (*FineTuningJobEventList) GetRateLimitHeaders

func (h *FineTuningJobEventList) GetRateLimitHeaders() RateLimitHeaders

func (*FineTuningJobEventList) Header

func (h *FineTuningJobEventList) Header() http.Header

func (*FineTuningJobEventList) SetHeader

func (h *FineTuningJobEventList) SetHeader(header http.Header)

type FineTuningJobRequest

type FineTuningJobRequest struct {
	TrainingFile    string           `json:"training_file"`
	ValidationFile  string           `json:"validation_file,omitempty"`
	Model           string           `json:"model,omitempty"`
	Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty"`
	Suffix          string           `json:"suffix,omitempty"`
}

type FinishReason

type FinishReason string
const (
	FinishReasonStop          FinishReason = "stop"
	FinishReasonLength        FinishReason = "length"
	FinishReasonFunctionCall  FinishReason = "function_call"
	FinishReasonToolCalls     FinishReason = "tool_calls"
	FinishReasonContentFilter FinishReason = "content_filter"
	FinishReasonNull          FinishReason = "null"
)

func (FinishReason) MarshalJSON

func (r FinishReason) MarshalJSON() ([]byte, error)

type FunctionCall

type FunctionCall struct {
	Name string `json:"name,omitempty"`
	// call function with arguments in JSON format
	Arguments string `json:"arguments,omitempty"`
}

type FunctionDefine deprecated

type FunctionDefine = FunctionDefinition

Deprecated: use FunctionDefinition instead.

type FunctionDefinition

type FunctionDefinition struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	Strict      bool   `json:"strict,omitempty"`
	// Parameters is an object describing the function.
	// You can pass json.RawMessage to describe the schema,
	// or you can pass in a struct which serializes to the proper JSON schema.
	// The jsonschema package is provided for convenience, but you should
	// consider another specialized library if you require more complex schemas.
	Parameters any `json:"parameters"`
}

type HTTPDoer

type HTTPDoer interface {
	Do(req *http.Request) (*http.Response, error)
}

type Hate

type Hate struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type Hyperparameters

type Hyperparameters struct {
	Epochs                 any `json:"n_epochs,omitempty"`
	LearningRateMultiplier any `json:"learning_rate_multiplier,omitempty"`
	BatchSize              any `json:"batch_size,omitempty"`
}

type ImageEditRequest

type ImageEditRequest struct {
	Image          *os.File `json:"image,omitempty"`
	Mask           *os.File `json:"mask,omitempty"`
	Prompt         string   `json:"prompt,omitempty"`
	Model          string   `json:"model,omitempty"`
	N              int      `json:"n,omitempty"`
	Size           string   `json:"size,omitempty"`
	ResponseFormat string   `json:"response_format,omitempty"`
}

ImageEditRequest represents the request structure for the image API.

type ImageFile

type ImageFile struct {
	FileID string `json:"file_id"`
}

type ImageRequest

type ImageRequest struct {
	Prompt         string `json:"prompt,omitempty"`
	Model          string `json:"model,omitempty"`
	N              int    `json:"n,omitempty"`
	Quality        string `json:"quality,omitempty"`
	Size           string `json:"size,omitempty"`
	Style          string `json:"style,omitempty"`
	ResponseFormat string `json:"response_format,omitempty"`
	User           string `json:"user,omitempty"`
}

ImageRequest represents the request structure for the image API.

type ImageResponse

type ImageResponse struct {
	Created int64                    `json:"created,omitempty"`
	Data    []ImageResponseDataInner `json:"data,omitempty"`
	// contains filtered or unexported fields
}

ImageResponse represents a response structure for the image API.

func (*ImageResponse) GetRateLimitHeaders

func (h *ImageResponse) GetRateLimitHeaders() RateLimitHeaders

func (*ImageResponse) Header

func (h *ImageResponse) Header() http.Header

func (*ImageResponse) SetHeader

func (h *ImageResponse) SetHeader(header http.Header)

type ImageResponseDataInner

type ImageResponseDataInner struct {
	URL           string `json:"url,omitempty"`
	B64JSON       string `json:"b64_json,omitempty"`
	RevisedPrompt string `json:"revised_prompt,omitempty"`
}

ImageResponseDataInner represents a response data structure for image API.

type ImageURLDetail

type ImageURLDetail string
const (
	ImageURLDetailHigh ImageURLDetail = "high"
	ImageURLDetailLow  ImageURLDetail = "low"
	ImageURLDetailAuto ImageURLDetail = "auto"
)

type ImageVariRequest

type ImageVariRequest struct {
	Image          *os.File `json:"image,omitempty"`
	Model          string   `json:"model,omitempty"`
	N              int      `json:"n,omitempty"`
	Size           string   `json:"size,omitempty"`
	ResponseFormat string   `json:"response_format,omitempty"`
}

ImageVariRequest represents the request structure for the image API.

type InnerError

type InnerError struct {
	Code                 string               `json:"code,omitempty"`
	ContentFilterResults ContentFilterResults `json:"content_filter_result,omitempty"`
}

InnerError represents Azure content-filtering details. Only valid for the Azure OpenAI Service.

type JailBreak

type JailBreak struct {
	Filtered bool `json:"filtered"`
	Detected bool `json:"detected"`
}

type ListBatchResponse

type ListBatchResponse struct {
	Object  string  `json:"object"`
	Data    []Batch `json:"data"`
	FirstID string  `json:"first_id"`
	LastID  string  `json:"last_id"`
	HasMore bool    `json:"has_more"`
	// contains filtered or unexported fields
}

func (*ListBatchResponse) GetRateLimitHeaders

func (h *ListBatchResponse) GetRateLimitHeaders() RateLimitHeaders

func (*ListBatchResponse) Header

func (h *ListBatchResponse) Header() http.Header

func (*ListBatchResponse) SetHeader

func (h *ListBatchResponse) SetHeader(header http.Header)

type ListFineTuningJobEventsParameter

type ListFineTuningJobEventsParameter func(*listFineTuningJobEventsParameters)

func ListFineTuningJobEventsWithAfter

func ListFineTuningJobEventsWithAfter(after string) ListFineTuningJobEventsParameter

func ListFineTuningJobEventsWithLimit

func ListFineTuningJobEventsWithLimit(limit int) ListFineTuningJobEventsParameter

type LogProb

type LogProb struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"` // Omitting the field if it is null
	// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
	// In rare cases, there may be fewer than the number of requested top_logprobs returned.
	TopLogProbs []TopLogProbs `json:"top_logprobs"`
}

LogProb represents the probability information for a token.

type LogProbs

type LogProbs struct {
	// Content is a list of message content tokens with log probability information.
	Content []LogProb `json:"content"`
}

LogProbs is the top-level structure containing the log probability information.

type LogprobResult

type LogprobResult struct {
	Tokens        []string             `json:"tokens"`
	TokenLogprobs []float32            `json:"token_logprobs"`
	TopLogprobs   []map[string]float32 `json:"top_logprobs"`
	TextOffset    []int                `json:"text_offset"`
}

LogprobResult represents logprob result of Choice.

type Message

type Message struct {
	ID          string           `json:"id"`
	Object      string           `json:"object"`
	CreatedAt   int              `json:"created_at"`
	ThreadID    string           `json:"thread_id"`
	Role        string           `json:"role"`
	Content     []MessageContent `json:"content"`
	FileIds     []string         `json:"file_ids"` //nolint:revive //backwards-compatibility
	AssistantID *string          `json:"assistant_id,omitempty"`
	RunID       *string          `json:"run_id,omitempty"`
	Metadata    map[string]any   `json:"metadata"`
	// contains filtered or unexported fields
}

func (*Message) GetRateLimitHeaders

func (h *Message) GetRateLimitHeaders() RateLimitHeaders

func (*Message) Header

func (h *Message) Header() http.Header

func (*Message) SetHeader

func (h *Message) SetHeader(header http.Header)

type MessageContent

type MessageContent struct {
	Type      string       `json:"type"`
	Text      *MessageText `json:"text,omitempty"`
	ImageFile *ImageFile   `json:"image_file,omitempty"`
}

type MessageDeletionStatus

type MessageDeletionStatus struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

func (*MessageDeletionStatus) GetRateLimitHeaders

func (h *MessageDeletionStatus) GetRateLimitHeaders() RateLimitHeaders

func (*MessageDeletionStatus) Header

func (h *MessageDeletionStatus) Header() http.Header

func (*MessageDeletionStatus) SetHeader

func (h *MessageDeletionStatus) SetHeader(header http.Header)

type MessageFile

type MessageFile struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	CreatedAt int    `json:"created_at"`
	MessageID string `json:"message_id"`
	// contains filtered or unexported fields
}

func (*MessageFile) GetRateLimitHeaders

func (h *MessageFile) GetRateLimitHeaders() RateLimitHeaders

func (*MessageFile) Header

func (h *MessageFile) Header() http.Header

func (*MessageFile) SetHeader

func (h *MessageFile) SetHeader(header http.Header)

type MessageFilesList

type MessageFilesList struct {
	MessageFiles []MessageFile `json:"data"`
	// contains filtered or unexported fields
}

func (*MessageFilesList) GetRateLimitHeaders

func (h *MessageFilesList) GetRateLimitHeaders() RateLimitHeaders

func (*MessageFilesList) Header

func (h *MessageFilesList) Header() http.Header

func (*MessageFilesList) SetHeader

func (h *MessageFilesList) SetHeader(header http.Header)

type MessageRequest

type MessageRequest struct {
	Role     string         `json:"role"`
	Content  string         `json:"content"`
	FileIds  []string       `json:"file_ids,omitempty"` //nolint:revive // backwards-compatibility
	Metadata map[string]any `json:"metadata,omitempty"`
}

type MessageText

type MessageText struct {
	Value       string `json:"value"`
	Annotations []any  `json:"annotations"`
}

type MessagesList

type MessagesList struct {
	Messages []Message `json:"data"`

	Object  string  `json:"object"`
	FirstID *string `json:"first_id"`
	LastID  *string `json:"last_id"`
	HasMore bool    `json:"has_more"`
	// contains filtered or unexported fields
}

func (*MessagesList) GetRateLimitHeaders

func (h *MessagesList) GetRateLimitHeaders() RateLimitHeaders

func (*MessagesList) Header

func (h *MessagesList) Header() http.Header

func (*MessagesList) SetHeader

func (h *MessagesList) SetHeader(header http.Header)

type Model

type Model struct {
	CreatedAt  int64        `json:"created"`
	ID         string       `json:"id"`
	Object     string       `json:"object"`
	OwnedBy    string       `json:"owned_by"`
	Permission []Permission `json:"permission"`
	Root       string       `json:"root"`
	Parent     string       `json:"parent"`
	// contains filtered or unexported fields
}

Model struct represents an OpenAI model.

func (*Model) GetRateLimitHeaders

func (h *Model) GetRateLimitHeaders() RateLimitHeaders

func (*Model) Header

func (h *Model) Header() http.Header

func (*Model) SetHeader

func (h *Model) SetHeader(header http.Header)

type ModelsList

type ModelsList struct {
	Models []Model `json:"data"`
	// contains filtered or unexported fields
}

ModelsList is a list of models, including those that belong to the user or organization.

func (*ModelsList) GetRateLimitHeaders

func (h *ModelsList) GetRateLimitHeaders() RateLimitHeaders

func (*ModelsList) Header

func (h *ModelsList) Header() http.Header

func (*ModelsList) SetHeader

func (h *ModelsList) SetHeader(header http.Header)

type ModerationRequest

type ModerationRequest struct {
	Input string `json:"input,omitempty"`
	Model string `json:"model,omitempty"`
}

ModerationRequest represents a request structure for the moderation API.

type ModerationResponse

type ModerationResponse struct {
	ID      string   `json:"id"`
	Model   string   `json:"model"`
	Results []Result `json:"results"`
	// contains filtered or unexported fields
}

ModerationResponse represents a response structure for the moderation API.

func (*ModerationResponse) GetRateLimitHeaders

func (h *ModerationResponse) GetRateLimitHeaders() RateLimitHeaders

func (*ModerationResponse) Header

func (h *ModerationResponse) Header() http.Header

func (*ModerationResponse) SetHeader

func (h *ModerationResponse) SetHeader(header http.Header)

type ModifyThreadRequest

type ModifyThreadRequest struct {
	Metadata      map[string]any `json:"metadata"`
	ToolResources *ToolResources `json:"tool_resources,omitempty"`
}

type Pagination

type Pagination struct {
	Limit  *int
	Order  *string
	After  *string
	Before *string
}

type Permission

type Permission struct {
	CreatedAt          int64       `json:"created"`
	ID                 string      `json:"id"`
	Object             string      `json:"object"`
	AllowCreateEngine  bool        `json:"allow_create_engine"`
	AllowSampling      bool        `json:"allow_sampling"`
	AllowLogprobs      bool        `json:"allow_logprobs"`
	AllowSearchIndices bool        `json:"allow_search_indices"`
	AllowView          bool        `json:"allow_view"`
	AllowFineTuning    bool        `json:"allow_fine_tuning"`
	Organization       string      `json:"organization"`
	Group              interface{} `json:"group"`
	IsBlocking         bool        `json:"is_blocking"`
}

Permission struct represents an OpenAI permission.

type Profanity

type Profanity struct {
	Filtered bool `json:"filtered"`
	Detected bool `json:"detected"`
}

type PromptAnnotation

type PromptAnnotation struct {
	PromptIndex          int                  `json:"prompt_index,omitempty"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type PromptFilterResult

type PromptFilterResult struct {
	Index                int                  `json:"index"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type PromptTokensDetails

type PromptTokensDetails struct {
	AudioTokens  int `json:"audio_tokens"`
	CachedTokens int `json:"cached_tokens"`
}

PromptTokensDetails Breakdown of tokens used in the prompt.

type PurposeType

type PurposeType string

PurposeType represents the purpose of the file when uploading.

const (
	PurposeFineTune         PurposeType = "fine-tune"
	PurposeFineTuneResults  PurposeType = "fine-tune-results"
	PurposeAssistants       PurposeType = "assistants"
	PurposeAssistantsOutput PurposeType = "assistants_output"
	PurposeBatch            PurposeType = "batch"
)

type RateLimitHeaders

type RateLimitHeaders struct {
	LimitRequests     int       `json:"x-ratelimit-limit-requests"`
	LimitTokens       int       `json:"x-ratelimit-limit-tokens"`
	RemainingRequests int       `json:"x-ratelimit-remaining-requests"`
	RemainingTokens   int       `json:"x-ratelimit-remaining-tokens"`
	ResetRequests     ResetTime `json:"x-ratelimit-reset-requests"`
	ResetTokens       ResetTime `json:"x-ratelimit-reset-tokens"`
}

RateLimitHeaders struct represents the OpenAI rate limit headers.

type RawResponse

type RawResponse struct {
	io.ReadCloser
	// contains filtered or unexported fields
}

func (*RawResponse) GetRateLimitHeaders

func (h *RawResponse) GetRateLimitHeaders() RateLimitHeaders

func (*RawResponse) Header

func (h *RawResponse) Header() http.Header

func (*RawResponse) SetHeader

func (h *RawResponse) SetHeader(header http.Header)

type ReponseFormat

type ReponseFormat struct {
	Type string `json:"type"`
}

ReponseFormat specifies the format the model must output. https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format. Type can either be text or json_object.

type RequestError

type RequestError struct {
	HTTPStatus     string
	HTTPStatusCode int
	Err            error
	Body           []byte
}

RequestError provides information about generic request errors.

func (*RequestError) Error

func (e *RequestError) Error() string

func (*RequestError) Unwrap

func (e *RequestError) Unwrap() error

type RequiredActionType

type RequiredActionType string
const (
	RequiredActionTypeSubmitToolOutputs RequiredActionType = "submit_tool_outputs"
)

type ResetTime

type ResetTime string

func (ResetTime) String

func (r ResetTime) String() string

func (ResetTime) Time

func (r ResetTime) Time() time.Time

type Response

type Response interface {
	SetHeader(http.Header)
}

type Result

type Result struct {
	Categories     ResultCategories     `json:"categories"`
	CategoryScores ResultCategoryScores `json:"category_scores"`
	Flagged        bool                 `json:"flagged"`
}

Result represents one of possible moderation results.

type ResultCategories

type ResultCategories struct {
	Hate                  bool `json:"hate"`
	HateThreatening       bool `json:"hate/threatening"`
	Harassment            bool `json:"harassment"`
	HarassmentThreatening bool `json:"harassment/threatening"`
	SelfHarm              bool `json:"self-harm"`
	SelfHarmIntent        bool `json:"self-harm/intent"`
	SelfHarmInstructions  bool `json:"self-harm/instructions"`
	Sexual                bool `json:"sexual"`
	SexualMinors          bool `json:"sexual/minors"`
	Violence              bool `json:"violence"`
	ViolenceGraphic       bool `json:"violence/graphic"`
}

ResultCategories represents Categories of Result.

type ResultCategoryScores

type ResultCategoryScores struct {
	Hate                  float32 `json:"hate"`
	HateThreatening       float32 `json:"hate/threatening"`
	Harassment            float32 `json:"harassment"`
	HarassmentThreatening float32 `json:"harassment/threatening"`
	SelfHarm              float32 `json:"self-harm"`
	SelfHarmIntent        float32 `json:"self-harm/intent"`
	SelfHarmInstructions  float32 `json:"self-harm/instructions"`
	Sexual                float32 `json:"sexual"`
	SexualMinors          float32 `json:"sexual/minors"`
	Violence              float32 `json:"violence"`
	ViolenceGraphic       float32 `json:"violence/graphic"`
}

ResultCategoryScores represents CategoryScores of Result.

type Run

type Run struct {
	ID             string             `json:"id"`
	Object         string             `json:"object"`
	CreatedAt      int64              `json:"created_at"`
	ThreadID       string             `json:"thread_id"`
	AssistantID    string             `json:"assistant_id"`
	Status         RunStatus          `json:"status"`
	RequiredAction *RunRequiredAction `json:"required_action,omitempty"`
	LastError      *RunLastError      `json:"last_error,omitempty"`
	ExpiresAt      int64              `json:"expires_at"`
	StartedAt      *int64             `json:"started_at,omitempty"`
	CancelledAt    *int64             `json:"cancelled_at,omitempty"`
	FailedAt       *int64             `json:"failed_at,omitempty"`
	CompletedAt    *int64             `json:"completed_at,omitempty"`
	Model          string             `json:"model"`
	Instructions   string             `json:"instructions,omitempty"`
	Tools          []Tool             `json:"tools"`
	FileIDS        []string           `json:"file_ids"` //nolint:revive // backwards-compatibility
	Metadata       map[string]any     `json:"metadata"`
	Usage          Usage              `json:"usage,omitempty"`

	Temperature *float32 `json:"temperature,omitempty"`
	// The maximum number of prompt tokens that may be used over the course of the run.
	// If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'.
	MaxPromptTokens int `json:"max_prompt_tokens,omitempty"`
	// The maximum number of completion tokens that may be used over the course of the run.
	// If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'.
	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
	// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
	TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`
	// contains filtered or unexported fields
}

func (*Run) GetRateLimitHeaders

func (h *Run) GetRateLimitHeaders() RateLimitHeaders

func (*Run) Header

func (h *Run) Header() http.Header

func (*Run) SetHeader

func (h *Run) SetHeader(header http.Header)

type RunError

type RunError string
const (
	RunErrorServerError       RunError = "server_error"
	RunErrorRateLimitExceeded RunError = "rate_limit_exceeded"
)

type RunLastError

type RunLastError struct {
	Code    RunError `json:"code"`
	Message string   `json:"message"`
}

type RunList

type RunList struct {
	Runs []Run `json:"data"`
	// contains filtered or unexported fields
}

RunList is a list of runs.

func (*RunList) GetRateLimitHeaders

func (h *RunList) GetRateLimitHeaders() RateLimitHeaders

func (*RunList) Header

func (h *RunList) Header() http.Header

func (*RunList) SetHeader

func (h *RunList) SetHeader(header http.Header)

type RunModifyRequest

type RunModifyRequest struct {
	Metadata map[string]any `json:"metadata,omitempty"`
}

type RunRequest

// RunRequest is the payload for creating a run on a thread. When set,
// Model, Instructions, and Tools override the assistant's configured
// defaults for this run only.
type RunRequest struct {
	AssistantID            string         `json:"assistant_id"`
	Model                  string         `json:"model,omitempty"`
	Instructions           string         `json:"instructions,omitempty"`
	AdditionalInstructions string         `json:"additional_instructions,omitempty"`
	Tools                  []Tool         `json:"tools,omitempty"`
	Metadata               map[string]any `json:"metadata,omitempty"`

	// Sampling temperature between 0 and 2. Higher values like 0.8 are more random;
	// lower values are more focused and deterministic.
	Temperature *float32 `json:"temperature,omitempty"`
	TopP        *float32 `json:"top_p,omitempty"`

	// The maximum number of prompt tokens that may be used over the course of the run.
	// If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'.
	MaxPromptTokens int `json:"max_prompt_tokens,omitempty"`

	// The maximum number of completion tokens that may be used over the course of the run.
	// If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'.
	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`

	// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
	TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`

	// ToolChoice can be either a string or a ToolChoice object.
	ToolChoice any `json:"tool_choice,omitempty"`
	// ResponseFormat can be either a string or a ResponseFormat object.
	ResponseFormat any `json:"response_format,omitempty"`
	// ParallelToolCalls disables the default parallel tool-call behavior
	// when set to false.
	ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
}

type RunRequiredAction

// RunRequiredAction describes what the caller must do before a run that is
// waiting on the client (e.g. submitting tool outputs) can continue.
type RunRequiredAction struct {
	Type              RequiredActionType `json:"type"`
	SubmitToolOutputs *SubmitToolOutputs `json:"submit_tool_outputs,omitempty"`
}

type RunStatus

// RunStatus represents the lifecycle status of a run.
type RunStatus string
const (
	RunStatusQueued         RunStatus = "queued"
	RunStatusInProgress     RunStatus = "in_progress"
	RunStatusRequiresAction RunStatus = "requires_action"
	RunStatusCancelling     RunStatus = "cancelling"
	RunStatusFailed         RunStatus = "failed"
	RunStatusCompleted      RunStatus = "completed"
	RunStatusIncomplete     RunStatus = "incomplete"
	RunStatusExpired        RunStatus = "expired"
	RunStatusCancelled      RunStatus = "cancelled"
)

type RunStep

// RunStep is a single step taken by an assistant during a run; Type
// discriminates between message creation and tool calls, with the
// corresponding detail in StepDetails.
type RunStep struct {
	ID          string         `json:"id"`
	Object      string         `json:"object"`
	CreatedAt   int64          `json:"created_at"`
	AssistantID string         `json:"assistant_id"`
	ThreadID    string         `json:"thread_id"`
	RunID       string         `json:"run_id"`
	Type        RunStepType    `json:"type"`
	Status      RunStepStatus  `json:"status"`
	StepDetails StepDetails    `json:"step_details"`
	LastError   *RunLastError  `json:"last_error,omitempty"`
	// Terminal timestamps below are nil until the step reaches the
	// corresponding state. NOTE(review): assumed Unix-epoch seconds like
	// CreatedAt — confirm against the API reference.
	ExpiredAt   *int64         `json:"expired_at,omitempty"`
	CancelledAt *int64         `json:"cancelled_at,omitempty"`
	FailedAt    *int64         `json:"failed_at,omitempty"`
	CompletedAt *int64         `json:"completed_at,omitempty"`
	Metadata    map[string]any `json:"metadata"`
	// contains filtered or unexported fields
}

func (*RunStep) GetRateLimitHeaders

func (h *RunStep) GetRateLimitHeaders() RateLimitHeaders

func (*RunStep) Header

func (h *RunStep) Header() http.Header

func (*RunStep) SetHeader

func (h *RunStep) SetHeader(header http.Header)

type RunStepList

// RunStepList is a page of run steps. FirstID, LastID, and HasMore
// support cursor-based pagination across pages.
type RunStepList struct {
	RunSteps []RunStep `json:"data"`

	FirstID string `json:"first_id"`
	LastID  string `json:"last_id"`
	HasMore bool   `json:"has_more"`
	// contains filtered or unexported fields
}

RunStepList is a list of steps.

func (*RunStepList) GetRateLimitHeaders

func (h *RunStepList) GetRateLimitHeaders() RateLimitHeaders

func (*RunStepList) Header

func (h *RunStepList) Header() http.Header

func (*RunStepList) SetHeader

func (h *RunStepList) SetHeader(header http.Header)

type RunStepStatus

// RunStepStatus represents the lifecycle status of a run step.
type RunStepStatus string
const (
	// RunStepStatusInProgress indicates the step is still executing.
	RunStepStatusInProgress RunStepStatus = "in_progress"
	// RunStepStatusCancelled indicates the step was cancelled (terminal).
	RunStepStatusCancelled RunStepStatus = "cancelled"
	// RunStepStatusCancelling is a misnomer kept for backwards
	// compatibility: its value is "cancelled", the terminal state, not an
	// intermediate "cancelling" state.
	//
	// Deprecated: use RunStepStatusCancelled instead.
	RunStepStatusCancelling RunStepStatus = "cancelled"
	RunStepStatusFailed     RunStepStatus = "failed"
	RunStepStatusCompleted  RunStepStatus = "completed"
	RunStepStatusExpired    RunStepStatus = "expired"
)

type RunStepType

// RunStepType discriminates the kind of work a run step performed:
// creating a message or invoking tools.
type RunStepType string
const (
	RunStepTypeMessageCreation RunStepType = "message_creation"
	RunStepTypeToolCalls       RunStepType = "tool_calls"
)

type SelfHarm

// SelfHarm reports the content-filter result for the self-harm category:
// whether the content was filtered and, if available, at what severity.
type SelfHarm struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type Sexual

// Sexual reports the content-filter result for the sexual-content category:
// whether the content was filtered and, if available, at what severity.
type Sexual struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

type SpeechModel

// SpeechModel identifies a text-to-speech model for speech requests.
type SpeechModel string
const (
	TTSModel1      SpeechModel = "tts-1"
	TTSModel1HD    SpeechModel = "tts-1-hd"
	// NOTE(review): "canary-tts" is not a documented OpenAI model name —
	// confirm this fork-specific constant is intentional.
	TTSModelCanary SpeechModel = "canary-tts"
)

type SpeechResponseFormat

// SpeechResponseFormat selects the audio container/encoding of generated speech.
type SpeechResponseFormat string
const (
	SpeechResponseFormatMp3  SpeechResponseFormat = "mp3"
	SpeechResponseFormatOpus SpeechResponseFormat = "opus"
	SpeechResponseFormatAac  SpeechResponseFormat = "aac"
	SpeechResponseFormatFlac SpeechResponseFormat = "flac"
	SpeechResponseFormatWav  SpeechResponseFormat = "wav"
	SpeechResponseFormatPcm  SpeechResponseFormat = "pcm"
)

type SpeechVoice

// SpeechVoice selects the voice used for text-to-speech generation.
type SpeechVoice string
const (
	VoiceAlloy   SpeechVoice = "alloy"
	VoiceEcho    SpeechVoice = "echo"
	VoiceFable   SpeechVoice = "fable"
	VoiceOnyx    SpeechVoice = "onyx"
	VoiceNova    SpeechVoice = "nova"
	VoiceShimmer SpeechVoice = "shimmer"
)

type StaticChunkingStrategy

// StaticChunkingStrategy configures fixed-size chunking for vector store
// files: the maximum tokens per chunk and the token overlap between
// consecutive chunks.
type StaticChunkingStrategy struct {
	MaxChunkSizeTokens int `json:"max_chunk_size_tokens"`
	ChunkOverlapTokens int `json:"chunk_overlap_tokens"`
}

type StepDetails

// StepDetails carries the type-specific payload of a run step; Type
// indicates which of the two optional fields is populated.
type StepDetails struct {
	Type            RunStepType                 `json:"type"`
	MessageCreation *StepDetailsMessageCreation `json:"message_creation,omitempty"`
	ToolCalls       []ToolCall                  `json:"tool_calls,omitempty"`
}

type StepDetailsMessageCreation

// StepDetailsMessageCreation identifies the message created by a
// message-creation run step.
type StepDetailsMessageCreation struct {
	MessageID string `json:"message_id"`
}

type StreamOptions

// StreamOptions configures optional behavior of streaming responses.
type StreamOptions struct {
	// If set, an additional chunk will be streamed before the data: [DONE] message.
	// The usage field on this chunk shows the token usage statistics for the entire request,
	// and the choices field will always be an empty array.
	// All other chunks will also include a usage field, but with a null value.
	IncludeUsage bool `json:"include_usage,omitempty"`
}

type SubmitToolOutputs

// SubmitToolOutputs lists the tool calls whose outputs the client must
// submit for a run in the "requires_action" state to proceed.
type SubmitToolOutputs struct {
	ToolCalls []ToolCall `json:"tool_calls"`
}

type SubmitToolOutputsRequest

// SubmitToolOutputsRequest is the payload for submitting tool outputs back
// to a run that is waiting on them.
type SubmitToolOutputsRequest struct {
	ToolOutputs []ToolOutput `json:"tool_outputs"`
}

type Thread

// Thread is a conversation thread object as returned by the Assistants API.
type Thread struct {
	ID            string         `json:"id"`
	Object        string         `json:"object"`
	CreatedAt     int64          `json:"created_at"`
	Metadata      map[string]any `json:"metadata"`
	ToolResources ToolResources  `json:"tool_resources,omitempty"`
	// contains filtered or unexported fields
}

func (*Thread) GetRateLimitHeaders

func (h *Thread) GetRateLimitHeaders() RateLimitHeaders

func (*Thread) Header

func (h *Thread) Header() http.Header

func (*Thread) SetHeader

func (h *Thread) SetHeader(header http.Header)

type ThreadAttachment

// ThreadAttachment associates an uploaded file with a message and declares
// which tools may access it.
type ThreadAttachment struct {
	FileID string                 `json:"file_id"`
	Tools  []ThreadAttachmentTool `json:"tools"`
}

type ThreadAttachmentTool

// ThreadAttachmentTool names a tool type that may access an attachment
// (e.g. "file_search" or "code_interpreter").
type ThreadAttachmentTool struct {
	Type string `json:"type"`
}

type ThreadDeleteResponse

// ThreadDeleteResponse is the API's acknowledgement of a thread deletion.
type ThreadDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

func (*ThreadDeleteResponse) GetRateLimitHeaders

func (h *ThreadDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (*ThreadDeleteResponse) Header

func (h *ThreadDeleteResponse) Header() http.Header

func (*ThreadDeleteResponse) SetHeader

func (h *ThreadDeleteResponse) SetHeader(header http.Header)

type ThreadMessage

// ThreadMessage is a message supplied when creating a thread. FileIDs is
// the legacy attachment mechanism; Attachments is the newer one.
type ThreadMessage struct {
	Role        ThreadMessageRole  `json:"role"`
	Content     string             `json:"content"`
	FileIDs     []string           `json:"file_ids,omitempty"`
	Attachments []ThreadAttachment `json:"attachments,omitempty"`
	Metadata    map[string]any     `json:"metadata,omitempty"`
}

type ThreadMessageRole

// ThreadMessageRole identifies the author role of a thread message.
type ThreadMessageRole string
const (
	ThreadMessageRoleAssistant ThreadMessageRole = "assistant"
	ThreadMessageRoleUser      ThreadMessageRole = "user"
)

type ThreadRequest

// ThreadRequest is the payload for creating a thread, optionally seeded
// with initial messages, metadata, and tool resources.
type ThreadRequest struct {
	Messages      []ThreadMessage       `json:"messages,omitempty"`
	Metadata      map[string]any        `json:"metadata,omitempty"`
	ToolResources *ToolResourcesRequest `json:"tool_resources,omitempty"`
}

type ThreadTruncationStrategy

// ThreadTruncationStrategy defines the truncation strategy to use for the
// thread: either automatic ("auto") or keeping only the last N messages.
type ThreadTruncationStrategy struct {
	// default 'auto'.
	Type TruncationStrategy `json:"type,omitempty"`
	// this field should be set if the truncation strategy is set to LastMessages.
	LastMessages *int `json:"last_messages,omitempty"`
}

ThreadTruncationStrategy defines the truncation strategy to use for the thread. https://platform.openai.com/docs/assistants/how-it-works/truncation-strategy.

type Tool

// Tool describes a tool made available to the model; Function is set only
// when Type is the function tool type.
type Tool struct {
	Type     ToolType            `json:"type"`
	Function *FunctionDefinition `json:"function,omitempty"`
}

type ToolCall

// ToolCall is a single tool invocation requested by the model.
type ToolCall struct {
	// Index is not nil only in chat completion chunk object
	Index    *int         `json:"index,omitempty"`
	ID       string       `json:"id"`
	Type     ToolType     `json:"type"`
	Function FunctionCall `json:"function"`
}

type ToolChoice

// ToolChoice forces the model to call a specific tool, identified by type
// and (for function tools) by function name.
type ToolChoice struct {
	Type     ToolType     `json:"type"`
	Function ToolFunction `json:"function,omitempty"`
}

type ToolFunction

// ToolFunction names the function targeted by a ToolChoice.
type ToolFunction struct {
	Name string `json:"name"`
}

type ToolOutput

// ToolOutput pairs a tool call's ID with the output produced by executing it.
type ToolOutput struct {
	ToolCallID string `json:"tool_call_id"`
	Output     any    `json:"output"`
}

type ToolResources

// ToolResources groups the per-tool resources attached to an assistant or
// thread, as returned by the API.
type ToolResources struct {
	CodeInterpreter *CodeInterpreterToolResources `json:"code_interpreter,omitempty"`
	FileSearch      *FileSearchToolResources      `json:"file_search,omitempty"`
}

type ToolResourcesRequest

// ToolResourcesRequest is the request-side counterpart of ToolResources,
// used when creating or updating an assistant or thread.
type ToolResourcesRequest struct {
	CodeInterpreter *CodeInterpreterToolResourcesRequest `json:"code_interpreter,omitempty"`
	FileSearch      *FileSearchToolResourcesRequest      `json:"file_search,omitempty"`
}

type ToolType

// ToolType identifies the kind of tool in a Tool, ToolCall, or ToolChoice.
type ToolType string
const (
	ToolTypeFunction ToolType = "function"
)

type TopLogProbs

// TopLogProbs is one candidate token with its log probability and, when
// present, the raw bytes of the token.
type TopLogProbs struct {
	Token   string  `json:"token"`
	LogProb float64 `json:"logprob"`
	Bytes   []byte  `json:"bytes,omitempty"`
}

type TranscriptionTimestampGranularity

// TranscriptionTimestampGranularity selects whether transcription
// timestamps are reported per word or per segment.
type TranscriptionTimestampGranularity string
const (
	TranscriptionTimestampGranularityWord    TranscriptionTimestampGranularity = "word"
	TranscriptionTimestampGranularitySegment TranscriptionTimestampGranularity = "segment"
)

type TruncationStrategy

// TruncationStrategy names a truncation strategy for thread management in
// an assistant run (e.g. "auto" or a last-messages strategy).
type TruncationStrategy string

TruncationStrategy defines the truncation strategies available for thread management in an assistant.

type UploadBatchFileRequest

// UploadBatchFileRequest accumulates batch line items to be serialized
// (via MarshalJSONL) into a JSONL batch input file named FileName.
type UploadBatchFileRequest struct {
	FileName string
	Lines    []BatchLineItem
}

func (*UploadBatchFileRequest) AddChatCompletion

func (r *UploadBatchFileRequest) AddChatCompletion(customerID string, body ChatCompletionRequest)

func (*UploadBatchFileRequest) AddCompletion

func (r *UploadBatchFileRequest) AddCompletion(customerID string, body CompletionRequest)

func (*UploadBatchFileRequest) AddEmbedding

func (r *UploadBatchFileRequest) AddEmbedding(customerID string, body EmbeddingRequest)

func (*UploadBatchFileRequest) MarshalJSONL

func (r *UploadBatchFileRequest) MarshalJSONL() []byte

type Usage

// Usage represents the total token usage per request to OpenAI, with
// optional per-category breakdowns when the API provides them.
type Usage struct {
	PromptTokens            int                      `json:"prompt_tokens"`
	CompletionTokens        int                      `json:"completion_tokens"`
	TotalTokens             int                      `json:"total_tokens"`
	PromptTokensDetails     *PromptTokensDetails     `json:"prompt_tokens_details"`
	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details"`
}

Usage Represents the total token usage per request to OpenAI.

type VectorStore

// VectorStore is a vector store object as returned by the API.
type VectorStore struct {
	ID           string               `json:"id"`
	Object       string               `json:"object"`
	CreatedAt    int64                `json:"created_at"`
	Name         string               `json:"name"`
	UsageBytes   int                  `json:"usage_bytes"`
	FileCounts   VectorStoreFileCount `json:"file_counts"`
	Status       string               `json:"status"`
	ExpiresAfter *VectorStoreExpires  `json:"expires_after"`
	ExpiresAt    *int                 `json:"expires_at"`
	Metadata     map[string]any       `json:"metadata"`
	// contains filtered or unexported fields
}

func (*VectorStore) GetRateLimitHeaders

func (h *VectorStore) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStore) Header

func (h *VectorStore) Header() http.Header

func (*VectorStore) SetHeader

func (h *VectorStore) SetHeader(header http.Header)

type VectorStoreDeleteResponse

// VectorStoreDeleteResponse is the API's acknowledgement of a vector
// store deletion.
type VectorStoreDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
	// contains filtered or unexported fields
}

func (*VectorStoreDeleteResponse) GetRateLimitHeaders

func (h *VectorStoreDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStoreDeleteResponse) Header

func (h *VectorStoreDeleteResponse) Header() http.Header

func (*VectorStoreDeleteResponse) SetHeader

func (h *VectorStoreDeleteResponse) SetHeader(header http.Header)

type VectorStoreExpires

// VectorStoreExpires describes a vector store's expiration policy: the
// number of days measured from the given anchor event.
type VectorStoreExpires struct {
	Anchor string `json:"anchor"`
	Days   int    `json:"days"`
}

type VectorStoreFile

// VectorStoreFile is a file attached to a vector store.
type VectorStoreFile struct {
	ID            string `json:"id"`
	Object        string `json:"object"`
	CreatedAt     int64  `json:"created_at"`
	VectorStoreID string `json:"vector_store_id"`
	UsageBytes    int    `json:"usage_bytes"`
	Status        string `json:"status"`
	// contains filtered or unexported fields
}

func (*VectorStoreFile) GetRateLimitHeaders

func (h *VectorStoreFile) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStoreFile) Header

func (h *VectorStoreFile) Header() http.Header

func (*VectorStoreFile) SetHeader

func (h *VectorStoreFile) SetHeader(header http.Header)

type VectorStoreFileBatch

// VectorStoreFileBatch is a batch of files being added to a vector store,
// with aggregate progress in FileCounts.
type VectorStoreFileBatch struct {
	ID            string               `json:"id"`
	Object        string               `json:"object"`
	CreatedAt     int64                `json:"created_at"`
	VectorStoreID string               `json:"vector_store_id"`
	Status        string               `json:"status"`
	FileCounts    VectorStoreFileCount `json:"file_counts"`
	// contains filtered or unexported fields
}

func (*VectorStoreFileBatch) GetRateLimitHeaders

func (h *VectorStoreFileBatch) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStoreFileBatch) Header

func (h *VectorStoreFileBatch) Header() http.Header

func (*VectorStoreFileBatch) SetHeader

func (h *VectorStoreFileBatch) SetHeader(header http.Header)

type VectorStoreFileBatchRequest

// VectorStoreFileBatchRequest is the payload for creating a vector store
// file batch from a set of uploaded file IDs.
type VectorStoreFileBatchRequest struct {
	FileIDs []string `json:"file_ids"`
}

type VectorStoreFileCount

// VectorStoreFileCount summarizes the processing states of a vector
// store's files.
type VectorStoreFileCount struct {
	InProgress int `json:"in_progress"`
	Completed  int `json:"completed"`
	Failed     int `json:"failed"`
	Cancelled  int `json:"cancelled"`
	Total      int `json:"total"`
}

type VectorStoreFileRequest

// VectorStoreFileRequest is the payload for attaching one uploaded file to
// a vector store.
type VectorStoreFileRequest struct {
	FileID string `json:"file_id"`
}

type VectorStoreFilesList

// VectorStoreFilesList is a page of vector store files (the API's "data"
// array).
type VectorStoreFilesList struct {
	VectorStoreFiles []VectorStoreFile `json:"data"`
	// contains filtered or unexported fields
}

func (*VectorStoreFilesList) GetRateLimitHeaders

func (h *VectorStoreFilesList) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStoreFilesList) Header

func (h *VectorStoreFilesList) Header() http.Header

func (*VectorStoreFilesList) SetHeader

func (h *VectorStoreFilesList) SetHeader(header http.Header)

type VectorStoreRequest

// VectorStoreRequest provides the vector store request parameters used
// when creating or modifying a vector store.
type VectorStoreRequest struct {
	Name         string              `json:"name,omitempty"`
	FileIDs      []string            `json:"file_ids,omitempty"`
	ExpiresAfter *VectorStoreExpires `json:"expires_after,omitempty"`
	Metadata     map[string]any      `json:"metadata,omitempty"`
}

VectorStoreRequest provides the vector store request parameters.

type VectorStoreToolResources

// VectorStoreToolResources configures a vector store created inline as a
// tool resource: the files it contains and how they are chunked.
type VectorStoreToolResources struct {
	FileIDs          []string          `json:"file_ids,omitempty"`
	ChunkingStrategy *ChunkingStrategy `json:"chunking_strategy,omitempty"`
	Metadata         map[string]any    `json:"metadata,omitempty"`
}

type VectorStoresList

// VectorStoresList is a page of vector stores; FirstID, LastID, and
// HasMore support cursor-based pagination.
type VectorStoresList struct {
	VectorStores []VectorStore `json:"data"`
	LastID       *string       `json:"last_id"`
	FirstID      *string       `json:"first_id"`
	HasMore      bool          `json:"has_more"`
	// contains filtered or unexported fields
}

VectorStoresList is a list of vector stores.

func (*VectorStoresList) GetRateLimitHeaders

func (h *VectorStoresList) GetRateLimitHeaders() RateLimitHeaders

func (*VectorStoresList) Header

func (h *VectorStoresList) Header() http.Header

func (*VectorStoresList) SetHeader

func (h *VectorStoresList) SetHeader(header http.Header)

type Violence

// Violence reports the content-filter result for the violence category:
// whether the content was filtered and, if available, at what severity.
type Violence struct {
	Filtered bool   `json:"filtered"`
	Severity string `json:"severity,omitempty"`
}

Directories

Path Synopsis
examples
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.
Package jsonschema provides very simple functionality for representing a JSON schema as a (nested) struct.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL