openai

package module
v0.1.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 18, 2024 License: Apache-2.0 Imports: 22 Imported by: 0

README

OpenAI Go API Library

Go Reference

[!WARNING] This release is currently in alpha. Minor breaking changes may occur.

The OpenAI Go library provides convenient access to the OpenAI REST API from applications written in Go. The full API of this library can be found in api.md.

Installation

import (
	"github.com/LeverageSales/openai-go" // imported as openai
)

Or to pin the version:

go get -u 'github.com/LeverageSales/openai-go@v0.1.0-alpha.18'

Requirements

This library requires Go 1.18+.

Usage

The full API of this library can be found in api.md.

See the examples directory for complete and runnable examples.

package main

import (
	"context"

	"github.com/LeverageSales/openai-go"
	"github.com/LeverageSales/openai-go/option"
)

func main() {
	client := openai.NewClient(
		option.WithAPIKey("My API Key"), // defaults to os.LookupEnv("OPENAI_API_KEY")
	)
	chatCompletion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			 openai.UserMessage("Say this is a test"),
		}),
		Model: openai.F(openai.ChatModelGPT4o),
	})
	if err != nil {
		panic(err.Error())
	}
}

Conversations
param := openai.ChatCompletionNewParams{
	Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
		openai.UserMessage("What kind of houseplant is easy to take care of?"),
  }),
	Seed:     openai.Int(1),
	Model:    openai.F(openai.ChatModelGPT4o),
}

completion, err := client.Chat.Completions.New(ctx, params)

param.Messages.Value = append(param.Messages.Value, completion.Choices[0].Message)
param.Messages.Value = append(param.Messages.Value, openai.UserMessage("How big are those?"))

// continue the conversation
completion, err = client.Chat.Completions.New(ctx, param)
Streaming responses
// Start a streaming chat completion; tokens are delivered incrementally as chunks.
question := "Write an epic"

stream := client.Chat.Completions.NewStreaming(ctx, openai.ChatCompletionNewParams{
	Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
		openai.UserMessage(question),
	}),
	Seed:  openai.Int(0),
	Model: openai.F(openai.ChatModelGPT4o),
})

// optionally, an accumulator helper can be used
acc := openai.ChatCompletionAccumulator{}

for stream.Next() {
	chunk := stream.Current()
	acc.AddChunk(chunk)

	// JustFinished* report true exactly once, when the corresponding part of
	// the response has been fully accumulated.
	if content, ok := acc.JustFinishedContent(); ok {
		println("Content stream finished:", content)
	}

	// if using tool calls
	if tool, ok := acc.JustFinishedToolCall(); ok {
		println("Tool call stream finished:", tool.Index, tool.Name, tool.Arguments)
	}

	if refusal, ok := acc.JustFinishedRefusal(); ok {
		println("Refusal stream finished:", refusal)
	}

	// it's best to use chunks after handling JustFinished events
	if len(chunk.Choices) > 0 {
		println(chunk.Choices[0].Delta.Content)
	}
}

// Always check the stream's error once the loop exits.
if err := stream.Err(); err != nil {
	panic(err)
}

// After the stream is finished, acc can be used like a ChatCompletion
_ = acc.Choices[0].Message.Content

See the full streaming and accumulation example

Tool calling
import (
	"encoding/json"
	// ...
)

// ...

question := "What is the weather in New York City?"

params := openai.ChatCompletionNewParams{
	Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
		openai.UserMessage(question),
	}),
	Tools: openai.F([]openai.ChatCompletionToolParam{
		{
			Type: openai.F(openai.ChatCompletionToolTypeFunction),
			Function: openai.F(openai.FunctionDefinitionParam{
				Name:        openai.String("get_weather"),
				Description: openai.String("Get weather at the given location"),
				Parameters: openai.F(openai.FunctionParameters{
					"type": "object",
					"properties": map[string]interface{}{
						"location": map[string]string{
							"type": "string",
						},
					},
					"required": []string{"location"},
				}),
			}),
		},
	}),
	Model: openai.F(openai.ChatModelGPT4o),
}

// chat completion request with tool calls; check the error before
// dereferencing completion.Choices[0] below.
completion, err := client.Chat.Completions.New(ctx, params)
if err != nil {
	panic(err.Error())
}

for _, toolCall := range completion.Choices[0].Message.ToolCalls {
	if toolCall.Function.Name == "get_weather" {
		// extract the location from the function call arguments
		// (`_ :=` is a syntax error in Go; handle the error instead)
		var args map[string]interface{}
		if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &args); err != nil {
			panic(err.Error())
		}

		// call a weather API with the arguments requested by the model
		weatherData := getWeather(args["location"].(string))
		params.Messages.Value = append(params.Messages.Value, openai.ToolMessage(toolCall.ID, weatherData))
	}
}

// ... continue the conversation with the information provided by the tool

See the full tool calling example

Structured outputs
import (
	"encoding/json"
	"github.com/invopop/jsonschema"
	// ...
)

// A struct that will be converted to a Structured Outputs response schema
type HistoricalComputer struct {
	Origin       Origin   `json:"origin" jsonschema_description:"The origin of the computer"`
	Name         string   `json:"full_name" jsonschema_description:"The name of the device model"`
	NotableFacts []string `json:"notable_facts" jsonschema_description:"A few key facts about the computer"`
}

type Origin struct {
	YearBuilt    int64  `json:"year_of_construction" jsonschema_description:"The year it was made"`
	Organization string `json:"organization" jsonschema_description:"The organization that was in charge of its development"`
}

func GenerateSchema[T any]() interface{} {
	reflector := jsonschema.Reflector{
		AllowAdditionalProperties: false,
		DoNotReference:            true,
	}
	var v T
	schema := reflector.Reflect(v)
	return schema
}

// Generate the JSON schema at initialization time
var HistoricalComputerResponseSchema = GenerateSchema[HistoricalComputer]()

func main() {

	// ...

	question := "What computer ran the first neural network?"

	schemaParam := openai.ResponseFormatJSONSchemaJSONSchemaParam{
		Name:        openai.F("biography"),
		Description: openai.F("Notable information about a person"),
		Schema:      openai.F(HistoricalComputerResponseSchema),
		Strict:      openai.Bool(true),
	}

	chat, _ := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		// ...
		ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](
			openai.ResponseFormatJSONSchemaParam{
				Type:       openai.F(openai.ResponseFormatJSONSchemaTypeJSONSchema),
				JSONSchema: openai.F(schemaParam),
			},
		),
		// only certain models can perform structured outputs
		Model: openai.F(openai.ChatModelGPT4o2024_08_06),
	})

	// extract into a well-typed struct
	historicalComputer := HistoricalComputer{}
	_ = json.Unmarshal([]byte(chat.Choices[0].Message.Content), &historicalComputer)

	historicalComputer.Name
	historicalComputer.Origin.YearBuilt
	historicalComputer.Origin.Organization
	for i, fact := range historicalComputer.NotableFacts {
		// ...
	}
}

See the full structured outputs example

Request fields

All request parameters are wrapped in a generic Field type, which we use to distinguish zero values from null or omitted fields.

This prevents accidentally sending a zero value if you forget a required parameter, and enables explicitly sending null, false, '', or 0 on optional parameters. Any field not specified is not sent.

To construct fields with values, use the helpers String(), Int(), Float(), or most commonly, the generic F[T](). To send a null, use Null[T](), and to send a nonconforming value, use Raw[T](any). For example:

params := FooParams{
	Name: openai.F("hello"),

	// Explicitly send `"description": null`
	Description: openai.Null[string](),

	Point: openai.F(openai.Point{
		X: openai.Int(0),
		Y: openai.Int(1),

		// In cases where the API specifies a given type,
		// but you want to send something else, use `Raw`:
		Z: openai.Raw[int64](0.01), // sends a float
	}),
}
Response objects

All fields in response structs are value types (not pointers or wrappers).

If a given field is null, not present, or invalid, the corresponding field will simply be its zero value.

All response structs also include a special JSON field, containing more detailed information about each property, which you can use like so:

if res.Name == "" {
	// true if `"name"` is either not present or explicitly null
	res.JSON.Name.IsNull()

	// true if the `"name"` key was not present in the response JSON at all
	res.JSON.Name.IsMissing()

	// When the API returns data that cannot be coerced to the expected type:
	if res.JSON.Name.IsInvalid() {
		raw := res.JSON.Name.Raw()

		legacyName := struct{
			First string `json:"first"`
			Last  string `json:"last"`
		}{}
		json.Unmarshal([]byte(raw), &legacyName)
		name = legacyName.First + " " + legacyName.Last
	}
}

These .JSON structs also include an Extras map containing any properties in the json response that were not specified in the struct. This can be useful for API features not yet present in the SDK.

body := res.JSON.ExtraFields["my_unexpected_field"].Raw()
RequestOptions

This library uses the functional options pattern. Functions defined in the option package return a RequestOption, which is a closure that mutates a RequestConfig. These options can be supplied to the client or at individual requests. For example:

client := openai.NewClient(
	// Adds a header to every request made by the client
	option.WithHeader("X-Some-Header", "custom_header_info"),
)

client.Chat.Completions.New(context.TODO(), ...,
	// Override the header
	option.WithHeader("X-Some-Header", "some_other_custom_header_info"),
	// Add an undocumented field to the request body, using sjson syntax
	option.WithJSONSet("some.json.path", map[string]string{"my": "object"}),
)

See the full list of request options.

Pagination

This library provides some conveniences for working with paginated list endpoints.

You can use .ListAutoPaging() methods to iterate through items across all pages:

iter := client.FineTuning.Jobs.ListAutoPaging(context.TODO(), openai.FineTuningJobListParams{
	Limit: openai.F(int64(20)),
})
// Automatically fetches more pages as needed.
for iter.Next() {
	fineTuningJob := iter.Current()
	fmt.Printf("%+v\n", fineTuningJob)
}
if err := iter.Err(); err != nil {
	panic(err.Error())
}

Or you can use simple .List() methods to fetch a single page and receive a standard response object with additional helper methods like .GetNextPage(), e.g.:

page, err := client.FineTuning.Jobs.List(context.TODO(), openai.FineTuningJobListParams{
	Limit: openai.F(int64(20)),
})
for page != nil {
	for _, job := range page.Data {
		fmt.Printf("%+v\n", job)
	}
	page, err = page.GetNextPage()
}
if err != nil {
	panic(err.Error())
}
Errors

When the API returns a non-success status code, we return an error with type *openai.Error. This contains the StatusCode, *http.Request, and *http.Response values of the request, as well as the JSON of the error body (much like other response objects in the SDK).

To handle errors, we recommend that you use the errors.As pattern:

_, err := client.FineTuning.Jobs.New(context.TODO(), openai.FineTuningJobNewParams{
	Model:        openai.F(openai.FineTuningJobNewParamsModelBabbage002),
	TrainingFile: openai.F("file-abc123"),
})
if err != nil {
	var apierr *openai.Error
	if errors.As(err, &apierr) {
		println(string(apierr.DumpRequest(true)))  // Prints the serialized HTTP request
		println(string(apierr.DumpResponse(true))) // Prints the serialized HTTP response
	}
	panic(err.Error()) // GET "/fine_tuning/jobs": 400 Bad Request { ... }
}

When other errors occur, they are returned unwrapped; for example, if HTTP transport fails, you might receive *url.Error wrapping *net.OpError.

Timeouts

Requests do not time out by default; use context to configure a timeout for a request lifecycle.

Note that if a request is retried, the context timeout does not start over. To set a per-retry timeout, use option.WithRequestTimeout().

// This sets the timeout for the request, including all the retries.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
client.Chat.Completions.New(
	ctx,
	openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			 openai.UserMessage("Say this is a test"),
		}),
		Model: openai.F(openai.ChatModelGPT4o),
	},
	// This sets the per-retry timeout
	option.WithRequestTimeout(20*time.Second),
)
File uploads

Request parameters that correspond to file uploads in multipart requests are typed as param.Field[io.Reader]. The contents of the io.Reader will by default be sent as a multipart form part with the file name of "anonymous_file" and content-type of "application/octet-stream".

The file name and content-type can be customized by implementing Name() string or ContentType() string on the run-time type of io.Reader. Note that os.File implements Name() string, so a file returned by os.Open will be sent with the file name on disk.

We also provide a helper openai.FileParam(reader io.Reader, filename string, contentType string) which can be used to wrap any io.Reader with the appropriate file name and content type.

// A file from the file system
file, err := os.Open("input.jsonl")
openai.FileNewParams{
	File:    openai.F[io.Reader](file),
	Purpose: openai.F(openai.FilePurposeFineTune),
}

// A file from a string
openai.FileNewParams{
	File:    openai.F[io.Reader](strings.NewReader("my file contents")),
	Purpose: openai.F(openai.FilePurposeFineTune),
}

// With a custom filename and contentType
openai.FileNewParams{
	File:    openai.FileParam(strings.NewReader(`{"hello": "foo"}`), "file.go", "application/json"),
	Purpose: openai.F(openai.FilePurposeFineTune),
}
Retries

Certain errors will be automatically retried 2 times by default, with a short exponential backoff. We retry by default all connection errors, 408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors.

You can use the WithMaxRetries option to configure or disable this:

// Configure the default for all requests:
client := openai.NewClient(
	option.WithMaxRetries(0), // default is 2
)

// Override per-request:
client.Chat.Completions.New(
	context.TODO(),
	openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			 openai.UserMessage("Say this is a test"),
		}),
		Model: openai.F(openai.ChatModelGPT4o),
	},
	option.WithMaxRetries(5),
)
Making custom/undocumented requests

This library is typed for convenient access to the documented API. If you need to access undocumented endpoints, params, or response properties, the library can still be used.

Undocumented endpoints

To make requests to undocumented endpoints, you can use client.Get, client.Post, and other HTTP verbs. RequestOptions on the client, such as retries, will be respected when making these requests.

var (
    // params can be an io.Reader, a []byte, an encoding/json serializable object,
    // or a "…Params" struct defined in this library.
    params map[string]interface{}

    // result can be an []byte, *http.Response, a encoding/json deserializable object,
    // or a model defined in this library.
    result *http.Response
)
err := client.Post(context.Background(), "/unspecified", params, &result)
if err != nil {
    …
}
Undocumented request params

To make requests using undocumented parameters, you may use either the option.WithQuerySet() or the option.WithJSONSet() methods.

params := FooNewParams{
    ID:   openai.F("id_xxxx"),
    Data: openai.F(FooNewParamsData{
        FirstName: openai.F("John"),
    }),
}
client.Foo.New(context.Background(), params, option.WithJSONSet("data.last_name", "Doe"))
Undocumented response properties

To access undocumented response properties, you may either access the raw JSON of the response as a string with result.JSON.RawJSON(), or get the raw JSON of a particular field on the result with result.JSON.Foo.Raw().

Any fields that are not present on the response struct will be saved and can be accessed by result.JSON.ExtraFields() which returns the extra fields as a map[string]Field.

Middleware

We provide option.WithMiddleware which applies the given middleware to requests.

// Logger is a middleware that logs each request and the time it took.
func Logger(req *http.Request, next option.MiddlewareNext) (res *http.Response, err error) {
	// Before the request
	start := time.Now()
	LogReq(req)

	// Forward the request to the next handler
	res, err = next(req)

	// Handle stuff after the request. time.Time values cannot be subtracted
	// with the - operator; time.Since(start) yields the elapsed Duration.
	LogRes(res, err, time.Since(start))

	return res, err
}

client := openai.NewClient(
	option.WithMiddleware(Logger),
)

When multiple middlewares are provided as variadic arguments, the middlewares are applied left to right. If option.WithMiddleware is given multiple times, for example first in the client then the method, the middleware in the client will run first and the middleware given in the method will run next.

You may also replace the default http.Client with option.WithHTTPClient(client). Only one http client is accepted (this overwrites any previous client) and receives requests after any middleware has been applied.

Microsoft Azure OpenAI

To use this library with Azure OpenAI, use the option.RequestOption functions in the azure package.

package main

import (
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/LeverageSales/openai-go"
	"github.com/LeverageSales/openai-go/azure"
)

func main() {
	const azureOpenAIEndpoint = "https://<azure-openai-resource>.openai.azure.com"

	// The latest API versions, including previews, can be found here:
	// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
	const azureOpenAIAPIVersion = "2024-06-01"

	tokenCredential, err := azidentity.NewDefaultAzureCredential(nil)

	if err != nil {
		fmt.Printf("Failed to create the DefaultAzureCredential: %s", err)
		os.Exit(1)
	}

	client := openai.NewClient(
		azure.WithEndpoint(azureOpenAIEndpoint, azureOpenAIAPIVersion),

		// Choose between authenticating using a TokenCredential or an API Key
		azure.WithTokenCredential(tokenCredential),
		// or azure.WithAPIKey(azureOpenAIAPIKey),
	)

	// Make requests with client; without a use, the example fails to
	// compile ("client declared and not used").
	_ = client
}

Semantic versioning

This package generally follows SemVer conventions, though certain backwards-incompatible changes may be released as minor versions:

  1. Changes to library internals which are technically public but not intended or documented for external use. (Please open a GitHub issue to let us know if you are relying on such internals).
  2. Changes that we do not expect to impact the vast majority of users in practice.

We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.

We are keen for your feedback; please open an issue with questions, bugs, or suggestions.

Documentation

Index

Constants

View Source
const ResponseFormatJSONObjectTypeJSONObject = shared.ResponseFormatJSONObjectTypeJSONObject

This is an alias to an internal value.

View Source
const ResponseFormatJSONSchemaTypeJSONSchema = shared.ResponseFormatJSONSchemaTypeJSONSchema

This is an alias to an internal value.

View Source
const ResponseFormatTextTypeText = shared.ResponseFormatTextTypeText

This is an alias to an internal value.

Variables

This section is empty.

Functions

func Bool

func Bool(value bool) param.Field[bool]

Bool is a param field helper which helps specify bools.

func F

func F[T any](value T) param.Field[T]

F is a param field helper used to initialize a param.Field generic struct. This helps specify null, zero values, and overrides, as well as normal values. You can read more about this in our README.

func FileParam

func FileParam(reader io.Reader, filename string, contentType string) param.Field[io.Reader]

FileParam is a param field helper which helps files with a mime content-type.

func Float

func Float(value float64) param.Field[float64]

Float is a param field helper which helps specify floats.

func Int

func Int(value int64) param.Field[int64]

Int is a param field helper which helps specify integers. This is particularly helpful when specifying integer constants for fields.

func Null

func Null[T any]() param.Field[T]

Null is a param field helper which explicitly sends null to the API.

func Raw

func Raw[T any](value any) param.Field[T]

Raw is a param field helper for specifying values for fields when the type you are looking to send is different from the type that is specified in the SDK. For example, if the type of the field is an integer, but you want to send a float, you could do that by setting the corresponding field with Raw[int](0.5).

func String

func String(value string) param.Field[string]

String is a param field helper which helps specify strings.

Types

type Annotation

type Annotation struct {
	// Always `file_citation`.
	Type AnnotationType `json:"type,required"`
	// The text in the message content that needs to be replaced.
	Text string `json:"text,required"`
	// This field can have the runtime type of [FileCitationAnnotationFileCitation].
	FileCitation interface{} `json:"file_citation,required"`
	StartIndex   int64       `json:"start_index,required"`
	EndIndex     int64       `json:"end_index,required"`
	// This field can have the runtime type of [FilePathAnnotationFilePath].
	FilePath interface{}    `json:"file_path,required"`
	JSON     annotationJSON `json:"-"`
	// contains filtered or unexported fields
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

func (Annotation) AsUnion

func (r Annotation) AsUnion() AnnotationUnion

AsUnion returns a AnnotationUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are FileCitationAnnotation, FilePathAnnotation.

func (*Annotation) UnmarshalJSON

func (r *Annotation) UnmarshalJSON(data []byte) (err error)

type AnnotationDelta

type AnnotationDelta struct {
	// The index of the annotation in the text content part.
	Index int64 `json:"index,required"`
	// Always `file_citation`.
	Type AnnotationDeltaType `json:"type,required"`
	// The text in the message content that needs to be replaced.
	Text string `json:"text"`
	// This field can have the runtime type of
	// [FileCitationDeltaAnnotationFileCitation].
	FileCitation interface{} `json:"file_citation,required"`
	StartIndex   int64       `json:"start_index"`
	EndIndex     int64       `json:"end_index"`
	// This field can have the runtime type of [FilePathDeltaAnnotationFilePath].
	FilePath interface{}         `json:"file_path,required"`
	JSON     annotationDeltaJSON `json:"-"`
	// contains filtered or unexported fields
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

func (AnnotationDelta) AsUnion

AsUnion returns a AnnotationDeltaUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are FileCitationDeltaAnnotation, FilePathDeltaAnnotation.

func (*AnnotationDelta) UnmarshalJSON

func (r *AnnotationDelta) UnmarshalJSON(data []byte) (err error)

type AnnotationDeltaType

type AnnotationDeltaType string

Always `file_citation`.

const (
	AnnotationDeltaTypeFileCitation AnnotationDeltaType = "file_citation"
	AnnotationDeltaTypeFilePath     AnnotationDeltaType = "file_path"
)

func (AnnotationDeltaType) IsKnown

func (r AnnotationDeltaType) IsKnown() bool

type AnnotationDeltaUnion

type AnnotationDeltaUnion interface {
	// contains filtered or unexported methods
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

Union satisfied by FileCitationDeltaAnnotation or FilePathDeltaAnnotation.

type AnnotationType

type AnnotationType string

Always `file_citation`.

const (
	AnnotationTypeFileCitation AnnotationType = "file_citation"
	AnnotationTypeFilePath     AnnotationType = "file_path"
)

func (AnnotationType) IsKnown

func (r AnnotationType) IsKnown() bool

type AnnotationUnion

type AnnotationUnion interface {
	// contains filtered or unexported methods
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

Union satisfied by FileCitationAnnotation or FilePathAnnotation.

type Assistant

type Assistant struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the assistant was created.
	CreatedAt int64 `json:"created_at,required"`
	// The description of the assistant. The maximum length is 512 characters.
	Description string `json:"description,required,nullable"`
	// The system instructions that the assistant uses. The maximum length is 256,000
	// characters.
	Instructions string `json:"instructions,required,nullable"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models/overview) for
	// descriptions of them.
	Model string `json:"model,required"`
	// The name of the assistant. The maximum length is 256 characters.
	Name string `json:"name,required,nullable"`
	// The object type, which is always `assistant`.
	Object AssistantObject `json:"object,required"`
	// A list of tool enabled on the assistant. There can be a maximum of 128 tools per
	// assistant. Tools can be of types `code_interpreter`, `file_search`, or
	// `function`.
	Tools []AssistantTool `json:"tools,required"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature float64 `json:"temperature,nullable"`
	// A set of resources that are used by the assistant's tools. The resources are
	// specific to the type of tool. For example, the `code_interpreter` tool requires
	// a list of file IDs, while the `file_search` tool requires a list of vector store
	// IDs.
	ToolResources AssistantToolResources `json:"tool_resources,nullable"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP float64       `json:"top_p,nullable"`
	JSON assistantJSON `json:"-"`
}

Represents an `assistant` that can call the model and use tools.

func (*Assistant) UnmarshalJSON

func (r *Assistant) UnmarshalJSON(data []byte) (err error)

type AssistantDeleted

type AssistantDeleted struct {
	ID      string                 `json:"id,required"`
	Deleted bool                   `json:"deleted,required"`
	Object  AssistantDeletedObject `json:"object,required"`
	JSON    assistantDeletedJSON   `json:"-"`
}

func (*AssistantDeleted) UnmarshalJSON

func (r *AssistantDeleted) UnmarshalJSON(data []byte) (err error)

type AssistantDeletedObject

type AssistantDeletedObject string
const (
	AssistantDeletedObjectAssistantDeleted AssistantDeletedObject = "assistant.deleted"
)

func (AssistantDeletedObject) IsKnown

func (r AssistantDeletedObject) IsKnown() bool

type AssistantObject

type AssistantObject string

The object type, which is always `assistant`.

const (
	AssistantObjectAssistant AssistantObject = "assistant"
)

func (AssistantObject) IsKnown

func (r AssistantObject) IsKnown() bool

type AssistantStreamEvent

type AssistantStreamEvent struct {
	Event AssistantStreamEventEvent `json:"event,required"`
	// This field can have the runtime type of [Thread], [Run], [RunStep],
	// [RunStepDeltaEvent], [Message], [MessageDeltaEvent], [shared.ErrorObject].
	Data interface{}              `json:"data"`
	JSON assistantStreamEventJSON `json:"-"`
	// contains filtered or unexported fields
}

Represents an event emitted when streaming a Run.

Each event in a server-sent events stream has an `event` and `data` property:

``` event: thread.created data: {"id": "thread_123", "object": "thread", ...} ```

We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit `thread.run.created` when a new run is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a `thread.message.created event`, a `thread.message.in_progress` event, many `thread.message.delta` events, and finally a `thread.message.completed` event.

We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) to learn how to integrate the Assistants API with streaming.

func (*AssistantStreamEvent) UnmarshalJSON

func (r *AssistantStreamEvent) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventErrorEvent

type AssistantStreamEventErrorEvent struct {
	Data  shared.ErrorObject                  `json:"data,required"`
	Event AssistantStreamEventErrorEventEvent `json:"event,required"`
	JSON  assistantStreamEventErrorEventJSON  `json:"-"`
}

Occurs when an [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.

func (*AssistantStreamEventErrorEvent) UnmarshalJSON

func (r *AssistantStreamEventErrorEvent) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventErrorEventEvent

type AssistantStreamEventErrorEventEvent string
const (
	AssistantStreamEventErrorEventEventError AssistantStreamEventErrorEventEvent = "error"
)

func (AssistantStreamEventErrorEventEvent) IsKnown

type AssistantStreamEventEvent

type AssistantStreamEventEvent string
const (
	AssistantStreamEventEventThreadCreated           AssistantStreamEventEvent = "thread.created"
	AssistantStreamEventEventThreadRunCreated        AssistantStreamEventEvent = "thread.run.created"
	AssistantStreamEventEventThreadRunQueued         AssistantStreamEventEvent = "thread.run.queued"
	AssistantStreamEventEventThreadRunInProgress     AssistantStreamEventEvent = "thread.run.in_progress"
	AssistantStreamEventEventThreadRunRequiresAction AssistantStreamEventEvent = "thread.run.requires_action"
	AssistantStreamEventEventThreadRunCompleted      AssistantStreamEventEvent = "thread.run.completed"
	AssistantStreamEventEventThreadRunIncomplete     AssistantStreamEventEvent = "thread.run.incomplete"
	AssistantStreamEventEventThreadRunFailed         AssistantStreamEventEvent = "thread.run.failed"
	AssistantStreamEventEventThreadRunCancelling     AssistantStreamEventEvent = "thread.run.cancelling"
	AssistantStreamEventEventThreadRunCancelled      AssistantStreamEventEvent = "thread.run.cancelled"
	AssistantStreamEventEventThreadRunExpired        AssistantStreamEventEvent = "thread.run.expired"
	AssistantStreamEventEventThreadRunStepCreated    AssistantStreamEventEvent = "thread.run.step.created"
	AssistantStreamEventEventThreadRunStepInProgress AssistantStreamEventEvent = "thread.run.step.in_progress"
	AssistantStreamEventEventThreadRunStepDelta      AssistantStreamEventEvent = "thread.run.step.delta"
	AssistantStreamEventEventThreadRunStepCompleted  AssistantStreamEventEvent = "thread.run.step.completed"
	AssistantStreamEventEventThreadRunStepFailed     AssistantStreamEventEvent = "thread.run.step.failed"
	AssistantStreamEventEventThreadRunStepCancelled  AssistantStreamEventEvent = "thread.run.step.cancelled"
	AssistantStreamEventEventThreadRunStepExpired    AssistantStreamEventEvent = "thread.run.step.expired"
	AssistantStreamEventEventThreadMessageCreated    AssistantStreamEventEvent = "thread.message.created"
	AssistantStreamEventEventThreadMessageInProgress AssistantStreamEventEvent = "thread.message.in_progress"
	AssistantStreamEventEventThreadMessageDelta      AssistantStreamEventEvent = "thread.message.delta"
	AssistantStreamEventEventThreadMessageCompleted  AssistantStreamEventEvent = "thread.message.completed"
	AssistantStreamEventEventThreadMessageIncomplete AssistantStreamEventEvent = "thread.message.incomplete"
	AssistantStreamEventEventError                   AssistantStreamEventEvent = "error"
)

func (AssistantStreamEventEvent) IsKnown

func (r AssistantStreamEventEvent) IsKnown() bool

type AssistantStreamEventThreadCreated

type AssistantStreamEventThreadCreated struct {
	// Represents a thread that contains
	// [messages](https://platform.openai.com/docs/api-reference/messages).
	Data  Thread                                 `json:"data,required"`
	Event AssistantStreamEventThreadCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadCreatedJSON  `json:"-"`
}

Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created.

func (*AssistantStreamEventThreadCreated) UnmarshalJSON

func (r *AssistantStreamEventThreadCreated) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadCreatedEvent

type AssistantStreamEventThreadCreatedEvent string
const (
	AssistantStreamEventThreadCreatedEventThreadCreated AssistantStreamEventThreadCreatedEvent = "thread.created"
)

func (AssistantStreamEventThreadCreatedEvent) IsKnown

type AssistantStreamEventThreadMessageCompleted

type AssistantStreamEventThreadMessageCompleted struct {
	// Represents a message within a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Message                                         `json:"data,required"`
	Event AssistantStreamEventThreadMessageCompletedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadMessageCompletedJSON  `json:"-"`
}

Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed.

func (*AssistantStreamEventThreadMessageCompleted) UnmarshalJSON

func (r *AssistantStreamEventThreadMessageCompleted) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadMessageCompletedEvent

type AssistantStreamEventThreadMessageCompletedEvent string
const (
	AssistantStreamEventThreadMessageCompletedEventThreadMessageCompleted AssistantStreamEventThreadMessageCompletedEvent = "thread.message.completed"
)

func (AssistantStreamEventThreadMessageCompletedEvent) IsKnown

type AssistantStreamEventThreadMessageCreated

type AssistantStreamEventThreadMessageCreated struct {
	// Represents a message within a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Message                                       `json:"data,required"`
	Event AssistantStreamEventThreadMessageCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadMessageCreatedJSON  `json:"-"`
}

Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created.

func (*AssistantStreamEventThreadMessageCreated) UnmarshalJSON

func (r *AssistantStreamEventThreadMessageCreated) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadMessageCreatedEvent

type AssistantStreamEventThreadMessageCreatedEvent string
const (
	AssistantStreamEventThreadMessageCreatedEventThreadMessageCreated AssistantStreamEventThreadMessageCreatedEvent = "thread.message.created"
)

func (AssistantStreamEventThreadMessageCreatedEvent) IsKnown

type AssistantStreamEventThreadMessageDelta

type AssistantStreamEventThreadMessageDelta struct {
	// Represents a message delta i.e. any changed fields on a message during
	// streaming.
	Data  MessageDeltaEvent                           `json:"data,required"`
	Event AssistantStreamEventThreadMessageDeltaEvent `json:"event,required"`
	JSON  assistantStreamEventThreadMessageDeltaJSON  `json:"-"`
}

Occurs when parts of a [message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.

func (*AssistantStreamEventThreadMessageDelta) UnmarshalJSON

func (r *AssistantStreamEventThreadMessageDelta) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadMessageDeltaEvent

type AssistantStreamEventThreadMessageDeltaEvent string
const (
	AssistantStreamEventThreadMessageDeltaEventThreadMessageDelta AssistantStreamEventThreadMessageDeltaEvent = "thread.message.delta"
)

func (AssistantStreamEventThreadMessageDeltaEvent) IsKnown

type AssistantStreamEventThreadMessageInProgress

type AssistantStreamEventThreadMessageInProgress struct {
	// Represents a message within a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Message                                          `json:"data,required"`
	Event AssistantStreamEventThreadMessageInProgressEvent `json:"event,required"`
	JSON  assistantStreamEventThreadMessageInProgressJSON  `json:"-"`
}

Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state.

func (*AssistantStreamEventThreadMessageInProgress) UnmarshalJSON

func (r *AssistantStreamEventThreadMessageInProgress) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadMessageInProgressEvent

type AssistantStreamEventThreadMessageInProgressEvent string
const (
	AssistantStreamEventThreadMessageInProgressEventThreadMessageInProgress AssistantStreamEventThreadMessageInProgressEvent = "thread.message.in_progress"
)

func (AssistantStreamEventThreadMessageInProgressEvent) IsKnown

type AssistantStreamEventThreadMessageIncomplete

type AssistantStreamEventThreadMessageIncomplete struct {
	// Represents a message within a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Message                                          `json:"data,required"`
	Event AssistantStreamEventThreadMessageIncompleteEvent `json:"event,required"`
	JSON  assistantStreamEventThreadMessageIncompleteJSON  `json:"-"`
}

Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed.

func (*AssistantStreamEventThreadMessageIncomplete) UnmarshalJSON

func (r *AssistantStreamEventThreadMessageIncomplete) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadMessageIncompleteEvent

type AssistantStreamEventThreadMessageIncompleteEvent string
const (
	AssistantStreamEventThreadMessageIncompleteEventThreadMessageIncomplete AssistantStreamEventThreadMessageIncompleteEvent = "thread.message.incomplete"
)

func (AssistantStreamEventThreadMessageIncompleteEvent) IsKnown

type AssistantStreamEventThreadRunCancelled

type AssistantStreamEventThreadRunCancelled struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunCancelledEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCancelledJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.

func (*AssistantStreamEventThreadRunCancelled) UnmarshalJSON

func (r *AssistantStreamEventThreadRunCancelled) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunCancelledEvent

type AssistantStreamEventThreadRunCancelledEvent string
const (
	AssistantStreamEventThreadRunCancelledEventThreadRunCancelled AssistantStreamEventThreadRunCancelledEvent = "thread.run.cancelled"
)

func (AssistantStreamEventThreadRunCancelledEvent) IsKnown

type AssistantStreamEventThreadRunCancelling

type AssistantStreamEventThreadRunCancelling struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunCancellingEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCancellingJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status.

func (*AssistantStreamEventThreadRunCancelling) UnmarshalJSON

func (r *AssistantStreamEventThreadRunCancelling) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunCancellingEvent

type AssistantStreamEventThreadRunCancellingEvent string
const (
	AssistantStreamEventThreadRunCancellingEventThreadRunCancelling AssistantStreamEventThreadRunCancellingEvent = "thread.run.cancelling"
)

func (AssistantStreamEventThreadRunCancellingEvent) IsKnown

type AssistantStreamEventThreadRunCompleted

type AssistantStreamEventThreadRunCompleted struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunCompletedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCompletedJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.

func (*AssistantStreamEventThreadRunCompleted) UnmarshalJSON

func (r *AssistantStreamEventThreadRunCompleted) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunCompletedEvent

type AssistantStreamEventThreadRunCompletedEvent string
const (
	AssistantStreamEventThreadRunCompletedEventThreadRunCompleted AssistantStreamEventThreadRunCompletedEvent = "thread.run.completed"
)

func (AssistantStreamEventThreadRunCompletedEvent) IsKnown

type AssistantStreamEventThreadRunCreated

type AssistantStreamEventThreadRunCreated struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunCreatedJSON  `json:"-"`
}

Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created.

func (*AssistantStreamEventThreadRunCreated) UnmarshalJSON

func (r *AssistantStreamEventThreadRunCreated) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunCreatedEvent

type AssistantStreamEventThreadRunCreatedEvent string
const (
	AssistantStreamEventThreadRunCreatedEventThreadRunCreated AssistantStreamEventThreadRunCreatedEvent = "thread.run.created"
)

func (AssistantStreamEventThreadRunCreatedEvent) IsKnown

type AssistantStreamEventThreadRunExpired

type AssistantStreamEventThreadRunExpired struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunExpiredEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunExpiredJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires.

func (*AssistantStreamEventThreadRunExpired) UnmarshalJSON

func (r *AssistantStreamEventThreadRunExpired) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunExpiredEvent

type AssistantStreamEventThreadRunExpiredEvent string
const (
	AssistantStreamEventThreadRunExpiredEventThreadRunExpired AssistantStreamEventThreadRunExpiredEvent = "thread.run.expired"
)

func (AssistantStreamEventThreadRunExpiredEvent) IsKnown

type AssistantStreamEventThreadRunFailed

type AssistantStreamEventThreadRunFailed struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                      `json:"data,required"`
	Event AssistantStreamEventThreadRunFailedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunFailedJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails.

func (*AssistantStreamEventThreadRunFailed) UnmarshalJSON

func (r *AssistantStreamEventThreadRunFailed) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunFailedEvent

type AssistantStreamEventThreadRunFailedEvent string
const (
	AssistantStreamEventThreadRunFailedEventThreadRunFailed AssistantStreamEventThreadRunFailedEvent = "thread.run.failed"
)

func (AssistantStreamEventThreadRunFailedEvent) IsKnown

type AssistantStreamEventThreadRunInProgress

type AssistantStreamEventThreadRunInProgress struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunInProgressEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunInProgressJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status.

func (*AssistantStreamEventThreadRunInProgress) UnmarshalJSON

func (r *AssistantStreamEventThreadRunInProgress) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunInProgressEvent

type AssistantStreamEventThreadRunInProgressEvent string
const (
	AssistantStreamEventThreadRunInProgressEventThreadRunInProgress AssistantStreamEventThreadRunInProgressEvent = "thread.run.in_progress"
)

func (AssistantStreamEventThreadRunInProgressEvent) IsKnown

type AssistantStreamEventThreadRunIncomplete

type AssistantStreamEventThreadRunIncomplete struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunIncompleteEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunIncompleteJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`.

func (*AssistantStreamEventThreadRunIncomplete) UnmarshalJSON

func (r *AssistantStreamEventThreadRunIncomplete) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunIncompleteEvent

type AssistantStreamEventThreadRunIncompleteEvent string
const (
	AssistantStreamEventThreadRunIncompleteEventThreadRunIncomplete AssistantStreamEventThreadRunIncompleteEvent = "thread.run.incomplete"
)

func (AssistantStreamEventThreadRunIncompleteEvent) IsKnown

type AssistantStreamEventThreadRunQueued

type AssistantStreamEventThreadRunQueued struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                      `json:"data,required"`
	Event AssistantStreamEventThreadRunQueuedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunQueuedJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status.

func (*AssistantStreamEventThreadRunQueued) UnmarshalJSON

func (r *AssistantStreamEventThreadRunQueued) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunQueuedEvent

type AssistantStreamEventThreadRunQueuedEvent string
const (
	AssistantStreamEventThreadRunQueuedEventThreadRunQueued AssistantStreamEventThreadRunQueuedEvent = "thread.run.queued"
)

func (AssistantStreamEventThreadRunQueuedEvent) IsKnown

type AssistantStreamEventThreadRunRequiresAction

type AssistantStreamEventThreadRunRequiresAction struct {
	// Represents an execution run on a
	// [thread](https://platform.openai.com/docs/api-reference/threads).
	Data  Run                                              `json:"data,required"`
	Event AssistantStreamEventThreadRunRequiresActionEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunRequiresActionJSON  `json:"-"`
}

Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status.

func (*AssistantStreamEventThreadRunRequiresAction) UnmarshalJSON

func (r *AssistantStreamEventThreadRunRequiresAction) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunRequiresActionEvent

type AssistantStreamEventThreadRunRequiresActionEvent string
const (
	AssistantStreamEventThreadRunRequiresActionEventThreadRunRequiresAction AssistantStreamEventThreadRunRequiresActionEvent = "thread.run.requires_action"
)

func (AssistantStreamEventThreadRunRequiresActionEvent) IsKnown

type AssistantStreamEventThreadRunStepCancelled

type AssistantStreamEventThreadRunStepCancelled struct {
	// Represents a step in execution of a run.
	Data  RunStep                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunStepCancelledEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepCancelledJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled.

func (*AssistantStreamEventThreadRunStepCancelled) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepCancelled) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepCancelledEvent

type AssistantStreamEventThreadRunStepCancelledEvent string
const (
	AssistantStreamEventThreadRunStepCancelledEventThreadRunStepCancelled AssistantStreamEventThreadRunStepCancelledEvent = "thread.run.step.cancelled"
)

func (AssistantStreamEventThreadRunStepCancelledEvent) IsKnown

type AssistantStreamEventThreadRunStepCompleted

type AssistantStreamEventThreadRunStepCompleted struct {
	// Represents a step in execution of a run.
	Data  RunStep                                         `json:"data,required"`
	Event AssistantStreamEventThreadRunStepCompletedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepCompletedJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed.

func (*AssistantStreamEventThreadRunStepCompleted) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepCompleted) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepCompletedEvent

type AssistantStreamEventThreadRunStepCompletedEvent string
const (
	AssistantStreamEventThreadRunStepCompletedEventThreadRunStepCompleted AssistantStreamEventThreadRunStepCompletedEvent = "thread.run.step.completed"
)

func (AssistantStreamEventThreadRunStepCompletedEvent) IsKnown

type AssistantStreamEventThreadRunStepCreated

type AssistantStreamEventThreadRunStepCreated struct {
	// Represents a step in execution of a run.
	Data  RunStep                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunStepCreatedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepCreatedJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.

func (*AssistantStreamEventThreadRunStepCreated) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepCreated) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepCreatedEvent

type AssistantStreamEventThreadRunStepCreatedEvent string
const (
	AssistantStreamEventThreadRunStepCreatedEventThreadRunStepCreated AssistantStreamEventThreadRunStepCreatedEvent = "thread.run.step.created"
)

func (AssistantStreamEventThreadRunStepCreatedEvent) IsKnown

type AssistantStreamEventThreadRunStepDelta

type AssistantStreamEventThreadRunStepDelta struct {
	// Represents a run step delta i.e. any changed fields on a run step during
	// streaming.
	Data  RunStepDeltaEvent                           `json:"data,required"`
	Event AssistantStreamEventThreadRunStepDeltaEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepDeltaJSON  `json:"-"`
}

Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed.

func (*AssistantStreamEventThreadRunStepDelta) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepDelta) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepDeltaEvent

type AssistantStreamEventThreadRunStepDeltaEvent string
const (
	AssistantStreamEventThreadRunStepDeltaEventThreadRunStepDelta AssistantStreamEventThreadRunStepDeltaEvent = "thread.run.step.delta"
)

func (AssistantStreamEventThreadRunStepDeltaEvent) IsKnown

type AssistantStreamEventThreadRunStepExpired

type AssistantStreamEventThreadRunStepExpired struct {
	// Represents a step in execution of a run.
	Data  RunStep                                       `json:"data,required"`
	Event AssistantStreamEventThreadRunStepExpiredEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepExpiredJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.

func (*AssistantStreamEventThreadRunStepExpired) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepExpired) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepExpiredEvent

type AssistantStreamEventThreadRunStepExpiredEvent string
const (
	AssistantStreamEventThreadRunStepExpiredEventThreadRunStepExpired AssistantStreamEventThreadRunStepExpiredEvent = "thread.run.step.expired"
)

func (AssistantStreamEventThreadRunStepExpiredEvent) IsKnown

type AssistantStreamEventThreadRunStepFailed

type AssistantStreamEventThreadRunStepFailed struct {
	// Represents a step in execution of a run.
	Data  RunStep                                      `json:"data,required"`
	Event AssistantStreamEventThreadRunStepFailedEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepFailedJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.

func (*AssistantStreamEventThreadRunStepFailed) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepFailed) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepFailedEvent

type AssistantStreamEventThreadRunStepFailedEvent string
const (
	AssistantStreamEventThreadRunStepFailedEventThreadRunStepFailed AssistantStreamEventThreadRunStepFailedEvent = "thread.run.step.failed"
)

func (AssistantStreamEventThreadRunStepFailedEvent) IsKnown

type AssistantStreamEventThreadRunStepInProgress

type AssistantStreamEventThreadRunStepInProgress struct {
	// Represents a step in execution of a run.
	Data  RunStep                                          `json:"data,required"`
	Event AssistantStreamEventThreadRunStepInProgressEvent `json:"event,required"`
	JSON  assistantStreamEventThreadRunStepInProgressJSON  `json:"-"`
}

Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.

func (*AssistantStreamEventThreadRunStepInProgress) UnmarshalJSON

func (r *AssistantStreamEventThreadRunStepInProgress) UnmarshalJSON(data []byte) (err error)

type AssistantStreamEventThreadRunStepInProgressEvent

type AssistantStreamEventThreadRunStepInProgressEvent string
const (
	AssistantStreamEventThreadRunStepInProgressEventThreadRunStepInProgress AssistantStreamEventThreadRunStepInProgressEvent = "thread.run.step.in_progress"
)

func (AssistantStreamEventThreadRunStepInProgressEvent) IsKnown

type AssistantStreamEventUnion

type AssistantStreamEventUnion interface {
	// contains filtered or unexported methods
}

Represents an event emitted when streaming a Run.

Each event in a server-sent events stream has an `event` and `data` property:

```
event: thread.created
data: {"id": "thread_123", "object": "thread", ...}
```

We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit `thread.run.created` when a new run is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a `thread.message.created` event, a `thread.message.in_progress` event, many `thread.message.delta` events, and finally a `thread.message.completed` event.

We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) to learn how to integrate the Assistants API with streaming.

Union satisfied by AssistantStreamEventThreadCreated, AssistantStreamEventThreadRunCreated, AssistantStreamEventThreadRunQueued, AssistantStreamEventThreadRunInProgress, AssistantStreamEventThreadRunRequiresAction, AssistantStreamEventThreadRunCompleted, AssistantStreamEventThreadRunIncomplete, AssistantStreamEventThreadRunFailed, AssistantStreamEventThreadRunCancelling, AssistantStreamEventThreadRunCancelled, AssistantStreamEventThreadRunExpired, AssistantStreamEventThreadRunStepCreated, AssistantStreamEventThreadRunStepInProgress, AssistantStreamEventThreadRunStepDelta, AssistantStreamEventThreadRunStepCompleted, AssistantStreamEventThreadRunStepFailed, AssistantStreamEventThreadRunStepCancelled, AssistantStreamEventThreadRunStepExpired, AssistantStreamEventThreadMessageCreated, AssistantStreamEventThreadMessageInProgress, AssistantStreamEventThreadMessageDelta, AssistantStreamEventThreadMessageCompleted, AssistantStreamEventThreadMessageIncomplete or AssistantStreamEventErrorEvent.

type AssistantTool

type AssistantTool struct {
	// The type of tool being defined: `code_interpreter`
	Type AssistantToolType `json:"type,required"`
	// This field can have the runtime type of [FileSearchToolFileSearch].
	FileSearch interface{}               `json:"file_search,required"`
	Function   shared.FunctionDefinition `json:"function"`
	JSON       assistantToolJSON         `json:"-"`
	// contains filtered or unexported fields
}

func (AssistantTool) AsUnion

func (r AssistantTool) AsUnion() AssistantToolUnion

AsUnion returns a AssistantToolUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterTool, FileSearchTool, FunctionTool.

func (*AssistantTool) UnmarshalJSON

func (r *AssistantTool) UnmarshalJSON(data []byte) (err error)

type AssistantToolChoice

type AssistantToolChoice struct {
	// The type of the tool. If type is `function`, the function name must be set
	Type     AssistantToolChoiceType     `json:"type,required"`
	Function AssistantToolChoiceFunction `json:"function"`
	JSON     assistantToolChoiceJSON     `json:"-"`
}

Specifies a tool the model should use. Use to force the model to call a specific tool.

func (*AssistantToolChoice) UnmarshalJSON

func (r *AssistantToolChoice) UnmarshalJSON(data []byte) (err error)

type AssistantToolChoiceFunction

type AssistantToolChoiceFunction struct {
	// The name of the function to call.
	Name string                          `json:"name,required"`
	JSON assistantToolChoiceFunctionJSON `json:"-"`
}

func (*AssistantToolChoiceFunction) UnmarshalJSON

func (r *AssistantToolChoiceFunction) UnmarshalJSON(data []byte) (err error)

type AssistantToolChoiceFunctionParam

type AssistantToolChoiceFunctionParam struct {
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

func (AssistantToolChoiceFunctionParam) MarshalJSON

func (r AssistantToolChoiceFunctionParam) MarshalJSON() (data []byte, err error)

type AssistantToolChoiceOptionString

type AssistantToolChoiceOptionString string

`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.

const (
	AssistantToolChoiceOptionStringNone     AssistantToolChoiceOptionString = "none"
	AssistantToolChoiceOptionStringAuto     AssistantToolChoiceOptionString = "auto"
	AssistantToolChoiceOptionStringRequired AssistantToolChoiceOptionString = "required"
)

func (AssistantToolChoiceOptionString) IsKnown

type AssistantToolChoiceOptionUnion

type AssistantToolChoiceOptionUnion interface {
	// contains filtered or unexported methods
}

Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value and means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.

Union satisfied by AssistantToolChoiceOptionString or AssistantToolChoice.

type AssistantToolChoiceOptionUnionParam

type AssistantToolChoiceOptionUnionParam interface {
	// contains filtered or unexported methods
}

Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value and means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.

Satisfied by AssistantToolChoiceOptionString, AssistantToolChoiceParam.

type AssistantToolChoiceParam

type AssistantToolChoiceParam struct {
	// The type of the tool. If type is `function`, the function name must be set
	Type     param.Field[AssistantToolChoiceType]          `json:"type,required"`
	Function param.Field[AssistantToolChoiceFunctionParam] `json:"function"`
}

Specifies a tool the model should use. Use to force the model to call a specific tool.

func (AssistantToolChoiceParam) MarshalJSON

func (r AssistantToolChoiceParam) MarshalJSON() (data []byte, err error)

type AssistantToolChoiceType

type AssistantToolChoiceType string

The type of the tool. If type is `function`, the function name must be set

const (
	AssistantToolChoiceTypeFunction        AssistantToolChoiceType = "function"
	AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
	AssistantToolChoiceTypeFileSearch      AssistantToolChoiceType = "file_search"
)

func (AssistantToolChoiceType) IsKnown

func (r AssistantToolChoiceType) IsKnown() bool

type AssistantToolParam

type AssistantToolParam struct {
	// The type of tool being defined: `code_interpreter`
	Type       param.Field[AssistantToolType]              `json:"type,required"`
	FileSearch param.Field[interface{}]                    `json:"file_search,required"`
	Function   param.Field[shared.FunctionDefinitionParam] `json:"function"`
}

func (AssistantToolParam) MarshalJSON

func (r AssistantToolParam) MarshalJSON() (data []byte, err error)

type AssistantToolResources

type AssistantToolResources struct {
	CodeInterpreter AssistantToolResourcesCodeInterpreter `json:"code_interpreter"`
	FileSearch      AssistantToolResourcesFileSearch      `json:"file_search"`
	JSON            assistantToolResourcesJSON            `json:"-"`
}

A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (*AssistantToolResources) UnmarshalJSON

func (r *AssistantToolResources) UnmarshalJSON(data []byte) (err error)

type AssistantToolResourcesCodeInterpreter

type AssistantToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs []string                                  `json:"file_ids"`
	JSON    assistantToolResourcesCodeInterpreterJSON `json:"-"`
}

func (*AssistantToolResourcesCodeInterpreter) UnmarshalJSON

func (r *AssistantToolResourcesCodeInterpreter) UnmarshalJSON(data []byte) (err error)

type AssistantToolResourcesFileSearch

type AssistantToolResourcesFileSearch struct {
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this assistant. There can be a maximum of 1 vector store attached to
	// the assistant.
	VectorStoreIDs []string                             `json:"vector_store_ids"`
	JSON           assistantToolResourcesFileSearchJSON `json:"-"`
}

func (*AssistantToolResourcesFileSearch) UnmarshalJSON

func (r *AssistantToolResourcesFileSearch) UnmarshalJSON(data []byte) (err error)

type AssistantToolType

type AssistantToolType string

The type of tool being defined: `code_interpreter`, `file_search`, or `function`

const (
	AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter"
	AssistantToolTypeFileSearch      AssistantToolType = "file_search"
	AssistantToolTypeFunction        AssistantToolType = "function"
)

func (AssistantToolType) IsKnown

func (r AssistantToolType) IsKnown() bool

type AssistantToolUnion

type AssistantToolUnion interface {
	// contains filtered or unexported methods
}

Union satisfied by CodeInterpreterTool, FileSearchTool or FunctionTool.

type AssistantToolUnionParam

type AssistantToolUnionParam interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam, AssistantToolParam.

type AudioModel

type AudioModel = string
const (
	AudioModelWhisper1 AudioModel = "whisper-1"
)

type AudioService

type AudioService struct {
	Options        []option.RequestOption
	Transcriptions *AudioTranscriptionService
	Translations   *AudioTranslationService
	Speech         *AudioSpeechService
}

AudioService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewAudioService method instead.

func NewAudioService

func NewAudioService(opts ...option.RequestOption) (r *AudioService)

NewAudioService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

type AudioSpeechNewParams

type AudioSpeechNewParams struct {
	// The text to generate audio for. The maximum length is 4096 characters.
	Input param.Field[string] `json:"input,required"`
	// One of the available [TTS models](https://platform.openai.com/docs/models/tts):
	// `tts-1` or `tts-1-hd`
	Model param.Field[SpeechModel] `json:"model,required"`
	// The voice to use when generating the audio. Supported voices are `alloy`,
	// `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
	// available in the
	// [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
	Voice param.Field[AudioSpeechNewParamsVoice] `json:"voice,required"`
	// The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
	// `wav`, and `pcm`.
	ResponseFormat param.Field[AudioSpeechNewParamsResponseFormat] `json:"response_format"`
	// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
	// the default.
	Speed param.Field[float64] `json:"speed"`
}

func (AudioSpeechNewParams) MarshalJSON

func (r AudioSpeechNewParams) MarshalJSON() (data []byte, err error)

type AudioSpeechNewParamsResponseFormat

type AudioSpeechNewParamsResponseFormat string

The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.

const (
	AudioSpeechNewParamsResponseFormatMP3  AudioSpeechNewParamsResponseFormat = "mp3"
	AudioSpeechNewParamsResponseFormatOpus AudioSpeechNewParamsResponseFormat = "opus"
	AudioSpeechNewParamsResponseFormatAAC  AudioSpeechNewParamsResponseFormat = "aac"
	AudioSpeechNewParamsResponseFormatFLAC AudioSpeechNewParamsResponseFormat = "flac"
	AudioSpeechNewParamsResponseFormatWAV  AudioSpeechNewParamsResponseFormat = "wav"
	AudioSpeechNewParamsResponseFormatPCM  AudioSpeechNewParamsResponseFormat = "pcm"
)

func (AudioSpeechNewParamsResponseFormat) IsKnown

type AudioSpeechNewParamsVoice

type AudioSpeechNewParamsVoice string

The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).

const (
	AudioSpeechNewParamsVoiceAlloy   AudioSpeechNewParamsVoice = "alloy"
	AudioSpeechNewParamsVoiceEcho    AudioSpeechNewParamsVoice = "echo"
	AudioSpeechNewParamsVoiceFable   AudioSpeechNewParamsVoice = "fable"
	AudioSpeechNewParamsVoiceOnyx    AudioSpeechNewParamsVoice = "onyx"
	AudioSpeechNewParamsVoiceNova    AudioSpeechNewParamsVoice = "nova"
	AudioSpeechNewParamsVoiceShimmer AudioSpeechNewParamsVoice = "shimmer"
)

func (AudioSpeechNewParamsVoice) IsKnown

func (r AudioSpeechNewParamsVoice) IsKnown() bool

type AudioSpeechService

type AudioSpeechService struct {
	Options []option.RequestOption
}

AudioSpeechService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewAudioSpeechService method instead.

func NewAudioSpeechService

func NewAudioSpeechService(opts ...option.RequestOption) (r *AudioSpeechService)

NewAudioSpeechService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*AudioSpeechService) New

Generates audio from the input text.

type AudioTranscriptionNewParams

type AudioTranscriptionNewParams struct {
	// The audio file object (not file name) to transcribe, in one of these formats:
	// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
	File param.Field[io.Reader] `json:"file,required" format:"binary"`
	// ID of the model to use. Only `whisper-1` (which is powered by our open source
	// Whisper V2 model) is currently available.
	Model param.Field[AudioModel] `json:"model,required"`
	// The language of the input audio. Supplying the input language in
	// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
	// improve accuracy and latency.
	Language param.Field[string] `json:"language"`
	// An optional text to guide the model's style or continue a previous audio
	// segment. The
	// [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
	// should match the audio language.
	Prompt param.Field[string] `json:"prompt"`
	// The format of the transcript output, in one of these options: `json`, `text`,
	// `srt`, `verbose_json`, or `vtt`.
	ResponseFormat param.Field[AudioTranscriptionNewParamsResponseFormat] `json:"response_format"`
	// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
	// output more random, while lower values like 0.2 will make it more focused and
	// deterministic. If set to 0, the model will use
	// [log probability](https://en.wikipedia.org/wiki/Log_probability) to
	// automatically increase the temperature until certain thresholds are hit.
	Temperature param.Field[float64] `json:"temperature"`
	// The timestamp granularities to populate for this transcription.
	// `response_format` must be set to `verbose_json` to use timestamp granularities.
	// Either or both of these options are supported: `word`, or `segment`. Note: There
	// is no additional latency for segment timestamps, but generating word timestamps
	// incurs additional latency.
	TimestampGranularities param.Field[[]AudioTranscriptionNewParamsTimestampGranularity] `json:"timestamp_granularities"`
}

func (AudioTranscriptionNewParams) MarshalMultipart

func (r AudioTranscriptionNewParams) MarshalMultipart() (data []byte, contentType string, err error)

type AudioTranscriptionNewParamsResponseFormat

type AudioTranscriptionNewParamsResponseFormat string

The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.

const (
	AudioTranscriptionNewParamsResponseFormatJSON        AudioTranscriptionNewParamsResponseFormat = "json"
	AudioTranscriptionNewParamsResponseFormatText        AudioTranscriptionNewParamsResponseFormat = "text"
	AudioTranscriptionNewParamsResponseFormatSRT         AudioTranscriptionNewParamsResponseFormat = "srt"
	AudioTranscriptionNewParamsResponseFormatVerboseJSON AudioTranscriptionNewParamsResponseFormat = "verbose_json"
	AudioTranscriptionNewParamsResponseFormatVTT         AudioTranscriptionNewParamsResponseFormat = "vtt"
)

func (AudioTranscriptionNewParamsResponseFormat) IsKnown

type AudioTranscriptionNewParamsTimestampGranularity

type AudioTranscriptionNewParamsTimestampGranularity string
const (
	AudioTranscriptionNewParamsTimestampGranularityWord    AudioTranscriptionNewParamsTimestampGranularity = "word"
	AudioTranscriptionNewParamsTimestampGranularitySegment AudioTranscriptionNewParamsTimestampGranularity = "segment"
)

func (AudioTranscriptionNewParamsTimestampGranularity) IsKnown

type AudioTranscriptionService

type AudioTranscriptionService struct {
	Options []option.RequestOption
}

AudioTranscriptionService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewAudioTranscriptionService method instead.

func NewAudioTranscriptionService

func NewAudioTranscriptionService(opts ...option.RequestOption) (r *AudioTranscriptionService)

NewAudioTranscriptionService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*AudioTranscriptionService) New

Transcribes audio into the input language.

type AudioTranslationNewParams

type AudioTranslationNewParams struct {
	// The audio file object (not file name) to translate, in one of these formats: flac,
	// mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
	File param.Field[io.Reader] `json:"file,required" format:"binary"`
	// ID of the model to use. Only `whisper-1` (which is powered by our open source
	// Whisper V2 model) is currently available.
	Model param.Field[AudioModel] `json:"model,required"`
	// An optional text to guide the model's style or continue a previous audio
	// segment. The
	// [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
	// should be in English.
	Prompt param.Field[string] `json:"prompt"`
	// The format of the transcript output, in one of these options: `json`, `text`,
	// `srt`, `verbose_json`, or `vtt`.
	ResponseFormat param.Field[string] `json:"response_format"`
	// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
	// output more random, while lower values like 0.2 will make it more focused and
	// deterministic. If set to 0, the model will use
	// [log probability](https://en.wikipedia.org/wiki/Log_probability) to
	// automatically increase the temperature until certain thresholds are hit.
	Temperature param.Field[float64] `json:"temperature"`
}

func (AudioTranslationNewParams) MarshalMultipart

func (r AudioTranslationNewParams) MarshalMultipart() (data []byte, contentType string, err error)

type AudioTranslationService

type AudioTranslationService struct {
	Options []option.RequestOption
}

AudioTranslationService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewAudioTranslationService method instead.

func NewAudioTranslationService

func NewAudioTranslationService(opts ...option.RequestOption) (r *AudioTranslationService)

NewAudioTranslationService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*AudioTranslationService) New

Translates audio into English.

type AutoFileChunkingStrategyParam

type AutoFileChunkingStrategyParam struct {
	// Always `auto`.
	Type param.Field[AutoFileChunkingStrategyParamType] `json:"type,required"`
}

The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.

func (AutoFileChunkingStrategyParam) MarshalJSON

func (r AutoFileChunkingStrategyParam) MarshalJSON() (data []byte, err error)

type AutoFileChunkingStrategyParamType

type AutoFileChunkingStrategyParamType string

Always `auto`.

const (
	AutoFileChunkingStrategyParamTypeAuto AutoFileChunkingStrategyParamType = "auto"
)

func (AutoFileChunkingStrategyParamType) IsKnown

type Batch

type Batch struct {
	ID string `json:"id,required"`
	// The time frame within which the batch should be processed.
	CompletionWindow string `json:"completion_window,required"`
	// The Unix timestamp (in seconds) for when the batch was created.
	CreatedAt int64 `json:"created_at,required"`
	// The OpenAI API endpoint used by the batch.
	Endpoint string `json:"endpoint,required"`
	// The ID of the input file for the batch.
	InputFileID string `json:"input_file_id,required"`
	// The object type, which is always `batch`.
	Object BatchObject `json:"object,required"`
	// The current status of the batch.
	Status BatchStatus `json:"status,required"`
	// The Unix timestamp (in seconds) for when the batch was cancelled.
	CancelledAt int64 `json:"cancelled_at"`
	// The Unix timestamp (in seconds) for when the batch started cancelling.
	CancellingAt int64 `json:"cancelling_at"`
	// The Unix timestamp (in seconds) for when the batch was completed.
	CompletedAt int64 `json:"completed_at"`
	// The ID of the file containing the outputs of requests with errors.
	ErrorFileID string      `json:"error_file_id"`
	Errors      BatchErrors `json:"errors"`
	// The Unix timestamp (in seconds) for when the batch expired.
	ExpiredAt int64 `json:"expired_at"`
	// The Unix timestamp (in seconds) for when the batch will expire.
	ExpiresAt int64 `json:"expires_at"`
	// The Unix timestamp (in seconds) for when the batch failed.
	FailedAt int64 `json:"failed_at"`
	// The Unix timestamp (in seconds) for when the batch started finalizing.
	FinalizingAt int64 `json:"finalizing_at"`
	// The Unix timestamp (in seconds) for when the batch started processing.
	InProgressAt int64 `json:"in_progress_at"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,nullable"`
	// The ID of the file containing the outputs of successfully executed requests.
	OutputFileID string `json:"output_file_id"`
	// The request counts for different statuses within the batch.
	RequestCounts BatchRequestCounts `json:"request_counts"`
	JSON          batchJSON          `json:"-"`
}

func (*Batch) UnmarshalJSON

func (r *Batch) UnmarshalJSON(data []byte) (err error)

type BatchError

type BatchError struct {
	// An error code identifying the error type.
	Code string `json:"code"`
	// The line number of the input file where the error occurred, if applicable.
	Line int64 `json:"line,nullable"`
	// A human-readable message providing more details about the error.
	Message string `json:"message"`
	// The name of the parameter that caused the error, if applicable.
	Param string         `json:"param,nullable"`
	JSON  batchErrorJSON `json:"-"`
}

func (*BatchError) UnmarshalJSON

func (r *BatchError) UnmarshalJSON(data []byte) (err error)

type BatchErrors

type BatchErrors struct {
	Data []BatchError `json:"data"`
	// The object type, which is always `list`.
	Object string          `json:"object"`
	JSON   batchErrorsJSON `json:"-"`
}

func (*BatchErrors) UnmarshalJSON

func (r *BatchErrors) UnmarshalJSON(data []byte) (err error)

type BatchListParams

type BatchListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
}

func (BatchListParams) URLQuery

func (r BatchListParams) URLQuery() (v url.Values)

URLQuery serializes BatchListParams's query parameters as `url.Values`.

type BatchNewParams

type BatchNewParams struct {
	// The time frame within which the batch should be processed. Currently only `24h`
	// is supported.
	CompletionWindow param.Field[BatchNewParamsCompletionWindow] `json:"completion_window,required"`
	// The endpoint to be used for all requests in the batch. Currently
	// `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
	// Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
	// embedding inputs across all requests in the batch.
	Endpoint param.Field[BatchNewParamsEndpoint] `json:"endpoint,required"`
	// The ID of an uploaded file that contains requests for the new batch.
	//
	// See [upload file](https://platform.openai.com/docs/api-reference/files/create)
	// for how to upload a file.
	//
	// Your input file must be formatted as a
	// [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
	// and must be uploaded with the purpose `batch`. The file can contain up to 50,000
	// requests, and can be up to 100 MB in size.
	InputFileID param.Field[string] `json:"input_file_id,required"`
	// Optional custom metadata for the batch.
	Metadata param.Field[map[string]string] `json:"metadata"`
}

func (BatchNewParams) MarshalJSON

func (r BatchNewParams) MarshalJSON() (data []byte, err error)

type BatchNewParamsCompletionWindow

type BatchNewParamsCompletionWindow string

The time frame within which the batch should be processed. Currently only `24h` is supported.

const (
	BatchNewParamsCompletionWindow24h BatchNewParamsCompletionWindow = "24h"
)

func (BatchNewParamsCompletionWindow) IsKnown

type BatchNewParamsEndpoint

type BatchNewParamsEndpoint string

The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.

const (
	BatchNewParamsEndpointV1ChatCompletions BatchNewParamsEndpoint = "/v1/chat/completions"
	BatchNewParamsEndpointV1Embeddings      BatchNewParamsEndpoint = "/v1/embeddings"
	BatchNewParamsEndpointV1Completions     BatchNewParamsEndpoint = "/v1/completions"
)

func (BatchNewParamsEndpoint) IsKnown

func (r BatchNewParamsEndpoint) IsKnown() bool

type BatchObject

type BatchObject string

The object type, which is always `batch`.

const (
	BatchObjectBatch BatchObject = "batch"
)

func (BatchObject) IsKnown

func (r BatchObject) IsKnown() bool

type BatchRequestCounts

type BatchRequestCounts struct {
	// Number of requests that have been completed successfully.
	Completed int64 `json:"completed,required"`
	// Number of requests that have failed.
	Failed int64 `json:"failed,required"`
	// Total number of requests in the batch.
	Total int64                  `json:"total,required"`
	JSON  batchRequestCountsJSON `json:"-"`
}

The request counts for different statuses within the batch.

func (*BatchRequestCounts) UnmarshalJSON

func (r *BatchRequestCounts) UnmarshalJSON(data []byte) (err error)

type BatchService

type BatchService struct {
	Options []option.RequestOption
}

BatchService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBatchService method instead.

func NewBatchService

func NewBatchService(opts ...option.RequestOption) (r *BatchService)

NewBatchService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BatchService) Cancel

func (r *BatchService) Cancel(ctx context.Context, batchID string, opts ...option.RequestOption) (res *Batch, err error)

Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.

func (*BatchService) Get

func (r *BatchService) Get(ctx context.Context, batchID string, opts ...option.RequestOption) (res *Batch, err error)

Retrieves a batch.

func (*BatchService) List

func (r *BatchService) List(ctx context.Context, query BatchListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Batch], err error)

List your organization's batches.

func (*BatchService) ListAutoPaging

List your organization's batches.

func (*BatchService) New

func (r *BatchService) New(ctx context.Context, body BatchNewParams, opts ...option.RequestOption) (res *Batch, err error)

Creates and executes a batch from an uploaded file of requests.

type BatchStatus

type BatchStatus string

The current status of the batch.

const (
	BatchStatusValidating BatchStatus = "validating"
	BatchStatusFailed     BatchStatus = "failed"
	BatchStatusInProgress BatchStatus = "in_progress"
	BatchStatusFinalizing BatchStatus = "finalizing"
	BatchStatusCompleted  BatchStatus = "completed"
	BatchStatusExpired    BatchStatus = "expired"
	BatchStatusCancelling BatchStatus = "cancelling"
	BatchStatusCancelled  BatchStatus = "cancelled"
)

func (BatchStatus) IsKnown

func (r BatchStatus) IsKnown() bool

type BetaAssistantListParams

type BetaAssistantListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaAssistantListParamsOrder] `query:"order"`
}

func (BetaAssistantListParams) URLQuery

func (r BetaAssistantListParams) URLQuery() (v url.Values)

URLQuery serializes BetaAssistantListParams's query parameters as `url.Values`.

type BetaAssistantListParamsOrder

type BetaAssistantListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaAssistantListParamsOrderAsc  BetaAssistantListParamsOrder = "asc"
	BetaAssistantListParamsOrderDesc BetaAssistantListParamsOrder = "desc"
)

func (BetaAssistantListParamsOrder) IsKnown

func (r BetaAssistantListParamsOrder) IsKnown() bool

type BetaAssistantNewParams

type BetaAssistantNewParams struct {
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models/overview) for
	// descriptions of them.
	Model param.Field[ChatModel] `json:"model,required"`
	// The description of the assistant. The maximum length is 512 characters.
	Description param.Field[string] `json:"description"`
	// The system instructions that the assistant uses. The maximum length is 256,000
	// characters.
	Instructions param.Field[string] `json:"instructions"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// The name of the assistant. The maximum length is 256 characters.
	Name param.Field[string] `json:"name"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Field[float64] `json:"temperature"`
	// A set of resources that are used by the assistant's tools. The resources are
	// specific to the type of tool. For example, the `code_interpreter` tool requires
	// a list of file IDs, while the `file_search` tool requires a list of vector store
	// IDs.
	ToolResources param.Field[BetaAssistantNewParamsToolResources] `json:"tool_resources"`
	// A list of tools enabled on the assistant. There can be a maximum of 128 tools per
	// assistant. Tools can be of types `code_interpreter`, `file_search`, or
	// `function`.
	Tools param.Field[[]AssistantToolUnionParam] `json:"tools"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Field[float64] `json:"top_p"`
}

func (BetaAssistantNewParams) MarshalJSON

func (r BetaAssistantNewParams) MarshalJSON() (data []byte, err error)

type BetaAssistantNewParamsToolResources

type BetaAssistantNewParamsToolResources struct {
	CodeInterpreter param.Field[BetaAssistantNewParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaAssistantNewParamsToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaAssistantNewParamsToolResources) MarshalJSON

func (r BetaAssistantNewParamsToolResources) MarshalJSON() (data []byte, err error)

type BetaAssistantNewParamsToolResourcesCodeInterpreter

type BetaAssistantNewParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaAssistantNewParamsToolResourcesCodeInterpreter) MarshalJSON

func (r BetaAssistantNewParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error)

type BetaAssistantNewParamsToolResourcesFileSearch

type BetaAssistantNewParamsToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this assistant. There can be a maximum of 1 vector store attached to
	// the assistant.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
	// A helper to create a
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// with file_ids and attach it to this assistant. There can be a maximum of 1
	// vector store attached to the assistant.
	VectorStores param.Field[[]BetaAssistantNewParamsToolResourcesFileSearchVectorStore] `json:"vector_stores"`
}

func (BetaAssistantNewParamsToolResourcesFileSearch) MarshalJSON

func (r BetaAssistantNewParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error)

type BetaAssistantNewParamsToolResourcesFileSearchVectorStore

type BetaAssistantNewParamsToolResourcesFileSearchVectorStore struct {
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
	// add to the vector store. There can be a maximum of 10000 files in a vector
	// store.
	FileIDs param.Field[[]string] `json:"file_ids"`
	// Set of 16 key-value pairs that can be attached to a vector store. This can be
	// useful for storing additional information about the vector store in a structured
	// format. Keys can be a maximum of 64 characters long and values can be a maximum
	// of 512 characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaAssistantNewParamsToolResourcesFileSearchVectorStore) MarshalJSON

type BetaAssistantService

type BetaAssistantService struct {
	Options []option.RequestOption
}

BetaAssistantService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBetaAssistantService method instead.

func NewBetaAssistantService

func NewBetaAssistantService(opts ...option.RequestOption) (r *BetaAssistantService)

NewBetaAssistantService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaAssistantService) Delete

func (r *BetaAssistantService) Delete(ctx context.Context, assistantID string, opts ...option.RequestOption) (res *AssistantDeleted, err error)

Delete an assistant.

func (*BetaAssistantService) Get

func (r *BetaAssistantService) Get(ctx context.Context, assistantID string, opts ...option.RequestOption) (res *Assistant, err error)

Retrieves an assistant.

func (*BetaAssistantService) List

Returns a list of assistants.

func (*BetaAssistantService) ListAutoPaging

Returns a list of assistants.

func (*BetaAssistantService) New

Create an assistant with a model and instructions.

func (*BetaAssistantService) Update

func (r *BetaAssistantService) Update(ctx context.Context, assistantID string, body BetaAssistantUpdateParams, opts ...option.RequestOption) (res *Assistant, err error)

Modifies an assistant.

type BetaAssistantUpdateParams

type BetaAssistantUpdateParams struct {
	// The description of the assistant. The maximum length is 512 characters.
	Description param.Field[string] `json:"description"`
	// The system instructions that the assistant uses. The maximum length is 256,000
	// characters.
	Instructions param.Field[string] `json:"instructions"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models/overview) for
	// descriptions of them.
	Model param.Field[string] `json:"model"`
	// The name of the assistant. The maximum length is 256 characters.
	Name param.Field[string] `json:"name"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Field[float64] `json:"temperature"`
	// A set of resources that are used by the assistant's tools. The resources are
	// specific to the type of tool. For example, the `code_interpreter` tool requires
	// a list of file IDs, while the `file_search` tool requires a list of vector store
	// IDs.
	ToolResources param.Field[BetaAssistantUpdateParamsToolResources] `json:"tool_resources"`
	// A list of tools enabled on the assistant. There can be a maximum of 128 tools per
	// assistant. Tools can be of types `code_interpreter`, `file_search`, or
	// `function`.
	Tools param.Field[[]AssistantToolUnionParam] `json:"tools"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Field[float64] `json:"top_p"`
}

func (BetaAssistantUpdateParams) MarshalJSON

func (r BetaAssistantUpdateParams) MarshalJSON() (data []byte, err error)

type BetaAssistantUpdateParamsToolResources

type BetaAssistantUpdateParamsToolResources struct {
	CodeInterpreter param.Field[BetaAssistantUpdateParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaAssistantUpdateParamsToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaAssistantUpdateParamsToolResources) MarshalJSON

func (r BetaAssistantUpdateParamsToolResources) MarshalJSON() (data []byte, err error)

type BetaAssistantUpdateParamsToolResourcesCodeInterpreter

type BetaAssistantUpdateParamsToolResourcesCodeInterpreter struct {
	// Overrides the list of
	// [file](https://platform.openai.com/docs/api-reference/files) IDs made available
	// to the `code_interpreter` tool. There can be a maximum of 20 files associated
	// with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaAssistantUpdateParamsToolResourcesCodeInterpreter) MarshalJSON

type BetaAssistantUpdateParamsToolResourcesFileSearch

type BetaAssistantUpdateParamsToolResourcesFileSearch struct {
	// Overrides the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this assistant. There can be a maximum of 1 vector store attached to
	// the assistant.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
}

func (BetaAssistantUpdateParamsToolResourcesFileSearch) MarshalJSON

func (r BetaAssistantUpdateParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error)

type BetaService

type BetaService struct {
	Options      []option.RequestOption
	VectorStores *BetaVectorStoreService
	Assistants   *BetaAssistantService
	Threads      *BetaThreadService
}

BetaService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBetaService method instead.

func NewBetaService

func NewBetaService(opts ...option.RequestOption) (r *BetaService)

NewBetaService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

type BetaThreadMessageListParams

type BetaThreadMessageListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaThreadMessageListParamsOrder] `query:"order"`
	// Filter messages by the run ID that generated them.
	RunID param.Field[string] `query:"run_id"`
}

func (BetaThreadMessageListParams) URLQuery

func (r BetaThreadMessageListParams) URLQuery() (v url.Values)

URLQuery serializes BetaThreadMessageListParams's query parameters as `url.Values`.

type BetaThreadMessageListParamsOrder

type BetaThreadMessageListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaThreadMessageListParamsOrderAsc  BetaThreadMessageListParamsOrder = "asc"
	BetaThreadMessageListParamsOrderDesc BetaThreadMessageListParamsOrder = "desc"
)

func (BetaThreadMessageListParamsOrder) IsKnown

type BetaThreadMessageNewParams

type BetaThreadMessageNewParams struct {
	// An array of content parts with a defined type, each can be of type `text` or
	// images can be passed with `image_url` or `image_file`. Image types are only
	// supported on
	// [Vision-compatible models](https://platform.openai.com/docs/models/overview).
	Content param.Field[[]MessageContentPartParamUnion] `json:"content,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	Role param.Field[BetaThreadMessageNewParamsRole] `json:"role,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments param.Field[[]BetaThreadMessageNewParamsAttachment] `json:"attachments"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadMessageNewParams) MarshalJSON

func (r BetaThreadMessageNewParams) MarshalJSON() (data []byte, err error)

type BetaThreadMessageNewParamsAttachment

type BetaThreadMessageNewParamsAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Field[string] `json:"file_id"`
	// The tools to add this file to.
	Tools param.Field[[]BetaThreadMessageNewParamsAttachmentsToolUnion] `json:"tools"`
}

func (BetaThreadMessageNewParamsAttachment) MarshalJSON

func (r BetaThreadMessageNewParamsAttachment) MarshalJSON() (data []byte, err error)

type BetaThreadMessageNewParamsAttachmentsTool

type BetaThreadMessageNewParamsAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type param.Field[BetaThreadMessageNewParamsAttachmentsToolsType] `json:"type,required"`
}

func (BetaThreadMessageNewParamsAttachmentsTool) MarshalJSON

func (r BetaThreadMessageNewParamsAttachmentsTool) MarshalJSON() (data []byte, err error)

type BetaThreadMessageNewParamsAttachmentsToolUnion

type BetaThreadMessageNewParamsAttachmentsToolUnion interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, BetaThreadMessageNewParamsAttachmentsToolsFileSearch, BetaThreadMessageNewParamsAttachmentsTool.

type BetaThreadMessageNewParamsAttachmentsToolsFileSearch

type BetaThreadMessageNewParamsAttachmentsToolsFileSearch struct {
	// The type of tool being defined: `file_search`
	Type param.Field[BetaThreadMessageNewParamsAttachmentsToolsFileSearchType] `json:"type,required"`
}

func (BetaThreadMessageNewParamsAttachmentsToolsFileSearch) MarshalJSON

func (r BetaThreadMessageNewParamsAttachmentsToolsFileSearch) MarshalJSON() (data []byte, err error)

type BetaThreadMessageNewParamsAttachmentsToolsFileSearchType

type BetaThreadMessageNewParamsAttachmentsToolsFileSearchType string

The type of tool being defined: `file_search`

const (
	BetaThreadMessageNewParamsAttachmentsToolsFileSearchTypeFileSearch BetaThreadMessageNewParamsAttachmentsToolsFileSearchType = "file_search"
)

func (BetaThreadMessageNewParamsAttachmentsToolsFileSearchType) IsKnown

type BetaThreadMessageNewParamsAttachmentsToolsType

type BetaThreadMessageNewParamsAttachmentsToolsType string

The type of tool being defined: `code_interpreter`

const (
	BetaThreadMessageNewParamsAttachmentsToolsTypeCodeInterpreter BetaThreadMessageNewParamsAttachmentsToolsType = "code_interpreter"
	BetaThreadMessageNewParamsAttachmentsToolsTypeFileSearch      BetaThreadMessageNewParamsAttachmentsToolsType = "file_search"
)

func (BetaThreadMessageNewParamsAttachmentsToolsType) IsKnown

type BetaThreadMessageNewParamsRole

type BetaThreadMessageNewParamsRole string

The role of the entity that is creating the message. Allowed values include:

  • `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.
  • `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
const (
	BetaThreadMessageNewParamsRoleUser      BetaThreadMessageNewParamsRole = "user"
	BetaThreadMessageNewParamsRoleAssistant BetaThreadMessageNewParamsRole = "assistant"
)

func (BetaThreadMessageNewParamsRole) IsKnown

type BetaThreadMessageService

type BetaThreadMessageService struct {
	Options []option.RequestOption
}

BetaThreadMessageService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBetaThreadMessageService method instead.

func NewBetaThreadMessageService

func NewBetaThreadMessageService(opts ...option.RequestOption) (r *BetaThreadMessageService)

NewBetaThreadMessageService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaThreadMessageService) Delete

func (r *BetaThreadMessageService) Delete(ctx context.Context, threadID string, messageID string, opts ...option.RequestOption) (res *MessageDeleted, err error)

Deletes a message.

func (*BetaThreadMessageService) Get

func (r *BetaThreadMessageService) Get(ctx context.Context, threadID string, messageID string, opts ...option.RequestOption) (res *Message, err error)

Retrieve a message.

func (*BetaThreadMessageService) List

Returns a list of messages for a given thread.

func (*BetaThreadMessageService) ListAutoPaging

Returns a list of messages for a given thread.

func (*BetaThreadMessageService) New

Create a message.

func (*BetaThreadMessageService) Update

func (r *BetaThreadMessageService) Update(ctx context.Context, threadID string, messageID string, body BetaThreadMessageUpdateParams, opts ...option.RequestOption) (res *Message, err error)

Modifies a message.

type BetaThreadMessageUpdateParams

type BetaThreadMessageUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadMessageUpdateParams) MarshalJSON

func (r BetaThreadMessageUpdateParams) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParams

type BetaThreadNewAndRunParams struct {
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
	// execute this run.
	AssistantID param.Field[string] `json:"assistant_id,required"`
	// Override the default system message of the assistant. This is useful for
	// modifying the behavior on a per-run basis.
	Instructions param.Field[string] `json:"instructions"`
	// The maximum number of completion tokens that may be used over the course of the
	// run. The run will make a best effort to use only the number of completion tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// completion tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"`
	// The maximum number of prompt tokens that may be used over the course of the run.
	// The run will make a best effort to use only the number of prompt tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// prompt tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxPromptTokens param.Field[int64] `json:"max_prompt_tokens"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
	// be used to execute this run. If a value is provided here, it will override the
	// model associated with the assistant. If not, the model associated with the
	// assistant will be used.
	Model param.Field[ChatModel] `json:"model"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Field[float64] `json:"temperature"`
	// If no thread is provided, an empty thread will be created.
	Thread param.Field[BetaThreadNewAndRunParamsThread] `json:"thread"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice param.Field[AssistantToolChoiceOptionUnionParam] `json:"tool_choice"`
	// A set of resources that are used by the assistant's tools. The resources are
	// specific to the type of tool. For example, the `code_interpreter` tool requires
	// a list of file IDs, while the `file_search` tool requires a list of vector store
	// IDs.
	ToolResources param.Field[BetaThreadNewAndRunParamsToolResources] `json:"tool_resources"`
	// Override the tools the assistant can use for this run. This is useful for
	// modifying the behavior on a per-run basis.
	Tools param.Field[[]BetaThreadNewAndRunParamsToolUnion] `json:"tools"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Field[float64] `json:"top_p"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy param.Field[BetaThreadNewAndRunParamsTruncationStrategy] `json:"truncation_strategy"`
}

func (BetaThreadNewAndRunParams) MarshalJSON

func (r BetaThreadNewAndRunParams) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsThread

type BetaThreadNewAndRunParamsThread struct {
	// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
	// start the thread with.
	Messages param.Field[[]BetaThreadNewAndRunParamsThreadMessage] `json:"messages"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources param.Field[BetaThreadNewAndRunParamsThreadToolResources] `json:"tool_resources"`
}

If no thread is provided, an empty thread will be created.

func (BetaThreadNewAndRunParamsThread) MarshalJSON

func (r BetaThreadNewAndRunParamsThread) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsThreadMessage

type BetaThreadNewAndRunParamsThreadMessage struct {
	// An array of content parts with a defined type, each can be of type `text` or
	// images can be passed with `image_url` or `image_file`. Image types are only
	// supported on
	// [Vision-compatible models](https://platform.openai.com/docs/models/overview).
	Content param.Field[[]MessageContentPartParamUnion] `json:"content,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	Role param.Field[BetaThreadNewAndRunParamsThreadMessagesRole] `json:"role,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments param.Field[[]BetaThreadNewAndRunParamsThreadMessagesAttachment] `json:"attachments"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadNewAndRunParamsThreadMessage) MarshalJSON

func (r BetaThreadNewAndRunParamsThreadMessage) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsThreadMessagesAttachment

type BetaThreadNewAndRunParamsThreadMessagesAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Field[string] `json:"file_id"`
	// The tools to add this file to.
	Tools param.Field[[]BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion] `json:"tools"`
}

func (BetaThreadNewAndRunParamsThreadMessagesAttachment) MarshalJSON

func (r BetaThreadNewAndRunParamsThreadMessagesAttachment) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type param.Field[BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType] `json:"type,required"`
}

func (BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool) MarshalJSON

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolUnion interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch, BetaThreadNewAndRunParamsThreadMessagesAttachmentsTool.

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch struct {
	// The type of tool being defined: `file_search`
	Type param.Field[BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType] `json:"type,required"`
}

func (BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearch) MarshalJSON

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType string

The type of tool being defined: `file_search`

const (
	BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType = "file_search"
)

func (BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsFileSearchType) IsKnown

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType

type BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType string

The type of tool being defined: `code_interpreter`

const (
	BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType = "code_interpreter"
	BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsTypeFileSearch      BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType = "file_search"
)

func (BetaThreadNewAndRunParamsThreadMessagesAttachmentsToolsType) IsKnown

type BetaThreadNewAndRunParamsThreadMessagesRole

type BetaThreadNewAndRunParamsThreadMessagesRole string

The role of the entity that is creating the message. Allowed values include:

  • `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.
  • `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
const (
	BetaThreadNewAndRunParamsThreadMessagesRoleUser      BetaThreadNewAndRunParamsThreadMessagesRole = "user"
	BetaThreadNewAndRunParamsThreadMessagesRoleAssistant BetaThreadNewAndRunParamsThreadMessagesRole = "assistant"
)

func (BetaThreadNewAndRunParamsThreadMessagesRole) IsKnown

type BetaThreadNewAndRunParamsThreadToolResources

type BetaThreadNewAndRunParamsThreadToolResources struct {
	CodeInterpreter param.Field[BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadNewAndRunParamsThreadToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaThreadNewAndRunParamsThreadToolResources) MarshalJSON

func (r BetaThreadNewAndRunParamsThreadToolResources) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter

type BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreter) MarshalJSON

type BetaThreadNewAndRunParamsThreadToolResourcesFileSearch

type BetaThreadNewAndRunParamsThreadToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
	// A helper to create a
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// with file_ids and attach it to this thread. There can be a maximum of 1 vector
	// store attached to the thread.
	VectorStores param.Field[[]BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore] `json:"vector_stores"`
}

func (BetaThreadNewAndRunParamsThreadToolResourcesFileSearch) MarshalJSON

type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore

type BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore struct {
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
	// add to the vector store. There can be a maximum of 10000 files in a vector
	// store.
	FileIDs param.Field[[]string] `json:"file_ids"`
	// Set of 16 key-value pairs that can be attached to a vector store. This can be
	// useful for storing additional information about the vector store in a structured
	// format. Keys can be a maximum of 64 characters long and values can be a maximum
	// of 512 characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadNewAndRunParamsThreadToolResourcesFileSearchVectorStore) MarshalJSON

type BetaThreadNewAndRunParamsTool

type BetaThreadNewAndRunParamsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type       param.Field[BetaThreadNewAndRunParamsToolsType] `json:"type,required"`
	FileSearch param.Field[interface{}]                        `json:"file_search,required"`
	Function   param.Field[shared.FunctionDefinitionParam]     `json:"function"`
}

func (BetaThreadNewAndRunParamsTool) MarshalJSON

func (r BetaThreadNewAndRunParamsTool) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsToolResources

type BetaThreadNewAndRunParamsToolResources struct {
	CodeInterpreter param.Field[BetaThreadNewAndRunParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadNewAndRunParamsToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaThreadNewAndRunParamsToolResources) MarshalJSON

func (r BetaThreadNewAndRunParamsToolResources) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsToolResourcesCodeInterpreter

type BetaThreadNewAndRunParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaThreadNewAndRunParamsToolResourcesCodeInterpreter) MarshalJSON

type BetaThreadNewAndRunParamsToolResourcesFileSearch

type BetaThreadNewAndRunParamsToolResourcesFileSearch struct {
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this assistant. There can be a maximum of 1 vector store attached to
	// the assistant.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
}

func (BetaThreadNewAndRunParamsToolResourcesFileSearch) MarshalJSON

func (r BetaThreadNewAndRunParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsToolUnion

type BetaThreadNewAndRunParamsToolUnion interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam, BetaThreadNewAndRunParamsTool.

type BetaThreadNewAndRunParamsToolsType

type BetaThreadNewAndRunParamsToolsType string

The type of tool being defined: `code_interpreter`

const (
	BetaThreadNewAndRunParamsToolsTypeCodeInterpreter BetaThreadNewAndRunParamsToolsType = "code_interpreter"
	BetaThreadNewAndRunParamsToolsTypeFileSearch      BetaThreadNewAndRunParamsToolsType = "file_search"
	BetaThreadNewAndRunParamsToolsTypeFunction        BetaThreadNewAndRunParamsToolsType = "function"
)

func (BetaThreadNewAndRunParamsToolsType) IsKnown

type BetaThreadNewAndRunParamsTruncationStrategy

type BetaThreadNewAndRunParamsTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	Type param.Field[BetaThreadNewAndRunParamsTruncationStrategyType] `json:"type,required"`
	// The number of most recent messages from the thread when constructing the context
	// for the run.
	LastMessages param.Field[int64] `json:"last_messages"`
}

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

func (BetaThreadNewAndRunParamsTruncationStrategy) MarshalJSON

func (r BetaThreadNewAndRunParamsTruncationStrategy) MarshalJSON() (data []byte, err error)

type BetaThreadNewAndRunParamsTruncationStrategyType

type BetaThreadNewAndRunParamsTruncationStrategyType string

The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.

const (
	BetaThreadNewAndRunParamsTruncationStrategyTypeAuto         BetaThreadNewAndRunParamsTruncationStrategyType = "auto"
	BetaThreadNewAndRunParamsTruncationStrategyTypeLastMessages BetaThreadNewAndRunParamsTruncationStrategyType = "last_messages"
)

func (BetaThreadNewAndRunParamsTruncationStrategyType) IsKnown

type BetaThreadNewParams

// BetaThreadNewParams are the request parameters for creating a thread.
type BetaThreadNewParams struct {
	// A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
	// start the thread with.
	Messages param.Field[[]BetaThreadNewParamsMessage] `json:"messages"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources param.Field[BetaThreadNewParamsToolResources] `json:"tool_resources"`
}

func (BetaThreadNewParams) MarshalJSON

func (r BetaThreadNewParams) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsMessage

// BetaThreadNewParamsMessage is a message used to start the thread with.
type BetaThreadNewParamsMessage struct {
	// An array of content parts with a defined type, each can be of type `text` or
	// images can be passed with `image_url` or `image_file`. Image types are only
	// supported on
	// [Vision-compatible models](https://platform.openai.com/docs/models/overview).
	Content param.Field[[]MessageContentPartParamUnion] `json:"content,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	Role param.Field[BetaThreadNewParamsMessagesRole] `json:"role,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments param.Field[[]BetaThreadNewParamsMessagesAttachment] `json:"attachments"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadNewParamsMessage) MarshalJSON

func (r BetaThreadNewParamsMessage) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsMessagesAttachment

type BetaThreadNewParamsMessagesAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Field[string] `json:"file_id"`
	// The tools to add this file to.
	Tools param.Field[[]BetaThreadNewParamsMessagesAttachmentsToolUnion] `json:"tools"`
}

func (BetaThreadNewParamsMessagesAttachment) MarshalJSON

func (r BetaThreadNewParamsMessagesAttachment) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsMessagesAttachmentsTool

type BetaThreadNewParamsMessagesAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type param.Field[BetaThreadNewParamsMessagesAttachmentsToolsType] `json:"type,required"`
}

func (BetaThreadNewParamsMessagesAttachmentsTool) MarshalJSON

func (r BetaThreadNewParamsMessagesAttachmentsTool) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsMessagesAttachmentsToolUnion

type BetaThreadNewParamsMessagesAttachmentsToolUnion interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, BetaThreadNewParamsMessagesAttachmentsToolsFileSearch, BetaThreadNewParamsMessagesAttachmentsTool.

type BetaThreadNewParamsMessagesAttachmentsToolsFileSearch

type BetaThreadNewParamsMessagesAttachmentsToolsFileSearch struct {
	// The type of tool being defined: `file_search`
	Type param.Field[BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType] `json:"type,required"`
}

func (BetaThreadNewParamsMessagesAttachmentsToolsFileSearch) MarshalJSON

type BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType

type BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType string

The type of tool being defined: `file_search`

const (
	BetaThreadNewParamsMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType = "file_search"
)

func (BetaThreadNewParamsMessagesAttachmentsToolsFileSearchType) IsKnown

type BetaThreadNewParamsMessagesAttachmentsToolsType

type BetaThreadNewParamsMessagesAttachmentsToolsType string

The type of tool being defined: `code_interpreter`

const (
	BetaThreadNewParamsMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadNewParamsMessagesAttachmentsToolsType = "code_interpreter"
	BetaThreadNewParamsMessagesAttachmentsToolsTypeFileSearch      BetaThreadNewParamsMessagesAttachmentsToolsType = "file_search"
)

func (BetaThreadNewParamsMessagesAttachmentsToolsType) IsKnown

type BetaThreadNewParamsMessagesRole

type BetaThreadNewParamsMessagesRole string

The role of the entity that is creating the message. Allowed values include:

  • `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.
  • `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
const (
	BetaThreadNewParamsMessagesRoleUser      BetaThreadNewParamsMessagesRole = "user"
	BetaThreadNewParamsMessagesRoleAssistant BetaThreadNewParamsMessagesRole = "assistant"
)

func (BetaThreadNewParamsMessagesRole) IsKnown

type BetaThreadNewParamsToolResources

// BetaThreadNewParamsToolResources is a set of resources that are made available
// to the assistant's tools in this thread. The resources are specific to the type
// of tool: `code_interpreter` takes a list of file IDs, while `file_search` takes
// a list of vector store IDs.
type BetaThreadNewParamsToolResources struct {
	CodeInterpreter param.Field[BetaThreadNewParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadNewParamsToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaThreadNewParamsToolResources) MarshalJSON

func (r BetaThreadNewParamsToolResources) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsToolResourcesCodeInterpreter

type BetaThreadNewParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaThreadNewParamsToolResourcesCodeInterpreter) MarshalJSON

func (r BetaThreadNewParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsToolResourcesFileSearch

// BetaThreadNewParamsToolResourcesFileSearch configures the vector store(s)
// available to the `file_search` tool for this thread.
type BetaThreadNewParamsToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
	// A helper to create a
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// with file_ids and attach it to this thread. There can be a maximum of 1 vector
	// store attached to the thread.
	VectorStores param.Field[[]BetaThreadNewParamsToolResourcesFileSearchVectorStore] `json:"vector_stores"`
}

func (BetaThreadNewParamsToolResourcesFileSearch) MarshalJSON

func (r BetaThreadNewParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error)

type BetaThreadNewParamsToolResourcesFileSearchVectorStore

// BetaThreadNewParamsToolResourcesFileSearchVectorStore is a helper to create a
// vector store with file_ids and attach it to the thread.
type BetaThreadNewParamsToolResourcesFileSearchVectorStore struct {
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
	// add to the vector store. There can be a maximum of 10000 files in a vector
	// store.
	FileIDs param.Field[[]string] `json:"file_ids"`
	// Set of 16 key-value pairs that can be attached to a vector store. This can be
	// useful for storing additional information about the vector store in a structured
	// format. Keys can be a maximum of 64 characters long and values can be a maximum
	// of 512 characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadNewParamsToolResourcesFileSearchVectorStore) MarshalJSON

type BetaThreadRunListParams

type BetaThreadRunListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaThreadRunListParamsOrder] `query:"order"`
}

func (BetaThreadRunListParams) URLQuery

func (r BetaThreadRunListParams) URLQuery() (v url.Values)

URLQuery serializes BetaThreadRunListParams's query parameters as `url.Values`.

type BetaThreadRunListParamsOrder

type BetaThreadRunListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaThreadRunListParamsOrderAsc  BetaThreadRunListParamsOrder = "asc"
	BetaThreadRunListParamsOrderDesc BetaThreadRunListParamsOrder = "desc"
)

func (BetaThreadRunListParamsOrder) IsKnown

func (r BetaThreadRunListParamsOrder) IsKnown() bool

type BetaThreadRunNewParams

// BetaThreadRunNewParams are the request parameters for creating a run on a
// thread.
type BetaThreadRunNewParams struct {
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
	// execute this run.
	AssistantID param.Field[string] `json:"assistant_id,required"`
	// A list of additional fields to include in the response. Currently the only
	// supported value is `step_details.tool_calls[*].file_search.results[*].content`
	// to fetch the file search result content.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	Include param.Field[[]RunStepInclude] `query:"include"`
	// Appends additional instructions at the end of the instructions for the run. This
	// is useful for modifying the behavior on a per-run basis without overriding other
	// instructions.
	AdditionalInstructions param.Field[string] `json:"additional_instructions"`
	// Adds additional messages to the thread before creating the run.
	AdditionalMessages param.Field[[]BetaThreadRunNewParamsAdditionalMessage] `json:"additional_messages"`
	// Overrides the
	// [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
	// of the assistant. This is useful for modifying the behavior on a per-run basis.
	Instructions param.Field[string] `json:"instructions"`
	// The maximum number of completion tokens that may be used over the course of the
	// run. The run will make a best effort to use only the number of completion tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// completion tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"`
	// The maximum number of prompt tokens that may be used over the course of the run.
	// The run will make a best effort to use only the number of prompt tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// prompt tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxPromptTokens param.Field[int64] `json:"max_prompt_tokens"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
	// be used to execute this run. If a value is provided here, it will override the
	// model associated with the assistant. If not, the model associated with the
	// assistant will be used.
	Model param.Field[ChatModel] `json:"model"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Field[float64] `json:"temperature"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice param.Field[AssistantToolChoiceOptionUnionParam] `json:"tool_choice"`
	// Override the tools the assistant can use for this run. This is useful for
	// modifying the behavior on a per-run basis.
	Tools param.Field[[]AssistantToolUnionParam] `json:"tools"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Field[float64] `json:"top_p"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy param.Field[BetaThreadRunNewParamsTruncationStrategy] `json:"truncation_strategy"`

	// An object specifying the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
	// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat param.Field[ThreadRunParamsResponseFormatUnion] `json:"response_format"`
}

func (BetaThreadRunNewParams) MarshalJSON

func (r BetaThreadRunNewParams) MarshalJSON() (data []byte, err error)

func (BetaThreadRunNewParams) URLQuery

func (r BetaThreadRunNewParams) URLQuery() (v url.Values)

URLQuery serializes BetaThreadRunNewParams's query parameters as `url.Values`.

type BetaThreadRunNewParamsAdditionalMessage

// BetaThreadRunNewParamsAdditionalMessage is a message added to the thread before
// the run is created.
type BetaThreadRunNewParamsAdditionalMessage struct {
	// An array of content parts with a defined type, each can be of type `text` or
	// images can be passed with `image_url` or `image_file`. Image types are only
	// supported on
	// [Vision-compatible models](https://platform.openai.com/docs/models/overview).
	Content param.Field[[]MessageContentPartParamUnion] `json:"content,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	Role param.Field[BetaThreadRunNewParamsAdditionalMessagesRole] `json:"role,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments param.Field[[]BetaThreadRunNewParamsAdditionalMessagesAttachment] `json:"attachments"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadRunNewParamsAdditionalMessage) MarshalJSON

func (r BetaThreadRunNewParamsAdditionalMessage) MarshalJSON() (data []byte, err error)

type BetaThreadRunNewParamsAdditionalMessagesAttachment

type BetaThreadRunNewParamsAdditionalMessagesAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Field[string] `json:"file_id"`
	// The tools to add this file to.
	Tools param.Field[[]BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion] `json:"tools"`
}

func (BetaThreadRunNewParamsAdditionalMessagesAttachment) MarshalJSON

func (r BetaThreadRunNewParamsAdditionalMessagesAttachment) MarshalJSON() (data []byte, err error)

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type param.Field[BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType] `json:"type,required"`
}

func (BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool) MarshalJSON

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolUnion interface {
	// contains filtered or unexported methods
}

Satisfied by CodeInterpreterToolParam, BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch, BetaThreadRunNewParamsAdditionalMessagesAttachmentsTool.

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch struct {
	// The type of tool being defined: `file_search`
	Type param.Field[BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType] `json:"type,required"`
}

func (BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearch) MarshalJSON

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType string

The type of tool being defined: `file_search`

const (
	BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchTypeFileSearch BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType = "file_search"
)

func (BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsFileSearchType) IsKnown

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType

type BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType string

The type of tool being defined: `code_interpreter`

const (
	BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeCodeInterpreter BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType = "code_interpreter"
	BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsTypeFileSearch      BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType = "file_search"
)

func (BetaThreadRunNewParamsAdditionalMessagesAttachmentsToolsType) IsKnown

type BetaThreadRunNewParamsAdditionalMessagesRole

type BetaThreadRunNewParamsAdditionalMessagesRole string

The role of the entity that is creating the message. Allowed values include:

  • `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.
  • `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
const (
	BetaThreadRunNewParamsAdditionalMessagesRoleUser      BetaThreadRunNewParamsAdditionalMessagesRole = "user"
	BetaThreadRunNewParamsAdditionalMessagesRoleAssistant BetaThreadRunNewParamsAdditionalMessagesRole = "assistant"
)

func (BetaThreadRunNewParamsAdditionalMessagesRole) IsKnown

type BetaThreadRunNewParamsTruncationStrategy

// BetaThreadRunNewParamsTruncationStrategy controls how a thread will be
// truncated prior to the run, to manage the initial context window of the run.
type BetaThreadRunNewParamsTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	Type param.Field[BetaThreadRunNewParamsTruncationStrategyType] `json:"type,required"`
	// The number of most recent messages from the thread to include when constructing
	// the context for the run.
	LastMessages param.Field[int64] `json:"last_messages"`
}

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

func (BetaThreadRunNewParamsTruncationStrategy) MarshalJSON

func (r BetaThreadRunNewParamsTruncationStrategy) MarshalJSON() (data []byte, err error)

type BetaThreadRunNewParamsTruncationStrategyType

type BetaThreadRunNewParamsTruncationStrategyType string

The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.

const (
	BetaThreadRunNewParamsTruncationStrategyTypeAuto         BetaThreadRunNewParamsTruncationStrategyType = "auto"
	BetaThreadRunNewParamsTruncationStrategyTypeLastMessages BetaThreadRunNewParamsTruncationStrategyType = "last_messages"
)

func (BetaThreadRunNewParamsTruncationStrategyType) IsKnown

type BetaThreadRunService

// BetaThreadRunService contains methods and other services that help with
// interacting with the openai API. It should be created with
// NewBetaThreadRunService rather than instantiated directly.
type BetaThreadRunService struct {
	Options []option.RequestOption
	Steps   *BetaThreadRunStepService
}

BetaThreadRunService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBetaThreadRunService method instead.

func NewBetaThreadRunService

func NewBetaThreadRunService(opts ...option.RequestOption) (r *BetaThreadRunService)

NewBetaThreadRunService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaThreadRunService) Cancel

func (r *BetaThreadRunService) Cancel(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error)

Cancels a run that is `in_progress`.

func (*BetaThreadRunService) Get

func (r *BetaThreadRunService) Get(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error)

Retrieves a run.

func (*BetaThreadRunService) List

Returns a list of runs belonging to a thread.

func (*BetaThreadRunService) ListAutoPaging

Returns a list of runs belonging to a thread.

func (*BetaThreadRunService) New

func (r *BetaThreadRunService) New(ctx context.Context, threadID string, params BetaThreadRunNewParams, opts ...option.RequestOption) (res *Run, err error)

Create a run.

func (*BetaThreadRunService) NewStreaming

Create a run.

func (*BetaThreadRunService) SubmitToolOutputs

func (r *BetaThreadRunService) SubmitToolOutputs(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (res *Run, err error)

When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.

func (*BetaThreadRunService) SubmitToolOutputsStreaming

func (r *BetaThreadRunService) SubmitToolOutputsStreaming(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEvent])

When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.

func (*BetaThreadRunService) Update

func (r *BetaThreadRunService) Update(ctx context.Context, threadID string, runID string, body BetaThreadRunUpdateParams, opts ...option.RequestOption) (res *Run, err error)

Modifies a run.

type BetaThreadRunStepGetParams

type BetaThreadRunStepGetParams struct {
	// A list of additional fields to include in the response. Currently the only
	// supported value is `step_details.tool_calls[*].file_search.results[*].content`
	// to fetch the file search result content.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	Include param.Field[[]RunStepInclude] `query:"include"`
}

func (BetaThreadRunStepGetParams) URLQuery

func (r BetaThreadRunStepGetParams) URLQuery() (v url.Values)

URLQuery serializes BetaThreadRunStepGetParams's query parameters as `url.Values`.

type BetaThreadRunStepListParams

type BetaThreadRunStepListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A list of additional fields to include in the response. Currently the only
	// supported value is `step_details.tool_calls[*].file_search.results[*].content`
	// to fetch the file search result content.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	Include param.Field[[]RunStepInclude] `query:"include"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaThreadRunStepListParamsOrder] `query:"order"`
}

func (BetaThreadRunStepListParams) URLQuery

func (r BetaThreadRunStepListParams) URLQuery() (v url.Values)

URLQuery serializes BetaThreadRunStepListParams's query parameters as `url.Values`.

type BetaThreadRunStepListParamsOrder

type BetaThreadRunStepListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaThreadRunStepListParamsOrderAsc  BetaThreadRunStepListParamsOrder = "asc"
	BetaThreadRunStepListParamsOrderDesc BetaThreadRunStepListParamsOrder = "desc"
)

func (BetaThreadRunStepListParamsOrder) IsKnown

type BetaThreadRunStepService

// BetaThreadRunStepService contains methods and other services that help with
// interacting with the openai API. It should be created with
// NewBetaThreadRunStepService rather than instantiated directly.
type BetaThreadRunStepService struct {
	Options []option.RequestOption
}

BetaThreadRunStepService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewBetaThreadRunStepService method instead.

func NewBetaThreadRunStepService

func NewBetaThreadRunStepService(opts ...option.RequestOption) (r *BetaThreadRunStepService)

NewBetaThreadRunStepService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaThreadRunStepService) Get

func (r *BetaThreadRunStepService) Get(ctx context.Context, threadID string, runID string, stepID string, query BetaThreadRunStepGetParams, opts ...option.RequestOption) (res *RunStep, err error)

Retrieves a run step.

func (*BetaThreadRunStepService) List

Returns a list of run steps belonging to a run.

func (*BetaThreadRunStepService) ListAutoPaging

Returns a list of run steps belonging to a run.

type BetaThreadRunSubmitToolOutputsParams

// BetaThreadRunSubmitToolOutputsParams are the request parameters for submitting
// tool outputs to a run that has `status: "requires_action"`.
type BetaThreadRunSubmitToolOutputsParams struct {
	// A list of tools for which the outputs are being submitted.
	ToolOutputs param.Field[[]BetaThreadRunSubmitToolOutputsParamsToolOutput] `json:"tool_outputs,required"`
}

func (BetaThreadRunSubmitToolOutputsParams) MarshalJSON

func (r BetaThreadRunSubmitToolOutputsParams) MarshalJSON() (data []byte, err error)

type BetaThreadRunSubmitToolOutputsParamsToolOutput

type BetaThreadRunSubmitToolOutputsParamsToolOutput struct {
	// The output of the tool call to be submitted to continue the run.
	Output param.Field[string] `json:"output"`
	// The ID of the tool call in the `required_action` object within the run object
	// the output is being submitted for.
	ToolCallID param.Field[string] `json:"tool_call_id"`
}

func (BetaThreadRunSubmitToolOutputsParamsToolOutput) MarshalJSON

func (r BetaThreadRunSubmitToolOutputsParamsToolOutput) MarshalJSON() (data []byte, err error)

type BetaThreadRunUpdateParams

// BetaThreadRunUpdateParams are the request parameters for modifying a run.
type BetaThreadRunUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
}

func (BetaThreadRunUpdateParams) MarshalJSON

func (r BetaThreadRunUpdateParams) MarshalJSON() (data []byte, err error)

type BetaThreadService

type BetaThreadService struct {
	Options  []option.RequestOption
	Runs     *BetaThreadRunService
	Messages *BetaThreadMessageService
}

BetaThreadService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewBetaThreadService method.

func NewBetaThreadService

func NewBetaThreadService(opts ...option.RequestOption) (r *BetaThreadService)

NewBetaThreadService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaThreadService) Delete

func (r *BetaThreadService) Delete(ctx context.Context, threadID string, opts ...option.RequestOption) (res *ThreadDeleted, err error)

Delete a thread.

func (*BetaThreadService) Get

func (r *BetaThreadService) Get(ctx context.Context, threadID string, opts ...option.RequestOption) (res *Thread, err error)

Retrieves a thread.

func (*BetaThreadService) New

func (r *BetaThreadService) New(ctx context.Context, body BetaThreadNewParams, opts ...option.RequestOption) (res *Thread, err error)

Create a thread.

func (*BetaThreadService) NewAndRun

func (r *BetaThreadService) NewAndRun(ctx context.Context, body BetaThreadNewAndRunParams, opts ...option.RequestOption) (res *Run, err error)

Create a thread and run it in one request.

func (*BetaThreadService) NewAndRunStreaming

Create a thread and run it in one request.

func (*BetaThreadService) Update

func (r *BetaThreadService) Update(ctx context.Context, threadID string, body BetaThreadUpdateParams, opts ...option.RequestOption) (res *Thread, err error)

Modifies a thread.

type BetaThreadUpdateParams

type BetaThreadUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources param.Field[BetaThreadUpdateParamsToolResources] `json:"tool_resources"`
}

func (BetaThreadUpdateParams) MarshalJSON

func (r BetaThreadUpdateParams) MarshalJSON() (data []byte, err error)

type BetaThreadUpdateParamsToolResources

type BetaThreadUpdateParamsToolResources struct {
	CodeInterpreter param.Field[BetaThreadUpdateParamsToolResourcesCodeInterpreter] `json:"code_interpreter"`
	FileSearch      param.Field[BetaThreadUpdateParamsToolResourcesFileSearch]      `json:"file_search"`
}

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (BetaThreadUpdateParamsToolResources) MarshalJSON

func (r BetaThreadUpdateParamsToolResources) MarshalJSON() (data []byte, err error)

type BetaThreadUpdateParamsToolResourcesCodeInterpreter

type BetaThreadUpdateParamsToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs param.Field[[]string] `json:"file_ids"`
}

func (BetaThreadUpdateParamsToolResourcesCodeInterpreter) MarshalJSON

func (r BetaThreadUpdateParamsToolResourcesCodeInterpreter) MarshalJSON() (data []byte, err error)

type BetaThreadUpdateParamsToolResourcesFileSearch

type BetaThreadUpdateParamsToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs param.Field[[]string] `json:"vector_store_ids"`
}

func (BetaThreadUpdateParamsToolResourcesFileSearch) MarshalJSON

func (r BetaThreadUpdateParamsToolResourcesFileSearch) MarshalJSON() (data []byte, err error)

type BetaVectorStoreFileBatchListFilesParams

type BetaVectorStoreFileBatchListFilesParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
	Filter param.Field[BetaVectorStoreFileBatchListFilesParamsFilter] `query:"filter"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaVectorStoreFileBatchListFilesParamsOrder] `query:"order"`
}

func (BetaVectorStoreFileBatchListFilesParams) URLQuery

URLQuery serializes BetaVectorStoreFileBatchListFilesParams's query parameters as `url.Values`.

type BetaVectorStoreFileBatchListFilesParamsFilter

type BetaVectorStoreFileBatchListFilesParamsFilter string

Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

const (
	BetaVectorStoreFileBatchListFilesParamsFilterInProgress BetaVectorStoreFileBatchListFilesParamsFilter = "in_progress"
	BetaVectorStoreFileBatchListFilesParamsFilterCompleted  BetaVectorStoreFileBatchListFilesParamsFilter = "completed"
	BetaVectorStoreFileBatchListFilesParamsFilterFailed     BetaVectorStoreFileBatchListFilesParamsFilter = "failed"
	BetaVectorStoreFileBatchListFilesParamsFilterCancelled  BetaVectorStoreFileBatchListFilesParamsFilter = "cancelled"
)

func (BetaVectorStoreFileBatchListFilesParamsFilter) IsKnown

type BetaVectorStoreFileBatchListFilesParamsOrder

type BetaVectorStoreFileBatchListFilesParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaVectorStoreFileBatchListFilesParamsOrderAsc  BetaVectorStoreFileBatchListFilesParamsOrder = "asc"
	BetaVectorStoreFileBatchListFilesParamsOrderDesc BetaVectorStoreFileBatchListFilesParamsOrder = "desc"
)

func (BetaVectorStoreFileBatchListFilesParamsOrder) IsKnown

type BetaVectorStoreFileBatchNewParams

type BetaVectorStoreFileBatchNewParams struct {
	// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
	// the vector store should use. Useful for tools like `file_search` that can access
	// files.
	FileIDs param.Field[[]string] `json:"file_ids,required"`
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
}

func (BetaVectorStoreFileBatchNewParams) MarshalJSON

func (r BetaVectorStoreFileBatchNewParams) MarshalJSON() (data []byte, err error)

type BetaVectorStoreFileBatchService

type BetaVectorStoreFileBatchService struct {
	Options []option.RequestOption
}

BetaVectorStoreFileBatchService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewBetaVectorStoreFileBatchService method.

func NewBetaVectorStoreFileBatchService

func NewBetaVectorStoreFileBatchService(opts ...option.RequestOption) (r *BetaVectorStoreFileBatchService)

NewBetaVectorStoreFileBatchService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaVectorStoreFileBatchService) Cancel

func (r *BetaVectorStoreFileBatchService) Cancel(ctx context.Context, vectorStoreID string, batchID string, opts ...option.RequestOption) (res *VectorStoreFileBatch, err error)

Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.

func (*BetaVectorStoreFileBatchService) Get

func (r *BetaVectorStoreFileBatchService) Get(ctx context.Context, vectorStoreID string, batchID string, opts ...option.RequestOption) (res *VectorStoreFileBatch, err error)

Retrieves a vector store file batch.

func (*BetaVectorStoreFileBatchService) ListFiles

Returns a list of vector store files in a batch.

func (*BetaVectorStoreFileBatchService) ListFilesAutoPaging

Returns a list of vector store files in a batch.

func (*BetaVectorStoreFileBatchService) New

Create a vector store file batch.

type BetaVectorStoreFileListParams

type BetaVectorStoreFileListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
	Filter param.Field[BetaVectorStoreFileListParamsFilter] `query:"filter"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaVectorStoreFileListParamsOrder] `query:"order"`
}

func (BetaVectorStoreFileListParams) URLQuery

func (r BetaVectorStoreFileListParams) URLQuery() (v url.Values)

URLQuery serializes BetaVectorStoreFileListParams's query parameters as `url.Values`.

type BetaVectorStoreFileListParamsFilter

type BetaVectorStoreFileListParamsFilter string

Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.

const (
	BetaVectorStoreFileListParamsFilterInProgress BetaVectorStoreFileListParamsFilter = "in_progress"
	BetaVectorStoreFileListParamsFilterCompleted  BetaVectorStoreFileListParamsFilter = "completed"
	BetaVectorStoreFileListParamsFilterFailed     BetaVectorStoreFileListParamsFilter = "failed"
	BetaVectorStoreFileListParamsFilterCancelled  BetaVectorStoreFileListParamsFilter = "cancelled"
)

func (BetaVectorStoreFileListParamsFilter) IsKnown

type BetaVectorStoreFileListParamsOrder

type BetaVectorStoreFileListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaVectorStoreFileListParamsOrderAsc  BetaVectorStoreFileListParamsOrder = "asc"
	BetaVectorStoreFileListParamsOrderDesc BetaVectorStoreFileListParamsOrder = "desc"
)

func (BetaVectorStoreFileListParamsOrder) IsKnown

type BetaVectorStoreFileNewParams

type BetaVectorStoreFileNewParams struct {
	// A [File](https://platform.openai.com/docs/api-reference/files) ID that the
	// vector store should use. Useful for tools like `file_search` that can access
	// files.
	FileID param.Field[string] `json:"file_id,required"`
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
}

func (BetaVectorStoreFileNewParams) MarshalJSON

func (r BetaVectorStoreFileNewParams) MarshalJSON() (data []byte, err error)

type BetaVectorStoreFileService

type BetaVectorStoreFileService struct {
	Options []option.RequestOption
}

BetaVectorStoreFileService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewBetaVectorStoreFileService method.

func NewBetaVectorStoreFileService

func NewBetaVectorStoreFileService(opts ...option.RequestOption) (r *BetaVectorStoreFileService)

NewBetaVectorStoreFileService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaVectorStoreFileService) Delete

func (r *BetaVectorStoreFileService) Delete(ctx context.Context, vectorStoreID string, fileID string, opts ...option.RequestOption) (res *VectorStoreFileDeleted, err error)

Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](https://platform.openai.com/docs/api-reference/files/delete) endpoint.

func (*BetaVectorStoreFileService) Get

func (r *BetaVectorStoreFileService) Get(ctx context.Context, vectorStoreID string, fileID string, opts ...option.RequestOption) (res *VectorStoreFile, err error)

Retrieves a vector store file.

func (*BetaVectorStoreFileService) List

Returns a list of vector store files.

func (*BetaVectorStoreFileService) ListAutoPaging

Returns a list of vector store files.

func (*BetaVectorStoreFileService) New

Create a vector store file by attaching a [File](https://platform.openai.com/docs/api-reference/files) to a [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).

type BetaVectorStoreListParams

type BetaVectorStoreListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Field[string] `query:"after"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include before=obj_foo in order to
	// fetch the previous page of the list.
	Before param.Field[string] `query:"before"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Field[int64] `query:"limit"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	Order param.Field[BetaVectorStoreListParamsOrder] `query:"order"`
}

func (BetaVectorStoreListParams) URLQuery

func (r BetaVectorStoreListParams) URLQuery() (v url.Values)

URLQuery serializes BetaVectorStoreListParams's query parameters as `url.Values`.

type BetaVectorStoreListParamsOrder

type BetaVectorStoreListParamsOrder string

Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.

const (
	BetaVectorStoreListParamsOrderAsc  BetaVectorStoreListParamsOrder = "asc"
	BetaVectorStoreListParamsOrderDesc BetaVectorStoreListParamsOrder = "desc"
)

func (BetaVectorStoreListParamsOrder) IsKnown

type BetaVectorStoreNewParams

type BetaVectorStoreNewParams struct {
	// The chunking strategy used to chunk the file(s). If not set, will use the `auto`
	// strategy. Only applicable if `file_ids` is non-empty.
	ChunkingStrategy param.Field[FileChunkingStrategyParamUnion] `json:"chunking_strategy"`
	// The expiration policy for a vector store.
	ExpiresAfter param.Field[BetaVectorStoreNewParamsExpiresAfter] `json:"expires_after"`
	// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
	// the vector store should use. Useful for tools like `file_search` that can access
	// files.
	FileIDs param.Field[[]string] `json:"file_ids"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// The name of the vector store.
	Name param.Field[string] `json:"name"`
}

func (BetaVectorStoreNewParams) MarshalJSON

func (r BetaVectorStoreNewParams) MarshalJSON() (data []byte, err error)

type BetaVectorStoreNewParamsExpiresAfter

type BetaVectorStoreNewParamsExpiresAfter struct {
	// Anchor timestamp after which the expiration policy applies. Supported anchors:
	// `last_active_at`.
	Anchor param.Field[BetaVectorStoreNewParamsExpiresAfterAnchor] `json:"anchor,required"`
	// The number of days after the anchor time that the vector store will expire.
	Days param.Field[int64] `json:"days,required"`
}

The expiration policy for a vector store.

func (BetaVectorStoreNewParamsExpiresAfter) MarshalJSON

func (r BetaVectorStoreNewParamsExpiresAfter) MarshalJSON() (data []byte, err error)

type BetaVectorStoreNewParamsExpiresAfterAnchor

type BetaVectorStoreNewParamsExpiresAfterAnchor string

Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.

const (
	BetaVectorStoreNewParamsExpiresAfterAnchorLastActiveAt BetaVectorStoreNewParamsExpiresAfterAnchor = "last_active_at"
)

func (BetaVectorStoreNewParamsExpiresAfterAnchor) IsKnown

type BetaVectorStoreService

type BetaVectorStoreService struct {
	Options     []option.RequestOption
	Files       *BetaVectorStoreFileService
	FileBatches *BetaVectorStoreFileBatchService
}

BetaVectorStoreService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewBetaVectorStoreService method.

func NewBetaVectorStoreService

func NewBetaVectorStoreService(opts ...option.RequestOption) (r *BetaVectorStoreService)

NewBetaVectorStoreService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*BetaVectorStoreService) Delete

func (r *BetaVectorStoreService) Delete(ctx context.Context, vectorStoreID string, opts ...option.RequestOption) (res *VectorStoreDeleted, err error)

Delete a vector store.

func (*BetaVectorStoreService) Get

func (r *BetaVectorStoreService) Get(ctx context.Context, vectorStoreID string, opts ...option.RequestOption) (res *VectorStore, err error)

Retrieves a vector store.

func (*BetaVectorStoreService) List

Returns a list of vector stores.

func (*BetaVectorStoreService) ListAutoPaging

Returns a list of vector stores.

func (*BetaVectorStoreService) New

Create a vector store.

func (*BetaVectorStoreService) Update

func (r *BetaVectorStoreService) Update(ctx context.Context, vectorStoreID string, body BetaVectorStoreUpdateParams, opts ...option.RequestOption) (res *VectorStore, err error)

Modifies a vector store.

type BetaVectorStoreUpdateParams

type BetaVectorStoreUpdateParams struct {
	// The expiration policy for a vector store.
	ExpiresAfter param.Field[BetaVectorStoreUpdateParamsExpiresAfter] `json:"expires_after"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata param.Field[interface{}] `json:"metadata"`
	// The name of the vector store.
	Name param.Field[string] `json:"name"`
}

func (BetaVectorStoreUpdateParams) MarshalJSON

func (r BetaVectorStoreUpdateParams) MarshalJSON() (data []byte, err error)

type BetaVectorStoreUpdateParamsExpiresAfter

type BetaVectorStoreUpdateParamsExpiresAfter struct {
	// Anchor timestamp after which the expiration policy applies. Supported anchors:
	// `last_active_at`.
	Anchor param.Field[BetaVectorStoreUpdateParamsExpiresAfterAnchor] `json:"anchor,required"`
	// The number of days after the anchor time that the vector store will expire.
	Days param.Field[int64] `json:"days,required"`
}

The expiration policy for a vector store.

func (BetaVectorStoreUpdateParamsExpiresAfter) MarshalJSON

func (r BetaVectorStoreUpdateParamsExpiresAfter) MarshalJSON() (data []byte, err error)

type BetaVectorStoreUpdateParamsExpiresAfterAnchor

type BetaVectorStoreUpdateParamsExpiresAfterAnchor string

Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.

const (
	BetaVectorStoreUpdateParamsExpiresAfterAnchorLastActiveAt BetaVectorStoreUpdateParamsExpiresAfterAnchor = "last_active_at"
)

func (BetaVectorStoreUpdateParamsExpiresAfterAnchor) IsKnown

type ChatCompletion

type ChatCompletion struct {
	// A unique identifier for the chat completion.
	ID string `json:"id,required"`
	// A list of chat completion choices. Can be more than one if `n` is greater
	// than 1.
	Choices []ChatCompletionChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the chat completion was created.
	Created int64 `json:"created,required"`
	// The model used for the chat completion.
	Model string `json:"model,required"`
	// The object type, which is always `chat.completion`.
	Object ChatCompletionObject `json:"object,required"`
	// The service tier used for processing the request. This field is only included if
	// the `service_tier` parameter is specified in the request.
	ServiceTier ChatCompletionServiceTier `json:"service_tier,nullable"`
	// This fingerprint represents the backend configuration that the model runs with.
	//
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// Usage statistics for the completion request.
	Usage CompletionUsage    `json:"usage"`
	JSON  chatCompletionJSON `json:"-"`
}

Represents a chat completion response returned by model, based on the provided input.

func (*ChatCompletion) UnmarshalJSON

func (r *ChatCompletion) UnmarshalJSON(data []byte) (err error)

type ChatCompletionAccumulator

type ChatCompletionAccumulator struct {
	// The up-to-date accumulation of model's responses
	ChatCompletion
	// contains filtered or unexported fields
}

Helper to accumulate chunks from a stream

func (*ChatCompletionAccumulator) AddChunk

func (acc *ChatCompletionAccumulator) AddChunk(chunk ChatCompletionChunk) bool

AddChunk incorporates a chunk into the accumulation. Chunks must be added in order. Returns false if the chunk could not be successfully accumulated.

The ChatCompletion field JSON does not get accumulated.

func (*ChatCompletionAccumulator) JustFinishedContent

func (acc *ChatCompletionAccumulator) JustFinishedContent() (content string, ok bool)

JustFinishedContent retrieves the chat completion content when it is known to have just been completed. The content is "just completed" when the last added chunk no longer contains a content delta. If the content is just completed, the content is returned and the boolean is true. Otherwise, an empty string is returned and the boolean will be false.

func (*ChatCompletionAccumulator) JustFinishedRefusal

func (acc *ChatCompletionAccumulator) JustFinishedRefusal() (refusal string, ok bool)

JustFinishedRefusal retrieves the chat completion refusal when it is known to have just been completed. The refusal is "just completed" when the last added chunk no longer contains a refusal delta. If the refusal is just completed, the refusal is returned and the boolean is true. Otherwise, an empty string is returned and the boolean will be false.

func (*ChatCompletionAccumulator) JustFinishedToolCall

func (acc *ChatCompletionAccumulator) JustFinishedToolCall() (toolcall FinishedChatCompletionToolCall, ok bool)

JustFinishedToolCall retrieves a tool call when it is known to have just been completed. A tool call is "just completed" when the last added chunk no longer contains a tool call delta or contains a delta for a different tool call. If the tool call is just completed, a FinishedChatCompletionToolCall is returned and the boolean is true. Otherwise, an empty tool call is returned and the boolean will be false.

You cannot rely on this with a stream that has ParallelToolCalls enabled.

type ChatCompletionAssistantMessageParam

type ChatCompletionAssistantMessageParam struct {
	// The role of the messages author, in this case `assistant`.
	Role param.Field[ChatCompletionAssistantMessageParamRole] `json:"role,required"`
	// The contents of the assistant message. Required unless `tool_calls` or
	// `function_call` is specified.
	Content param.Field[[]ChatCompletionAssistantMessageParamContentUnion] `json:"content"`
	// Deprecated and replaced by `tool_calls`. The name and arguments of a function
	// that should be called, as generated by the model.
	FunctionCall param.Field[ChatCompletionAssistantMessageParamFunctionCall] `json:"function_call"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name param.Field[string] `json:"name"`
	// The refusal message by the assistant.
	Refusal param.Field[string] `json:"refusal"`
	// The tool calls generated by the model, such as function calls.
	ToolCalls param.Field[[]ChatCompletionMessageToolCallParam] `json:"tool_calls"`
}

func AssistantMessage

func AssistantMessage(content string) ChatCompletionAssistantMessageParam

func (ChatCompletionAssistantMessageParam) MarshalJSON

func (r ChatCompletionAssistantMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionAssistantMessageParamContent

type ChatCompletionAssistantMessageParamContent struct {
	// The type of the content part.
	Type param.Field[ChatCompletionAssistantMessageParamContentType] `json:"type,required"`
	// The text content.
	Text param.Field[string] `json:"text"`
	// The refusal message generated by the model.
	Refusal param.Field[string] `json:"refusal"`
}

func (ChatCompletionAssistantMessageParamContent) MarshalJSON

func (r ChatCompletionAssistantMessageParamContent) MarshalJSON() (data []byte, err error)

type ChatCompletionAssistantMessageParamContentType

type ChatCompletionAssistantMessageParamContentType string

The type of the content part.

const (
	ChatCompletionAssistantMessageParamContentTypeText    ChatCompletionAssistantMessageParamContentType = "text"
	ChatCompletionAssistantMessageParamContentTypeRefusal ChatCompletionAssistantMessageParamContentType = "refusal"
)

func (ChatCompletionAssistantMessageParamContentType) IsKnown

type ChatCompletionAssistantMessageParamContentUnion

type ChatCompletionAssistantMessageParamContentUnion interface {
	// contains filtered or unexported methods
}

Satisfied by ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam, ChatCompletionAssistantMessageParamContent.

type ChatCompletionAssistantMessageParamFunctionCall

type ChatCompletionAssistantMessageParamFunctionCall struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments param.Field[string] `json:"arguments,required"`
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.

func (ChatCompletionAssistantMessageParamFunctionCall) MarshalJSON

func (r ChatCompletionAssistantMessageParamFunctionCall) MarshalJSON() (data []byte, err error)

type ChatCompletionAssistantMessageParamRole

type ChatCompletionAssistantMessageParamRole string

The role of the messages author, in this case `assistant`.

const (
	ChatCompletionAssistantMessageParamRoleAssistant ChatCompletionAssistantMessageParamRole = "assistant"
)

func (ChatCompletionAssistantMessageParamRole) IsKnown

type ChatCompletionChoice

type ChatCompletionChoice struct {
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, `content_filter` if
	// content was omitted due to a flag from our content filters, `tool_calls` if the
	// model called a tool, or `function_call` (deprecated) if the model called a
	// function.
	FinishReason ChatCompletionChoicesFinishReason `json:"finish_reason,required"`
	// The index of the choice in the list of choices.
	Index int64 `json:"index,required"`
	// Log probability information for the choice.
	Logprobs ChatCompletionChoicesLogprobs `json:"logprobs,required,nullable"`
	// A chat completion message generated by the model.
	Message ChatCompletionMessage    `json:"message,required"`
	JSON    chatCompletionChoiceJSON `json:"-"`
}

func (*ChatCompletionChoice) UnmarshalJSON

func (r *ChatCompletionChoice) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChoicesFinishReason

type ChatCompletionChoicesFinishReason string

The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.

const (
	ChatCompletionChoicesFinishReasonStop          ChatCompletionChoicesFinishReason = "stop"
	ChatCompletionChoicesFinishReasonLength        ChatCompletionChoicesFinishReason = "length"
	ChatCompletionChoicesFinishReasonToolCalls     ChatCompletionChoicesFinishReason = "tool_calls"
	ChatCompletionChoicesFinishReasonContentFilter ChatCompletionChoicesFinishReason = "content_filter"
	ChatCompletionChoicesFinishReasonFunctionCall  ChatCompletionChoicesFinishReason = "function_call"
)

func (ChatCompletionChoicesFinishReason) IsKnown

type ChatCompletionChoicesLogprobs

type ChatCompletionChoicesLogprobs struct {
	// A list of message content tokens with log probability information.
	Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
	// A list of message refusal tokens with log probability information.
	Refusal []ChatCompletionTokenLogprob      `json:"refusal,required,nullable"`
	JSON    chatCompletionChoicesLogprobsJSON `json:"-"`
}

Log probability information for the choice.

func (*ChatCompletionChoicesLogprobs) UnmarshalJSON

func (r *ChatCompletionChoicesLogprobs) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunk

type ChatCompletionChunk struct {
	// A unique identifier for the chat completion. Each chunk has the same ID.
	ID string `json:"id,required"`
	// A list of chat completion choices. Can contain more than one element if `n` is
	// greater than 1. Can also be empty for the last chunk if you set
	// `stream_options: {"include_usage": true}`.
	Choices []ChatCompletionChunkChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the chat completion was created. Each
	// chunk has the same timestamp.
	Created int64 `json:"created,required"`
	// The model used to generate the completion.
	Model string `json:"model,required"`
	// The object type, which is always `chat.completion.chunk`.
	Object ChatCompletionChunkObject `json:"object,required"`
	// The service tier used for processing the request. This field is only included if
	// the `service_tier` parameter is specified in the request.
	ServiceTier ChatCompletionChunkServiceTier `json:"service_tier,nullable"`
	// This fingerprint represents the backend configuration that the model runs with.
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// An optional field that will only be present when you set
	// `stream_options: {"include_usage": true}` in your request. When present, it
	// contains a null value except for the last chunk which contains the token usage
	// statistics for the entire request.
	Usage CompletionUsage         `json:"usage"`
	JSON  chatCompletionChunkJSON `json:"-"`
}

Represents a streamed chunk of a chat completion response returned by the model, based on the provided input.

func (*ChatCompletionChunk) UnmarshalJSON

func (r *ChatCompletionChunk) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoice

type ChatCompletionChunkChoice struct {
	// A chat completion delta generated by streamed model responses.
	Delta ChatCompletionChunkChoicesDelta `json:"delta,required"`
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, `content_filter` if
	// content was omitted due to a flag from our content filters, `tool_calls` if the
	// model called a tool, or `function_call` (deprecated) if the model called a
	// function.
	FinishReason ChatCompletionChunkChoicesFinishReason `json:"finish_reason,required,nullable"`
	// The index of the choice in the list of choices.
	Index int64 `json:"index,required"`
	// Log probability information for the choice.
	Logprobs ChatCompletionChunkChoicesLogprobs `json:"logprobs,nullable"`
	JSON     chatCompletionChunkChoiceJSON      `json:"-"`
}

func (*ChatCompletionChunkChoice) UnmarshalJSON

func (r *ChatCompletionChunkChoice) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoicesDelta

type ChatCompletionChunkChoicesDelta struct {
	// The contents of the chunk message.
	Content string `json:"content,nullable"`
	// Deprecated and replaced by `tool_calls`. The name and arguments of a function
	// that should be called, as generated by the model.
	FunctionCall ChatCompletionChunkChoicesDeltaFunctionCall `json:"function_call"`
	// The refusal message generated by the model.
	Refusal string `json:"refusal,nullable"`
	// The role of the author of this message.
	Role      ChatCompletionChunkChoicesDeltaRole       `json:"role"`
	ToolCalls []ChatCompletionChunkChoicesDeltaToolCall `json:"tool_calls"`
	JSON      chatCompletionChunkChoicesDeltaJSON       `json:"-"`
}

A chat completion delta generated by streamed model responses.

func (*ChatCompletionChunkChoicesDelta) UnmarshalJSON

func (r *ChatCompletionChunkChoicesDelta) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoicesDeltaFunctionCall

type ChatCompletionChunkChoicesDeltaFunctionCall struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments"`
	// The name of the function to call.
	Name string                                          `json:"name"`
	JSON chatCompletionChunkChoicesDeltaFunctionCallJSON `json:"-"`
}

Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.

func (*ChatCompletionChunkChoicesDeltaFunctionCall) UnmarshalJSON

func (r *ChatCompletionChunkChoicesDeltaFunctionCall) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoicesDeltaRole

type ChatCompletionChunkChoicesDeltaRole string

The role of the author of this message.

const (
	ChatCompletionChunkChoicesDeltaRoleSystem    ChatCompletionChunkChoicesDeltaRole = "system"
	ChatCompletionChunkChoicesDeltaRoleUser      ChatCompletionChunkChoicesDeltaRole = "user"
	ChatCompletionChunkChoicesDeltaRoleAssistant ChatCompletionChunkChoicesDeltaRole = "assistant"
	ChatCompletionChunkChoicesDeltaRoleTool      ChatCompletionChunkChoicesDeltaRole = "tool"
)

func (ChatCompletionChunkChoicesDeltaRole) IsKnown

type ChatCompletionChunkChoicesDeltaToolCall

type ChatCompletionChunkChoicesDeltaToolCall struct {
	Index int64 `json:"index,required"`
	// The ID of the tool call.
	ID       string                                           `json:"id"`
	Function ChatCompletionChunkChoicesDeltaToolCallsFunction `json:"function"`
	// The type of the tool. Currently, only `function` is supported.
	Type ChatCompletionChunkChoicesDeltaToolCallsType `json:"type"`
	JSON chatCompletionChunkChoicesDeltaToolCallJSON  `json:"-"`
}

func (*ChatCompletionChunkChoicesDeltaToolCall) UnmarshalJSON

func (r *ChatCompletionChunkChoicesDeltaToolCall) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoicesDeltaToolCallsFunction

type ChatCompletionChunkChoicesDeltaToolCallsFunction struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments"`
	// The name of the function to call.
	Name string                                               `json:"name"`
	JSON chatCompletionChunkChoicesDeltaToolCallsFunctionJSON `json:"-"`
}

func (*ChatCompletionChunkChoicesDeltaToolCallsFunction) UnmarshalJSON

func (r *ChatCompletionChunkChoicesDeltaToolCallsFunction) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkChoicesDeltaToolCallsType

type ChatCompletionChunkChoicesDeltaToolCallsType string

The type of the tool. Currently, only `function` is supported.

const (
	ChatCompletionChunkChoicesDeltaToolCallsTypeFunction ChatCompletionChunkChoicesDeltaToolCallsType = "function"
)

func (ChatCompletionChunkChoicesDeltaToolCallsType) IsKnown

type ChatCompletionChunkChoicesFinishReason

type ChatCompletionChunkChoicesFinishReason string

The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.

const (
	ChatCompletionChunkChoicesFinishReasonStop          ChatCompletionChunkChoicesFinishReason = "stop"
	ChatCompletionChunkChoicesFinishReasonLength        ChatCompletionChunkChoicesFinishReason = "length"
	ChatCompletionChunkChoicesFinishReasonToolCalls     ChatCompletionChunkChoicesFinishReason = "tool_calls"
	ChatCompletionChunkChoicesFinishReasonContentFilter ChatCompletionChunkChoicesFinishReason = "content_filter"
	ChatCompletionChunkChoicesFinishReasonFunctionCall  ChatCompletionChunkChoicesFinishReason = "function_call"
)

func (ChatCompletionChunkChoicesFinishReason) IsKnown

type ChatCompletionChunkChoicesLogprobs

type ChatCompletionChunkChoicesLogprobs struct {
	// A list of message content tokens with log probability information.
	Content []ChatCompletionTokenLogprob `json:"content,required,nullable"`
	// A list of message refusal tokens with log probability information.
	Refusal []ChatCompletionTokenLogprob           `json:"refusal,required,nullable"`
	JSON    chatCompletionChunkChoicesLogprobsJSON `json:"-"`
}

Log probability information for the choice.

func (*ChatCompletionChunkChoicesLogprobs) UnmarshalJSON

func (r *ChatCompletionChunkChoicesLogprobs) UnmarshalJSON(data []byte) (err error)

type ChatCompletionChunkObject

type ChatCompletionChunkObject string

The object type, which is always `chat.completion.chunk`.

const (
	ChatCompletionChunkObjectChatCompletionChunk ChatCompletionChunkObject = "chat.completion.chunk"
)

func (ChatCompletionChunkObject) IsKnown

func (r ChatCompletionChunkObject) IsKnown() bool

type ChatCompletionChunkServiceTier

type ChatCompletionChunkServiceTier string

The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.

const (
	ChatCompletionChunkServiceTierScale   ChatCompletionChunkServiceTier = "scale"
	ChatCompletionChunkServiceTierDefault ChatCompletionChunkServiceTier = "default"
)

func (ChatCompletionChunkServiceTier) IsKnown

type ChatCompletionContentPartImageImageURLDetail

type ChatCompletionContentPartImageImageURLDetail string

Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).

const (
	ChatCompletionContentPartImageImageURLDetailAuto ChatCompletionContentPartImageImageURLDetail = "auto"
	ChatCompletionContentPartImageImageURLDetailLow  ChatCompletionContentPartImageImageURLDetail = "low"
	ChatCompletionContentPartImageImageURLDetailHigh ChatCompletionContentPartImageImageURLDetail = "high"
)

func (ChatCompletionContentPartImageImageURLDetail) IsKnown

type ChatCompletionContentPartImageImageURLParam

type ChatCompletionContentPartImageImageURLParam struct {
	// Either a URL of the image or the base64 encoded image data.
	URL param.Field[string] `json:"url,required" format:"uri"`
	// Specifies the detail level of the image. Learn more in the
	// [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
	Detail param.Field[ChatCompletionContentPartImageImageURLDetail] `json:"detail"`
}

func (ChatCompletionContentPartImageImageURLParam) MarshalJSON

func (r ChatCompletionContentPartImageImageURLParam) MarshalJSON() (data []byte, err error)

type ChatCompletionContentPartImageParam

type ChatCompletionContentPartImageParam struct {
	ImageURL param.Field[ChatCompletionContentPartImageImageURLParam] `json:"image_url,required"`
	// The type of the content part.
	Type param.Field[ChatCompletionContentPartImageType] `json:"type,required"`
}

func (ChatCompletionContentPartImageParam) MarshalJSON

func (r ChatCompletionContentPartImageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionContentPartImageType

type ChatCompletionContentPartImageType string

The type of the content part.

const (
	ChatCompletionContentPartImageTypeImageURL ChatCompletionContentPartImageType = "image_url"
)

func (ChatCompletionContentPartImageType) IsKnown

type ChatCompletionContentPartParam

type ChatCompletionContentPartParam struct {
	// The type of the content part.
	Type param.Field[ChatCompletionContentPartType] `json:"type,required"`
	// The text content.
	Text     param.Field[string]      `json:"text"`
	ImageURL param.Field[interface{}] `json:"image_url,required"`
}

func (ChatCompletionContentPartParam) MarshalJSON

func (r ChatCompletionContentPartParam) MarshalJSON() (data []byte, err error)

type ChatCompletionContentPartRefusalParam

type ChatCompletionContentPartRefusalParam struct {
	// The refusal message generated by the model.
	Refusal param.Field[string] `json:"refusal,required"`
	// The type of the content part.
	Type param.Field[ChatCompletionContentPartRefusalType] `json:"type,required"`
}

func (ChatCompletionContentPartRefusalParam) MarshalJSON

func (r ChatCompletionContentPartRefusalParam) MarshalJSON() (data []byte, err error)

type ChatCompletionContentPartRefusalType

type ChatCompletionContentPartRefusalType string

The type of the content part.

const (
	ChatCompletionContentPartRefusalTypeRefusal ChatCompletionContentPartRefusalType = "refusal"
)

func (ChatCompletionContentPartRefusalType) IsKnown

type ChatCompletionContentPartTextParam

type ChatCompletionContentPartTextParam struct {
	// The text content.
	Text param.Field[string] `json:"text,required"`
	// The type of the content part.
	Type param.Field[ChatCompletionContentPartTextType] `json:"type,required"`
}

func (ChatCompletionContentPartTextParam) MarshalJSON

func (r ChatCompletionContentPartTextParam) MarshalJSON() (data []byte, err error)

type ChatCompletionContentPartTextType

type ChatCompletionContentPartTextType string

The type of the content part.

const (
	ChatCompletionContentPartTextTypeText ChatCompletionContentPartTextType = "text"
)

func (ChatCompletionContentPartTextType) IsKnown

type ChatCompletionContentPartType

type ChatCompletionContentPartType string

The type of the content part.

const (
	ChatCompletionContentPartTypeText     ChatCompletionContentPartType = "text"
	ChatCompletionContentPartTypeImageURL ChatCompletionContentPartType = "image_url"
)

func (ChatCompletionContentPartType) IsKnown

func (r ChatCompletionContentPartType) IsKnown() bool

type ChatCompletionContentPartUnionParam

type ChatCompletionContentPartUnionParam interface {
	// contains filtered or unexported methods
}

Satisfied by ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartParam.

type ChatCompletionFunctionCallOptionParam

type ChatCompletionFunctionCallOptionParam struct {
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.

func (ChatCompletionFunctionCallOptionParam) MarshalJSON

func (r ChatCompletionFunctionCallOptionParam) MarshalJSON() (data []byte, err error)

type ChatCompletionFunctionMessageParam

type ChatCompletionFunctionMessageParam struct {
	// The contents of the function message.
	Content param.Field[string] `json:"content,required"`
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
	// The role of the messages author, in this case `function`.
	Role param.Field[ChatCompletionFunctionMessageParamRole] `json:"role,required"`
}

func (ChatCompletionFunctionMessageParam) MarshalJSON

func (r ChatCompletionFunctionMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionFunctionMessageParamRole

type ChatCompletionFunctionMessageParamRole string

The role of the messages author, in this case `function`.

const (
	ChatCompletionFunctionMessageParamRoleFunction ChatCompletionFunctionMessageParamRole = "function"
)

func (ChatCompletionFunctionMessageParamRole) IsKnown

type ChatCompletionMessage

type ChatCompletionMessage struct {
	// The contents of the message.
	Content string `json:"content,required,nullable"`
	// The refusal message generated by the model.
	Refusal string `json:"refusal,required,nullable"`
	// The role of the author of this message.
	Role ChatCompletionMessageRole `json:"role,required"`
	// Deprecated and replaced by `tool_calls`. The name and arguments of a function
	// that should be called, as generated by the model.
	FunctionCall ChatCompletionMessageFunctionCall `json:"function_call"`
	// The tool calls generated by the model, such as function calls.
	ToolCalls []ChatCompletionMessageToolCall `json:"tool_calls"`
	JSON      chatCompletionMessageJSON       `json:"-"`
}

A chat completion message generated by the model.

func (ChatCompletionMessage) MarshalJSON

func (r ChatCompletionMessage) MarshalJSON() (data []byte, err error)

func (*ChatCompletionMessage) UnmarshalJSON

func (r *ChatCompletionMessage) UnmarshalJSON(data []byte) (err error)

type ChatCompletionMessageFunctionCall

type ChatCompletionMessageFunctionCall struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments,required"`
	// The name of the function to call.
	Name string                                `json:"name,required"`
	JSON chatCompletionMessageFunctionCallJSON `json:"-"`
}

Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.

func (*ChatCompletionMessageFunctionCall) UnmarshalJSON

func (r *ChatCompletionMessageFunctionCall) UnmarshalJSON(data []byte) (err error)

type ChatCompletionMessageParam

type ChatCompletionMessageParam struct {
	Content param.Field[interface{}] `json:"content,required"`
	// The role of the messages author, in this case `system`.
	Role param.Field[ChatCompletionMessageParamRole] `json:"role,required"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name param.Field[string] `json:"name"`
	// The refusal message by the assistant.
	Refusal      param.Field[string]      `json:"refusal"`
	ToolCalls    param.Field[interface{}] `json:"tool_calls,required"`
	FunctionCall param.Field[interface{}] `json:"function_call,required"`
	// Tool call that this message is responding to.
	ToolCallID param.Field[string] `json:"tool_call_id"`
}

func (ChatCompletionMessageParam) MarshalJSON

func (r ChatCompletionMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionMessageParamRole

type ChatCompletionMessageParamRole string

The role of the messages author, in this case `system`.

const (
	ChatCompletionMessageParamRoleSystem    ChatCompletionMessageParamRole = "system"
	ChatCompletionMessageParamRoleUser      ChatCompletionMessageParamRole = "user"
	ChatCompletionMessageParamRoleAssistant ChatCompletionMessageParamRole = "assistant"
	ChatCompletionMessageParamRoleTool      ChatCompletionMessageParamRole = "tool"
	ChatCompletionMessageParamRoleFunction  ChatCompletionMessageParamRole = "function"
)

func (ChatCompletionMessageParamRole) IsKnown

type ChatCompletionMessageParamUnion

type ChatCompletionMessageParamUnion interface {
	// contains filtered or unexported methods
}

Satisfied by ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionToolMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionMessageParam.

This union is additionally satisfied by the return type ChatCompletionMessage.

func FunctionMessage

func FunctionMessage(name, content string) ChatCompletionMessageParamUnion

func SystemMessage

func SystemMessage(content string) ChatCompletionMessageParamUnion

func UserMessage

func UserMessage(content string) ChatCompletionMessageParamUnion

type ChatCompletionMessageRole

type ChatCompletionMessageRole string

The role of the author of this message.

const (
	ChatCompletionMessageRoleAssistant ChatCompletionMessageRole = "assistant"
)

func (ChatCompletionMessageRole) IsKnown

func (r ChatCompletionMessageRole) IsKnown() bool

type ChatCompletionMessageToolCall

type ChatCompletionMessageToolCall struct {
	// The ID of the tool call.
	ID string `json:"id,required"`
	// The function that the model called.
	Function ChatCompletionMessageToolCallFunction `json:"function,required"`
	// The type of the tool. Currently, only `function` is supported.
	Type ChatCompletionMessageToolCallType `json:"type,required"`
	JSON chatCompletionMessageToolCallJSON `json:"-"`
}

func (*ChatCompletionMessageToolCall) UnmarshalJSON

func (r *ChatCompletionMessageToolCall) UnmarshalJSON(data []byte) (err error)

type ChatCompletionMessageToolCallFunction

type ChatCompletionMessageToolCallFunction struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments string `json:"arguments,required"`
	// The name of the function to call.
	Name string                                    `json:"name,required"`
	JSON chatCompletionMessageToolCallFunctionJSON `json:"-"`
}

The function that the model called.

func (*ChatCompletionMessageToolCallFunction) UnmarshalJSON

func (r *ChatCompletionMessageToolCallFunction) UnmarshalJSON(data []byte) (err error)

type ChatCompletionMessageToolCallFunctionParam

type ChatCompletionMessageToolCallFunctionParam struct {
	// The arguments to call the function with, as generated by the model in JSON
	// format. Note that the model does not always generate valid JSON, and may
	// hallucinate parameters not defined by your function schema. Validate the
	// arguments in your code before calling your function.
	Arguments param.Field[string] `json:"arguments,required"`
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

The function that the model called.

func (ChatCompletionMessageToolCallFunctionParam) MarshalJSON

func (r ChatCompletionMessageToolCallFunctionParam) MarshalJSON() (data []byte, err error)

type ChatCompletionMessageToolCallParam

type ChatCompletionMessageToolCallParam struct {
	// The ID of the tool call.
	ID param.Field[string] `json:"id,required"`
	// The function that the model called.
	Function param.Field[ChatCompletionMessageToolCallFunctionParam] `json:"function,required"`
	// The type of the tool. Currently, only `function` is supported.
	Type param.Field[ChatCompletionMessageToolCallType] `json:"type,required"`
}

func (ChatCompletionMessageToolCallParam) MarshalJSON

func (r ChatCompletionMessageToolCallParam) MarshalJSON() (data []byte, err error)

type ChatCompletionMessageToolCallType

type ChatCompletionMessageToolCallType string

The type of the tool. Currently, only `function` is supported.

const (
	ChatCompletionMessageToolCallTypeFunction ChatCompletionMessageToolCallType = "function"
)

func (ChatCompletionMessageToolCallType) IsKnown

type ChatCompletionNamedToolChoiceFunctionParam

type ChatCompletionNamedToolChoiceFunctionParam struct {
	// The name of the function to call.
	Name param.Field[string] `json:"name,required"`
}

func (ChatCompletionNamedToolChoiceFunctionParam) MarshalJSON

func (r ChatCompletionNamedToolChoiceFunctionParam) MarshalJSON() (data []byte, err error)

type ChatCompletionNamedToolChoiceParam

type ChatCompletionNamedToolChoiceParam struct {
	Function param.Field[ChatCompletionNamedToolChoiceFunctionParam] `json:"function,required"`
	// The type of the tool. Currently, only `function` is supported.
	Type param.Field[ChatCompletionNamedToolChoiceType] `json:"type,required"`
}

Specifies a tool the model should use. Use to force the model to call a specific function.

func (ChatCompletionNamedToolChoiceParam) MarshalJSON

func (r ChatCompletionNamedToolChoiceParam) MarshalJSON() (data []byte, err error)

type ChatCompletionNamedToolChoiceType

type ChatCompletionNamedToolChoiceType string

The type of the tool. Currently, only `function` is supported.

const (
	ChatCompletionNamedToolChoiceTypeFunction ChatCompletionNamedToolChoiceType = "function"
)

func (ChatCompletionNamedToolChoiceType) IsKnown

type ChatCompletionNewParams

type ChatCompletionNewParams struct {
	// A list of messages comprising the conversation so far.
	// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
	Messages param.Field[[]ChatCompletionMessageParamUnion] `json:"messages,required"`
	// ID of the model to use. See the
	// [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
	// table for details on which models work with the Chat API.
	Model param.Field[ChatModel] `json:"model,required"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
	// existing frequency in the text so far, decreasing the model's likelihood to
	// repeat the same line verbatim.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	FrequencyPenalty param.Field[float64] `json:"frequency_penalty"`
	// Deprecated in favor of `tool_choice`.
	//
	// Controls which (if any) function is called by the model. `none` means the model
	// will not call a function and instead generates a message. `auto` means the model
	// can pick between generating a message or calling a function. Specifying a
	// particular function via `{"name": "my_function"}` forces the model to call that
	// function.
	//
	// `none` is the default when no functions are present. `auto` is the default if
	// functions are present.
	FunctionCall param.Field[ChatCompletionNewParamsFunctionCallUnion] `json:"function_call"`
	// Deprecated in favor of `tools`.
	//
	// A list of functions the model may generate JSON inputs for.
	Functions param.Field[[]ChatCompletionNewParamsFunction] `json:"functions"`
	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// Accepts a JSON object that maps tokens (specified by their token ID in the
	// tokenizer) to an associated bias value from -100 to 100. Mathematically, the
	// bias is added to the logits generated by the model prior to sampling. The exact
	// effect will vary per model, but values between -1 and 1 should decrease or
	// increase likelihood of selection; values like -100 or 100 should result in a ban
	// or exclusive selection of the relevant token.
	LogitBias param.Field[map[string]int64] `json:"logit_bias"`
	// Whether to return log probabilities of the output tokens or not. If true,
	// returns the log probabilities of each output token returned in the `content` of
	// `message`.
	Logprobs param.Field[bool] `json:"logprobs"`
	// An upper bound for the number of tokens that can be generated for a completion,
	// including visible output tokens and
	// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
	MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"`
	// The maximum number of [tokens](/tokenizer) that can be generated in the chat
	// completion. This value can be used to control
	// [costs](https://openai.com/api/pricing/) for text generated via API.
	//
	// This value is now deprecated in favor of `max_completion_tokens`, and is not
	// compatible with
	// [o1 series models](https://platform.openai.com/docs/guides/reasoning).
	MaxTokens param.Field[int64] `json:"max_tokens"`
	// How many chat completion choices to generate for each input message. Note that
	// you will be charged based on the number of generated tokens across all of the
	// choices. Keep `n` as `1` to minimize costs.
	N param.Field[int64] `json:"n"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Field[bool] `json:"parallel_tool_calls"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on
	// whether they appear in the text so far, increasing the model's likelihood to
	// talk about new topics.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	PresencePenalty param.Field[float64] `json:"presence_penalty"`
	// An object specifying the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
	// [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat param.Field[ChatCompletionNewParamsResponseFormatUnion] `json:"response_format"`
	// This feature is in Beta. If specified, our system will make a best effort to
	// sample deterministically, such that repeated requests with the same `seed` and
	// parameters should return the same result. Determinism is not guaranteed, and you
	// should refer to the `system_fingerprint` response parameter to monitor changes
	// in the backend.
	Seed param.Field[int64] `json:"seed"`
	// Specifies the latency tier to use for processing the request. This parameter is
	// relevant for customers subscribed to the scale tier service:
	//
	//   - If set to 'auto', and the Project is Scale tier enabled, the system will
	//     utilize scale tier credits until they are exhausted.
	//   - If set to 'auto', and the Project is not Scale tier enabled, the request will
	//     be processed using the default service tier with a lower uptime SLA and no
	//     latency guarantee.
	//   - If set to 'default', the request will be processed using the default service
	//     tier with a lower uptime SLA and no latency guarantee.
	//   - When not set, the default behavior is 'auto'.
	//
	// When this parameter is set, the response body will include the `service_tier`
	// utilized.
	ServiceTier param.Field[ChatCompletionNewParamsServiceTier] `json:"service_tier"`
	// Up to 4 sequences where the API will stop generating further tokens.
	Stop param.Field[ChatCompletionNewParamsStopUnion] `json:"stop"`
	// Options for streaming response. Only set this when you set `stream: true`.
	StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	//
	// We generally recommend altering this or `top_p` but not both.
	Temperature param.Field[float64] `json:"temperature"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tool and instead generates a message. `auto` means the model can
	// pick between generating a message or calling one or more tools. `required` means
	// the model must call one or more tools. Specifying a particular tool via
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	//
	// `none` is the default when no tools are present. `auto` is the default if tools
	// are present.
	ToolChoice param.Field[ChatCompletionToolChoiceOptionUnionParam] `json:"tool_choice"`
	// A list of tools the model may call. Currently, only functions are supported as a
	// tool. Use this to provide a list of functions the model may generate JSON inputs
	// for. A max of 128 functions are supported.
	Tools param.Field[[]ChatCompletionToolParam] `json:"tools"`
	// An integer between 0 and 20 specifying the number of most likely tokens to
	// return at each token position, each with an associated log probability.
	// `logprobs` must be set to `true` if this parameter is used.
	TopLogprobs param.Field[int64] `json:"top_logprobs"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or `temperature` but not both.
	TopP param.Field[float64] `json:"top_p"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (ChatCompletionNewParams) MarshalJSON

func (r ChatCompletionNewParams) MarshalJSON() (data []byte, err error)

type ChatCompletionNewParamsFunction

type ChatCompletionNewParamsFunction struct {
	// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
	// underscores and dashes, with a maximum length of 64.
	Name param.Field[string] `json:"name,required"`
	// A description of what the function does, used by the model to choose when and
	// how to call the function.
	Description param.Field[string] `json:"description"`
	// The parameters the functions accepts, described as a JSON Schema object. See the
	// [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
	// and the
	// [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
	// documentation about the format.
	//
	// Omitting `parameters` defines a function with an empty parameter list.
	Parameters param.Field[shared.FunctionParameters] `json:"parameters"`
}

func (ChatCompletionNewParamsFunction) MarshalJSON

func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error)

type ChatCompletionNewParamsFunctionCallString

type ChatCompletionNewParamsFunctionCallString string

`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.

const (
	ChatCompletionNewParamsFunctionCallStringNone ChatCompletionNewParamsFunctionCallString = "none"
	ChatCompletionNewParamsFunctionCallStringAuto ChatCompletionNewParamsFunctionCallString = "auto"
)

func (ChatCompletionNewParamsFunctionCallString) IsKnown

type ChatCompletionNewParamsFunctionCallUnion

type ChatCompletionNewParamsFunctionCallUnion interface {
	// contains filtered or unexported methods
}

Deprecated in favor of `tool_choice`.

Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.

`none` is the default when no functions are present. `auto` is the default if functions are present.

Satisfied by ChatCompletionNewParamsFunctionCallString, ChatCompletionFunctionCallOptionParam.

type ChatCompletionNewParamsResponseFormat

type ChatCompletionNewParamsResponseFormat struct {
	// The type of response format being defined: `text`
	Type       param.Field[ChatCompletionNewParamsResponseFormatType] `json:"type,required"`
	JSONSchema param.Field[interface{}]                               `json:"json_schema,required"`
}

An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.

**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.

func (ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParamsResponseFormatUnion

func (r ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParamsResponseFormatUnion()

func (ChatCompletionNewParamsResponseFormat) MarshalJSON

func (r ChatCompletionNewParamsResponseFormat) MarshalJSON() (data []byte, err error)

type ChatCompletionNewParamsResponseFormatType

type ChatCompletionNewParamsResponseFormatType string

The type of response format being defined: `text`

const (
	ChatCompletionNewParamsResponseFormatTypeText       ChatCompletionNewParamsResponseFormatType = "text"
	ChatCompletionNewParamsResponseFormatTypeJSONObject ChatCompletionNewParamsResponseFormatType = "json_object"
	ChatCompletionNewParamsResponseFormatTypeJSONSchema ChatCompletionNewParamsResponseFormatType = "json_schema"
)

func (ChatCompletionNewParamsResponseFormatType) IsKnown

type ChatCompletionNewParamsResponseFormatUnion

type ChatCompletionNewParamsResponseFormatUnion interface {
	ImplementsChatCompletionNewParamsResponseFormatUnion()
}

An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.

**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.

Satisfied by shared.ResponseFormatTextParam, shared.ResponseFormatJSONObjectParam, shared.ResponseFormatJSONSchemaParam, ChatCompletionNewParamsResponseFormat.

type ChatCompletionNewParamsServiceTier

type ChatCompletionNewParamsServiceTier string

Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:

  • If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
  • If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
  • If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
  • When not set, the default behavior is 'auto'.

When this parameter is set, the response body will include the `service_tier` utilized.

const (
	ChatCompletionNewParamsServiceTierAuto    ChatCompletionNewParamsServiceTier = "auto"
	ChatCompletionNewParamsServiceTierDefault ChatCompletionNewParamsServiceTier = "default"
)

func (ChatCompletionNewParamsServiceTier) IsKnown

type ChatCompletionNewParamsStopArray

type ChatCompletionNewParamsStopArray []string

func (ChatCompletionNewParamsStopArray) ImplementsChatCompletionNewParamsStopUnion

func (r ChatCompletionNewParamsStopArray) ImplementsChatCompletionNewParamsStopUnion()

type ChatCompletionNewParamsStopUnion

type ChatCompletionNewParamsStopUnion interface {
	ImplementsChatCompletionNewParamsStopUnion()
}

Up to 4 sequences where the API will stop generating further tokens.

Satisfied by shared.UnionString, ChatCompletionNewParamsStopArray.

type ChatCompletionObject

type ChatCompletionObject string

The object type, which is always `chat.completion`.

const (
	ChatCompletionObjectChatCompletion ChatCompletionObject = "chat.completion"
)

func (ChatCompletionObject) IsKnown

func (r ChatCompletionObject) IsKnown() bool

type ChatCompletionService

type ChatCompletionService struct {
	Options []option.RequestOption
}

ChatCompletionService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewChatCompletionService method instead.

func NewChatCompletionService

func NewChatCompletionService(opts ...option.RequestOption) (r *ChatCompletionService)

NewChatCompletionService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*ChatCompletionService) New

Creates a model response for the given chat conversation.

func (*ChatCompletionService) NewStreaming

Creates a model response for the given chat conversation.

type ChatCompletionServiceTier

type ChatCompletionServiceTier string

The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.

const (
	ChatCompletionServiceTierScale   ChatCompletionServiceTier = "scale"
	ChatCompletionServiceTierDefault ChatCompletionServiceTier = "default"
)

func (ChatCompletionServiceTier) IsKnown

func (r ChatCompletionServiceTier) IsKnown() bool

type ChatCompletionStreamOptionsParam

type ChatCompletionStreamOptionsParam struct {
	// If set, an additional chunk will be streamed before the `data: [DONE]` message.
	// The `usage` field on this chunk shows the token usage statistics for the entire
	// request, and the `choices` field will always be an empty array. All other chunks
	// will also include a `usage` field, but with a null value.
	IncludeUsage param.Field[bool] `json:"include_usage"`
}

Options for streaming response. Only set this when you set `stream: true`.

func (ChatCompletionStreamOptionsParam) MarshalJSON

func (r ChatCompletionStreamOptionsParam) MarshalJSON() (data []byte, err error)

type ChatCompletionSystemMessageParam

type ChatCompletionSystemMessageParam struct {
	// The contents of the system message.
	Content param.Field[[]ChatCompletionContentPartTextParam] `json:"content,required"`
	// The role of the messages author, in this case `system`.
	Role param.Field[ChatCompletionSystemMessageParamRole] `json:"role,required"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name param.Field[string] `json:"name"`
}

func (ChatCompletionSystemMessageParam) MarshalJSON

func (r ChatCompletionSystemMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionSystemMessageParamRole

type ChatCompletionSystemMessageParamRole string

The role of the messages author, in this case `system`.

const (
	ChatCompletionSystemMessageParamRoleSystem ChatCompletionSystemMessageParamRole = "system"
)

func (ChatCompletionSystemMessageParamRole) IsKnown

type ChatCompletionTokenLogprob

type ChatCompletionTokenLogprob struct {
	// The token.
	Token string `json:"token,required"`
	// A list of integers representing the UTF-8 bytes representation of the token.
	// Useful in instances where characters are represented by multiple tokens and
	// their byte representations must be combined to generate the correct text
	// representation. Can be `null` if there is no bytes representation for the token.
	Bytes []int64 `json:"bytes,required,nullable"`
	// The log probability of this token, if it is within the top 20 most likely
	// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
	// unlikely.
	Logprob float64 `json:"logprob,required"`
	// List of the most likely tokens and their log probability, at this token
	// position. In rare cases, there may be fewer than the number of requested
	// `top_logprobs` returned.
	TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs,required"`
	JSON        chatCompletionTokenLogprobJSON         `json:"-"`
}

func (*ChatCompletionTokenLogprob) UnmarshalJSON

func (r *ChatCompletionTokenLogprob) UnmarshalJSON(data []byte) (err error)

type ChatCompletionTokenLogprobTopLogprob

type ChatCompletionTokenLogprobTopLogprob struct {
	// The token.
	Token string `json:"token,required"`
	// A list of integers representing the UTF-8 bytes representation of the token.
	// Useful in instances where characters are represented by multiple tokens and
	// their byte representations must be combined to generate the correct text
	// representation. Can be `null` if there is no bytes representation for the token.
	Bytes []int64 `json:"bytes,required,nullable"`
	// The log probability of this token, if it is within the top 20 most likely
	// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
	// unlikely.
	Logprob float64                                  `json:"logprob,required"`
	JSON    chatCompletionTokenLogprobTopLogprobJSON `json:"-"`
}

func (*ChatCompletionTokenLogprobTopLogprob) UnmarshalJSON

func (r *ChatCompletionTokenLogprobTopLogprob) UnmarshalJSON(data []byte) (err error)

type ChatCompletionToolChoiceOptionString

type ChatCompletionToolChoiceOptionString string

`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.

const (
	ChatCompletionToolChoiceOptionStringNone     ChatCompletionToolChoiceOptionString = "none"
	ChatCompletionToolChoiceOptionStringAuto     ChatCompletionToolChoiceOptionString = "auto"
	ChatCompletionToolChoiceOptionStringRequired ChatCompletionToolChoiceOptionString = "required"
)

func (ChatCompletionToolChoiceOptionString) IsKnown

type ChatCompletionToolChoiceOptionUnionParam

type ChatCompletionToolChoiceOptionUnionParam interface {
	// contains filtered or unexported methods
}

Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.

`none` is the default when no tools are present. `auto` is the default if tools are present.

Satisfied by ChatCompletionToolChoiceOptionString, ChatCompletionNamedToolChoiceParam.

type ChatCompletionToolMessageParam

type ChatCompletionToolMessageParam struct {
	// The contents of the tool message.
	Content param.Field[[]ChatCompletionContentPartTextParam] `json:"content,required"`
	// The role of the messages author, in this case `tool`.
	Role param.Field[ChatCompletionToolMessageParamRole] `json:"role,required"`
	// Tool call that this message is responding to.
	ToolCallID param.Field[string] `json:"tool_call_id,required"`
}

func ToolMessage

func ToolMessage(toolCallID, content string) ChatCompletionToolMessageParam

func (ChatCompletionToolMessageParam) MarshalJSON

func (r ChatCompletionToolMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionToolMessageParamRole

type ChatCompletionToolMessageParamRole string

The role of the messages author, in this case `tool`.

const (
	ChatCompletionToolMessageParamRoleTool ChatCompletionToolMessageParamRole = "tool"
)

func (ChatCompletionToolMessageParamRole) IsKnown

type ChatCompletionToolParam

type ChatCompletionToolParam struct {
	Function param.Field[shared.FunctionDefinitionParam] `json:"function,required"`
	// The type of the tool. Currently, only `function` is supported.
	Type param.Field[ChatCompletionToolType] `json:"type,required"`
}

func (ChatCompletionToolParam) MarshalJSON

func (r ChatCompletionToolParam) MarshalJSON() (data []byte, err error)

type ChatCompletionToolType

type ChatCompletionToolType string

The type of the tool. Currently, only `function` is supported.

const (
	ChatCompletionToolTypeFunction ChatCompletionToolType = "function"
)

func (ChatCompletionToolType) IsKnown

func (r ChatCompletionToolType) IsKnown() bool

type ChatCompletionUserMessageParam

type ChatCompletionUserMessageParam struct {
	// The contents of the user message.
	Content param.Field[[]ChatCompletionContentPartUnionParam] `json:"content,required"`
	// The role of the messages author, in this case `user`.
	Role param.Field[ChatCompletionUserMessageParamRole] `json:"role,required"`
	// An optional name for the participant. Provides the model information to
	// differentiate between participants of the same role.
	Name param.Field[string] `json:"name"`
}

func (ChatCompletionUserMessageParam) MarshalJSON

func (r ChatCompletionUserMessageParam) MarshalJSON() (data []byte, err error)

type ChatCompletionUserMessageParamRole

type ChatCompletionUserMessageParamRole string

The role of the messages author, in this case `user`.

const (
	ChatCompletionUserMessageParamRoleUser ChatCompletionUserMessageParamRole = "user"
)

func (ChatCompletionUserMessageParamRole) IsKnown

type ChatModel

type ChatModel = string
const (
	ChatModelO1Preview           ChatModel = "o1-preview"
	ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
	ChatModelO1Mini              ChatModel = "o1-mini"
	ChatModelO1Mini2024_09_12    ChatModel = "o1-mini-2024-09-12"
	ChatModelGPT4o               ChatModel = "gpt-4o"
	ChatModelGPT4o2024_08_06     ChatModel = "gpt-4o-2024-08-06"
	ChatModelGPT4o2024_05_13     ChatModel = "gpt-4o-2024-05-13"
	ChatModelChatgpt4oLatest     ChatModel = "chatgpt-4o-latest"
	ChatModelGPT4oMini           ChatModel = "gpt-4o-mini"
	ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
	ChatModelGPT4Turbo           ChatModel = "gpt-4-turbo"
	ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
	ChatModelGPT4_0125Preview    ChatModel = "gpt-4-0125-preview"
	ChatModelGPT4TurboPreview    ChatModel = "gpt-4-turbo-preview"
	ChatModelGPT4_1106Preview    ChatModel = "gpt-4-1106-preview"
	ChatModelGPT4VisionPreview   ChatModel = "gpt-4-vision-preview"
	ChatModelGPT4                ChatModel = "gpt-4"
	ChatModelGPT4_0314           ChatModel = "gpt-4-0314"
	ChatModelGPT4_0613           ChatModel = "gpt-4-0613"
	ChatModelGPT4_32k            ChatModel = "gpt-4-32k"
	ChatModelGPT4_32k0314        ChatModel = "gpt-4-32k-0314"
	ChatModelGPT4_32k0613        ChatModel = "gpt-4-32k-0613"
	ChatModelGPT3_5Turbo         ChatModel = "gpt-3.5-turbo"
	ChatModelGPT3_5Turbo16k      ChatModel = "gpt-3.5-turbo-16k"
	ChatModelGPT3_5Turbo0301     ChatModel = "gpt-3.5-turbo-0301"
	ChatModelGPT3_5Turbo0613     ChatModel = "gpt-3.5-turbo-0613"
	ChatModelGPT3_5Turbo1106     ChatModel = "gpt-3.5-turbo-1106"
	ChatModelGPT3_5Turbo0125     ChatModel = "gpt-3.5-turbo-0125"
	ChatModelGPT3_5Turbo16k0613  ChatModel = "gpt-3.5-turbo-16k-0613"
)

type ChatService

type ChatService struct {
	Options     []option.RequestOption
	Completions *ChatCompletionService
}

ChatService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewChatService method instead.

func NewChatService

func NewChatService(opts ...option.RequestOption) (r *ChatService)

NewChatService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

type Client

type Client struct {
	Options     []option.RequestOption
	Completions *CompletionService
	Chat        *ChatService
	Embeddings  *EmbeddingService
	Files       *FileService
	Images      *ImageService
	Audio       *AudioService
	Moderations *ModerationService
	Models      *ModelService
	FineTuning  *FineTuningService
	Beta        *BetaService
	Batches     *BatchService
	Uploads     *UploadService
}

Client creates a struct with services and top level methods that help with interacting with the openai API. You should not instantiate this client directly; use the NewClient method instead.

func NewClient

func NewClient(opts ...option.RequestOption) (r *Client)

NewClient generates a new client with the default options read from the environment (OPENAI_API_KEY, OPENAI_ORG_ID, OPENAI_PROJECT_ID). The options passed in as arguments are applied after these defaults, and all options will be passed down to the services and requests that this client makes.

func (*Client) Delete

func (r *Client) Delete(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Delete makes a DELETE request with the given URL, params, and optionally deserializes to a response. See [Execute] documentation on the params and response.

func (*Client) Execute

func (r *Client) Execute(ctx context.Context, method string, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Execute makes a request with the given context, method, URL, request params, response, and request options. This is useful for hitting undocumented endpoints while retaining the base URL, auth, retries, and other options from the client.

If a byte slice or an io.Reader is supplied to params, it will be used as-is for the request body.

The params is by default serialized into the body using encoding/json. If your type implements a MarshalJSON function, it will be used instead to serialize the request. If a URLQuery method is implemented, the returned url.Values will be used as query strings to the url.

If your params struct uses param.Field, you must provide one or more of the [MarshalJSON], [URLQuery], and [MarshalForm] functions. It is undefined behavior to use a struct that uses param.Field without specifying how it is serialized.

Any "…Params" object defined in this library can be used as the request argument. Note that 'path' arguments will not be forwarded into the url.

The response body will be deserialized into the res variable, depending on its type:

  • A pointer to a *http.Response is populated by the raw response.
  • A pointer to a byte array will be populated with the contents of the response body.
  • A pointer to any other type uses this library's default JSON decoding, which respects UnmarshalJSON if it is defined on the type.
  • A nil value will not read the response body.

For even greater flexibility, see option.WithResponseInto and option.WithResponseBodyInto.

func (*Client) Get

func (r *Client) Get(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Get makes a GET request with the given URL, params, and optionally deserializes to a response. See [Execute] documentation on the params and response.

func (*Client) Patch

func (r *Client) Patch(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Patch makes a PATCH request with the given URL, params, and optionally deserializes to a response. See [Execute] documentation on the params and response.

func (*Client) Post

func (r *Client) Post(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Post makes a POST request with the given URL, params, and optionally deserializes to a response. See [Execute] documentation on the params and response.

func (*Client) Put

func (r *Client) Put(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error

Put makes a PUT request with the given URL, params, and optionally deserializes to a response. See [Execute] documentation on the params and response.

type CodeInterpreterLogs

type CodeInterpreterLogs struct {
	// The index of the output in the outputs array.
	Index int64 `json:"index,required"`
	// Always `logs`.
	Type CodeInterpreterLogsType `json:"type,required"`
	// The text output from the Code Interpreter tool call.
	Logs string                  `json:"logs"`
	JSON codeInterpreterLogsJSON `json:"-"`
}

Text output from the Code Interpreter tool call as part of a run step.

func (*CodeInterpreterLogs) UnmarshalJSON

func (r *CodeInterpreterLogs) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterLogsType

type CodeInterpreterLogsType string

Always `logs`.

const (
	CodeInterpreterLogsTypeLogs CodeInterpreterLogsType = "logs"
)

func (CodeInterpreterLogsType) IsKnown

func (r CodeInterpreterLogsType) IsKnown() bool

type CodeInterpreterOutputImage

type CodeInterpreterOutputImage struct {
	// The index of the output in the outputs array.
	Index int64 `json:"index,required"`
	// Always `image`.
	Type  CodeInterpreterOutputImageType  `json:"type,required"`
	Image CodeInterpreterOutputImageImage `json:"image"`
	JSON  codeInterpreterOutputImageJSON  `json:"-"`
}

func (*CodeInterpreterOutputImage) UnmarshalJSON

func (r *CodeInterpreterOutputImage) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterOutputImageImage

type CodeInterpreterOutputImageImage struct {
	// The [file](https://platform.openai.com/docs/api-reference/files) ID of the
	// image.
	FileID string                              `json:"file_id"`
	JSON   codeInterpreterOutputImageImageJSON `json:"-"`
}

func (*CodeInterpreterOutputImageImage) UnmarshalJSON

func (r *CodeInterpreterOutputImageImage) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterOutputImageType

type CodeInterpreterOutputImageType string

Always `image`.

const (
	CodeInterpreterOutputImageTypeImage CodeInterpreterOutputImageType = "image"
)

func (CodeInterpreterOutputImageType) IsKnown

type CodeInterpreterTool

type CodeInterpreterTool struct {
	// The type of tool being defined: `code_interpreter`
	Type CodeInterpreterToolType `json:"type,required"`
	JSON codeInterpreterToolJSON `json:"-"`
}

func (*CodeInterpreterTool) UnmarshalJSON

func (r *CodeInterpreterTool) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCall

type CodeInterpreterToolCall struct {
	// The ID of the tool call.
	ID string `json:"id,required"`
	// The Code Interpreter tool call definition.
	CodeInterpreter CodeInterpreterToolCallCodeInterpreter `json:"code_interpreter,required"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type CodeInterpreterToolCallType `json:"type,required"`
	JSON codeInterpreterToolCallJSON `json:"-"`
}

Details of the Code Interpreter tool call the run step was involved in.

func (*CodeInterpreterToolCall) UnmarshalJSON

func (r *CodeInterpreterToolCall) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallCodeInterpreter

type CodeInterpreterToolCallCodeInterpreter struct {
	// The input to the Code Interpreter tool call.
	Input string `json:"input,required"`
	// The outputs from the Code Interpreter tool call. Code Interpreter can output one
	// or more items, including text (`logs`) or images (`image`). Each of these are
	// represented by a different object type.
	Outputs []CodeInterpreterToolCallCodeInterpreterOutput `json:"outputs,required"`
	JSON    codeInterpreterToolCallCodeInterpreterJSON     `json:"-"`
}

The Code Interpreter tool call definition.

func (*CodeInterpreterToolCallCodeInterpreter) UnmarshalJSON

func (r *CodeInterpreterToolCallCodeInterpreter) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallCodeInterpreterOutput

type CodeInterpreterToolCallCodeInterpreterOutput struct {
	// Always `logs`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsType `json:"type,required"`
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs"`
	// This field can have the runtime type of
	// [CodeInterpreterToolCallCodeInterpreterOutputsImageImage].
	Image interface{}                                      `json:"image,required"`
	JSON  codeInterpreterToolCallCodeInterpreterOutputJSON `json:"-"`
	// contains filtered or unexported fields
}

Text output from the Code Interpreter tool call as part of a run step.

func (CodeInterpreterToolCallCodeInterpreterOutput) AsUnion

AsUnion returns a CodeInterpreterToolCallCodeInterpreterOutputsUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterToolCallCodeInterpreterOutputsLogs, CodeInterpreterToolCallCodeInterpreterOutputsImage.

func (*CodeInterpreterToolCallCodeInterpreterOutput) UnmarshalJSON

func (r *CodeInterpreterToolCallCodeInterpreterOutput) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallCodeInterpreterOutputsImage

type CodeInterpreterToolCallCodeInterpreterOutputsImage struct {
	Image CodeInterpreterToolCallCodeInterpreterOutputsImageImage `json:"image,required"`
	// Always `image`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsImageType `json:"type,required"`
	JSON codeInterpreterToolCallCodeInterpreterOutputsImageJSON `json:"-"`
}

func (*CodeInterpreterToolCallCodeInterpreterOutputsImage) UnmarshalJSON

func (r *CodeInterpreterToolCallCodeInterpreterOutputsImage) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallCodeInterpreterOutputsImageImage

type CodeInterpreterToolCallCodeInterpreterOutputsImageImage struct {
	// The [file](https://platform.openai.com/docs/api-reference/files) ID of the
	// image.
	FileID string                                                      `json:"file_id,required"`
	JSON   codeInterpreterToolCallCodeInterpreterOutputsImageImageJSON `json:"-"`
}

func (*CodeInterpreterToolCallCodeInterpreterOutputsImageImage) UnmarshalJSON

type CodeInterpreterToolCallCodeInterpreterOutputsImageType

type CodeInterpreterToolCallCodeInterpreterOutputsImageType string

Always `image`.

const (
	CodeInterpreterToolCallCodeInterpreterOutputsImageTypeImage CodeInterpreterToolCallCodeInterpreterOutputsImageType = "image"
)

func (CodeInterpreterToolCallCodeInterpreterOutputsImageType) IsKnown

type CodeInterpreterToolCallCodeInterpreterOutputsLogs

type CodeInterpreterToolCallCodeInterpreterOutputsLogs struct {
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs,required"`
	// Always `logs`.
	Type CodeInterpreterToolCallCodeInterpreterOutputsLogsType `json:"type,required"`
	JSON codeInterpreterToolCallCodeInterpreterOutputsLogsJSON `json:"-"`
}

Text output from the Code Interpreter tool call as part of a run step.

func (*CodeInterpreterToolCallCodeInterpreterOutputsLogs) UnmarshalJSON

func (r *CodeInterpreterToolCallCodeInterpreterOutputsLogs) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallCodeInterpreterOutputsLogsType

type CodeInterpreterToolCallCodeInterpreterOutputsLogsType string

Always `logs`.

const (
	CodeInterpreterToolCallCodeInterpreterOutputsLogsTypeLogs CodeInterpreterToolCallCodeInterpreterOutputsLogsType = "logs"
)

func (CodeInterpreterToolCallCodeInterpreterOutputsLogsType) IsKnown

type CodeInterpreterToolCallCodeInterpreterOutputsType

type CodeInterpreterToolCallCodeInterpreterOutputsType string

The type of the output: `logs` or `image`.

const (
	CodeInterpreterToolCallCodeInterpreterOutputsTypeLogs  CodeInterpreterToolCallCodeInterpreterOutputsType = "logs"
	CodeInterpreterToolCallCodeInterpreterOutputsTypeImage CodeInterpreterToolCallCodeInterpreterOutputsType = "image"
)

func (CodeInterpreterToolCallCodeInterpreterOutputsType) IsKnown

type CodeInterpreterToolCallCodeInterpreterOutputsUnion

type CodeInterpreterToolCallCodeInterpreterOutputsUnion interface {
	// contains filtered or unexported methods
}

Text output from the Code Interpreter tool call as part of a run step.

Union satisfied by CodeInterpreterToolCallCodeInterpreterOutputsLogs or CodeInterpreterToolCallCodeInterpreterOutputsImage.

type CodeInterpreterToolCallDelta

type CodeInterpreterToolCallDelta struct {
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type CodeInterpreterToolCallDeltaType `json:"type,required"`
	// The ID of the tool call.
	ID string `json:"id"`
	// The Code Interpreter tool call definition.
	CodeInterpreter CodeInterpreterToolCallDeltaCodeInterpreter `json:"code_interpreter"`
	JSON            codeInterpreterToolCallDeltaJSON            `json:"-"`
}

Details of the Code Interpreter tool call the run step was involved in.

func (*CodeInterpreterToolCallDelta) UnmarshalJSON

func (r *CodeInterpreterToolCallDelta) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallDeltaCodeInterpreter

type CodeInterpreterToolCallDeltaCodeInterpreter struct {
	// The input to the Code Interpreter tool call.
	Input string `json:"input"`
	// The outputs from the Code Interpreter tool call. Code Interpreter can output one
	// or more items, including text (`logs`) or images (`image`). Each of these are
	// represented by a different object type.
	Outputs []CodeInterpreterToolCallDeltaCodeInterpreterOutput `json:"outputs"`
	JSON    codeInterpreterToolCallDeltaCodeInterpreterJSON     `json:"-"`
}

The Code Interpreter tool call definition.

func (*CodeInterpreterToolCallDeltaCodeInterpreter) UnmarshalJSON

func (r *CodeInterpreterToolCallDeltaCodeInterpreter) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallDeltaCodeInterpreterOutput

type CodeInterpreterToolCallDeltaCodeInterpreterOutput struct {
	// The index of the output in the outputs array.
	Index int64 `json:"index,required"`
	// Either `logs` or `image`.
	Type CodeInterpreterToolCallDeltaCodeInterpreterOutputsType `json:"type,required"`
	// The text output from the Code Interpreter tool call.
	Logs string `json:"logs"`
	// This field can have the runtime type of [CodeInterpreterOutputImageImage].
	Image interface{}                                           `json:"image,required"`
	JSON  codeInterpreterToolCallDeltaCodeInterpreterOutputJSON `json:"-"`
	// contains filtered or unexported fields
}

Text output from the Code Interpreter tool call as part of a run step.

func (CodeInterpreterToolCallDeltaCodeInterpreterOutput) AsUnion

AsUnion returns a CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterLogs, CodeInterpreterOutputImage.

func (*CodeInterpreterToolCallDeltaCodeInterpreterOutput) UnmarshalJSON

func (r *CodeInterpreterToolCallDeltaCodeInterpreterOutput) UnmarshalJSON(data []byte) (err error)

type CodeInterpreterToolCallDeltaCodeInterpreterOutputsType

type CodeInterpreterToolCallDeltaCodeInterpreterOutputsType string

Either `logs` or `image`.

const (
	CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeLogs  CodeInterpreterToolCallDeltaCodeInterpreterOutputsType = "logs"
	CodeInterpreterToolCallDeltaCodeInterpreterOutputsTypeImage CodeInterpreterToolCallDeltaCodeInterpreterOutputsType = "image"
)

func (CodeInterpreterToolCallDeltaCodeInterpreterOutputsType) IsKnown

type CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion

type CodeInterpreterToolCallDeltaCodeInterpreterOutputsUnion interface {
	// contains filtered or unexported methods
}

Text output from the Code Interpreter tool call as part of a run step.

Union satisfied by CodeInterpreterLogs or CodeInterpreterOutputImage.

type CodeInterpreterToolCallDeltaType

type CodeInterpreterToolCallDeltaType string

The type of tool call. This is always going to be `code_interpreter` for this type of tool call.

const (
	CodeInterpreterToolCallDeltaTypeCodeInterpreter CodeInterpreterToolCallDeltaType = "code_interpreter"
)

func (CodeInterpreterToolCallDeltaType) IsKnown

type CodeInterpreterToolCallType

type CodeInterpreterToolCallType string

The type of tool call. This is always going to be `code_interpreter` for this type of tool call.

const (
	CodeInterpreterToolCallTypeCodeInterpreter CodeInterpreterToolCallType = "code_interpreter"
)

func (CodeInterpreterToolCallType) IsKnown

func (r CodeInterpreterToolCallType) IsKnown() bool

type CodeInterpreterToolParam

type CodeInterpreterToolParam struct {
	// The type of tool being defined: `code_interpreter`
	Type param.Field[CodeInterpreterToolType] `json:"type,required"`
}

func (CodeInterpreterToolParam) MarshalJSON

func (r CodeInterpreterToolParam) MarshalJSON() (data []byte, err error)

type CodeInterpreterToolType

type CodeInterpreterToolType string

The type of tool being defined: `code_interpreter`

const (
	CodeInterpreterToolTypeCodeInterpreter CodeInterpreterToolType = "code_interpreter"
)

func (CodeInterpreterToolType) IsKnown

func (r CodeInterpreterToolType) IsKnown() bool

type Completion

type Completion struct {
	// A unique identifier for the completion.
	ID string `json:"id,required"`
	// The list of completion choices the model generated for the input prompt.
	Choices []CompletionChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the completion was created.
	Created int64 `json:"created,required"`
	// The model used for completion.
	Model string `json:"model,required"`
	// The object type, which is always "text_completion"
	Object CompletionObject `json:"object,required"`
	// This fingerprint represents the backend configuration that the model runs with.
	//
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// Usage statistics for the completion request.
	Usage CompletionUsage `json:"usage"`
	JSON  completionJSON  `json:"-"`
}

Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).

func (*Completion) UnmarshalJSON

func (r *Completion) UnmarshalJSON(data []byte) (err error)

type CompletionChoice

type CompletionChoice struct {
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, or `content_filter` if
	// content was omitted due to a flag from our content filters.
	FinishReason CompletionChoiceFinishReason `json:"finish_reason,required"`
	Index        int64                        `json:"index,required"`
	Logprobs     CompletionChoiceLogprobs     `json:"logprobs,required,nullable"`
	Text         string                       `json:"text,required"`
	JSON         completionChoiceJSON         `json:"-"`
}

func (*CompletionChoice) UnmarshalJSON

func (r *CompletionChoice) UnmarshalJSON(data []byte) (err error)

type CompletionChoiceFinishReason

type CompletionChoiceFinishReason string

The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters.

const (
	CompletionChoiceFinishReasonStop          CompletionChoiceFinishReason = "stop"
	CompletionChoiceFinishReasonLength        CompletionChoiceFinishReason = "length"
	CompletionChoiceFinishReasonContentFilter CompletionChoiceFinishReason = "content_filter"
)

func (CompletionChoiceFinishReason) IsKnown

func (r CompletionChoiceFinishReason) IsKnown() bool

type CompletionChoiceLogprobs

type CompletionChoiceLogprobs struct {
	TextOffset    []int64                      `json:"text_offset"`
	TokenLogprobs []float64                    `json:"token_logprobs"`
	Tokens        []string                     `json:"tokens"`
	TopLogprobs   []map[string]float64         `json:"top_logprobs"`
	JSON          completionChoiceLogprobsJSON `json:"-"`
}

func (*CompletionChoiceLogprobs) UnmarshalJSON

func (r *CompletionChoiceLogprobs) UnmarshalJSON(data []byte) (err error)

type CompletionNewParams

type CompletionNewParams struct {
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models/overview) for
	// descriptions of them.
	Model param.Field[CompletionNewParamsModel] `json:"model,required"`
	// The prompt(s) to generate completions for, encoded as a string, array of
	// strings, array of tokens, or array of token arrays.
	//
	// Note that <|endoftext|> is the document separator that the model sees during
	// training, so if a prompt is not specified the model will generate as if from the
	// beginning of a new document.
	Prompt param.Field[CompletionNewParamsPromptUnion] `json:"prompt,required"`
	// Generates `best_of` completions server-side and returns the "best" (the one with
	// the highest log probability per token). Results cannot be streamed.
	//
	// When used with `n`, `best_of` controls the number of candidate completions and
	// `n` specifies how many to return – `best_of` must be greater than `n`.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	BestOf param.Field[int64] `json:"best_of"`
	// Echo back the prompt in addition to the completion
	Echo param.Field[bool] `json:"echo"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
	// existing frequency in the text so far, decreasing the model's likelihood to
	// repeat the same line verbatim.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	FrequencyPenalty param.Field[float64] `json:"frequency_penalty"`
	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// Accepts a JSON object that maps tokens (specified by their token ID in the GPT
	// tokenizer) to an associated bias value from -100 to 100. You can use this
	// [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
	// Mathematically, the bias is added to the logits generated by the model prior to
	// sampling. The exact effect will vary per model, but values between -1 and 1
	// should decrease or increase likelihood of selection; values like -100 or 100
	// should result in a ban or exclusive selection of the relevant token.
	//
	// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
	// from being generated.
	LogitBias param.Field[map[string]int64] `json:"logit_bias"`
	// Include the log probabilities on the `logprobs` most likely output tokens, as
	// well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
	// list of the 5 most likely tokens. The API will always return the `logprob` of
	// the sampled token, so there may be up to `logprobs+1` elements in the response.
	//
	// The maximum value for `logprobs` is 5.
	Logprobs param.Field[int64] `json:"logprobs"`
	// The maximum number of [tokens](/tokenizer) that can be generated in the
	// completion.
	//
	// The token count of your prompt plus `max_tokens` cannot exceed the model's
	// context length.
	// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
	// for counting tokens.
	MaxTokens param.Field[int64] `json:"max_tokens"`
	// How many completions to generate for each prompt.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	N param.Field[int64] `json:"n"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on
	// whether they appear in the text so far, increasing the model's likelihood to
	// talk about new topics.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	PresencePenalty param.Field[float64] `json:"presence_penalty"`
	// If specified, our system will make a best effort to sample deterministically,
	// such that repeated requests with the same `seed` and parameters should return
	// the same result.
	//
	// Determinism is not guaranteed, and you should refer to the `system_fingerprint`
	// response parameter to monitor changes in the backend.
	Seed param.Field[int64] `json:"seed"`
	// Up to 4 sequences where the API will stop generating further tokens. The
	// returned text will not contain the stop sequence.
	Stop param.Field[CompletionNewParamsStopUnion] `json:"stop"`
	// Options for streaming response. Only set this when you set `stream: true`.
	StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"`
	// The suffix that comes after a completion of inserted text.
	//
	// This parameter is only supported for `gpt-3.5-turbo-instruct`.
	Suffix param.Field[string] `json:"suffix"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	//
	// We generally recommend altering this or `top_p` but not both.
	Temperature param.Field[float64] `json:"temperature"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or `temperature` but not both.
	TopP param.Field[float64] `json:"top_p"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (CompletionNewParams) MarshalJSON

func (r CompletionNewParams) MarshalJSON() (data []byte, err error)

type CompletionNewParamsModel

type CompletionNewParamsModel string
const (
	CompletionNewParamsModelGPT3_5TurboInstruct CompletionNewParamsModel = "gpt-3.5-turbo-instruct"
	CompletionNewParamsModelDavinci002          CompletionNewParamsModel = "davinci-002"
	CompletionNewParamsModelBabbage002          CompletionNewParamsModel = "babbage-002"
)

func (CompletionNewParamsModel) IsKnown

func (r CompletionNewParamsModel) IsKnown() bool

type CompletionNewParamsPromptArrayOfStrings

type CompletionNewParamsPromptArrayOfStrings []string

func (CompletionNewParamsPromptArrayOfStrings) ImplementsCompletionNewParamsPromptUnion

func (r CompletionNewParamsPromptArrayOfStrings) ImplementsCompletionNewParamsPromptUnion()

type CompletionNewParamsPromptArrayOfTokenArrays

type CompletionNewParamsPromptArrayOfTokenArrays [][]int64

func (CompletionNewParamsPromptArrayOfTokenArrays) ImplementsCompletionNewParamsPromptUnion

func (r CompletionNewParamsPromptArrayOfTokenArrays) ImplementsCompletionNewParamsPromptUnion()

type CompletionNewParamsPromptArrayOfTokens

type CompletionNewParamsPromptArrayOfTokens []int64

func (CompletionNewParamsPromptArrayOfTokens) ImplementsCompletionNewParamsPromptUnion

func (r CompletionNewParamsPromptArrayOfTokens) ImplementsCompletionNewParamsPromptUnion()

type CompletionNewParamsPromptUnion

type CompletionNewParamsPromptUnion interface {
	ImplementsCompletionNewParamsPromptUnion()
}

The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.

Satisfied by shared.UnionString, CompletionNewParamsPromptArrayOfStrings, CompletionNewParamsPromptArrayOfTokens, CompletionNewParamsPromptArrayOfTokenArrays.

type CompletionNewParamsStopArray

type CompletionNewParamsStopArray []string

func (CompletionNewParamsStopArray) ImplementsCompletionNewParamsStopUnion

func (r CompletionNewParamsStopArray) ImplementsCompletionNewParamsStopUnion()

type CompletionNewParamsStopUnion

type CompletionNewParamsStopUnion interface {
	ImplementsCompletionNewParamsStopUnion()
}

Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.

Satisfied by shared.UnionString, CompletionNewParamsStopArray.

type CompletionObject

type CompletionObject string

The object type, which is always "text_completion"

const (
	CompletionObjectTextCompletion CompletionObject = "text_completion"
)

func (CompletionObject) IsKnown

func (r CompletionObject) IsKnown() bool

type CompletionService

type CompletionService struct {
	Options []option.RequestOption
}

CompletionService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewCompletionService method.

func NewCompletionService

func NewCompletionService(opts ...option.RequestOption) (r *CompletionService)

NewCompletionService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*CompletionService) New

Creates a completion for the provided prompt and parameters.

func (*CompletionService) NewStreaming

func (r *CompletionService) NewStreaming(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[Completion])

Creates a completion for the provided prompt and parameters.

type CompletionUsage

type CompletionUsage struct {
	// Number of tokens in the generated completion.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of tokens in the prompt.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used in the request (prompt + completion).
	TotalTokens int64 `json:"total_tokens,required"`
	// Breakdown of tokens used in a completion.
	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
	JSON                    completionUsageJSON                    `json:"-"`
}

Usage statistics for the completion request.

func (*CompletionUsage) UnmarshalJSON

func (r *CompletionUsage) UnmarshalJSON(data []byte) (err error)

type CompletionUsageCompletionTokensDetails

type CompletionUsageCompletionTokensDetails struct {
	// Tokens generated by the model for reasoning.
	ReasoningTokens int64                                      `json:"reasoning_tokens"`
	JSON            completionUsageCompletionTokensDetailsJSON `json:"-"`
}

Breakdown of tokens used in a completion.

func (*CompletionUsageCompletionTokensDetails) UnmarshalJSON

func (r *CompletionUsageCompletionTokensDetails) UnmarshalJSON(data []byte) (err error)

type CreateEmbeddingResponse

type CreateEmbeddingResponse struct {
	// The list of embeddings generated by the model.
	Data []Embedding `json:"data,required"`
	// The name of the model used to generate the embedding.
	Model string `json:"model,required"`
	// The object type, which is always "list".
	Object CreateEmbeddingResponseObject `json:"object,required"`
	// The usage information for the request.
	Usage CreateEmbeddingResponseUsage `json:"usage,required"`
	JSON  createEmbeddingResponseJSON  `json:"-"`
}

func (*CreateEmbeddingResponse) UnmarshalJSON

func (r *CreateEmbeddingResponse) UnmarshalJSON(data []byte) (err error)

type CreateEmbeddingResponseObject

type CreateEmbeddingResponseObject string

The object type, which is always "list".

const (
	CreateEmbeddingResponseObjectList CreateEmbeddingResponseObject = "list"
)

func (CreateEmbeddingResponseObject) IsKnown

func (r CreateEmbeddingResponseObject) IsKnown() bool

type CreateEmbeddingResponseUsage

type CreateEmbeddingResponseUsage struct {
	// The number of tokens used by the prompt.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// The total number of tokens used by the request.
	TotalTokens int64                            `json:"total_tokens,required"`
	JSON        createEmbeddingResponseUsageJSON `json:"-"`
}

The usage information for the request.

func (*CreateEmbeddingResponseUsage) UnmarshalJSON

func (r *CreateEmbeddingResponseUsage) UnmarshalJSON(data []byte) (err error)

type Embedding

type Embedding struct {
	// The embedding vector, which is a list of floats. The length of vector depends on
	// the model as listed in the
	// [embedding guide](https://platform.openai.com/docs/guides/embeddings).
	Embedding []float64 `json:"embedding,required"`
	// The index of the embedding in the list of embeddings.
	Index int64 `json:"index,required"`
	// The object type, which is always "embedding".
	Object EmbeddingObject `json:"object,required"`
	JSON   embeddingJSON   `json:"-"`
}

Represents an embedding vector returned by embedding endpoint.

func (*Embedding) UnmarshalJSON

func (r *Embedding) UnmarshalJSON(data []byte) (err error)

type EmbeddingNewParams

type EmbeddingNewParams struct {
	// Input text to embed, encoded as a string or array of tokens. To embed multiple
	// inputs in a single request, pass an array of strings or array of token arrays.
	// The input must not exceed the max input tokens for the model (8192 tokens for
	// `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
	// dimensions or less.
	// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
	// for counting tokens.
	Input param.Field[EmbeddingNewParamsInputUnion] `json:"input,required"`
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models/overview) for
	// descriptions of them.
	Model param.Field[EmbeddingNewParamsModel] `json:"model,required"`
	// The number of dimensions the resulting output embeddings should have. Only
	// supported in `text-embedding-3` and later models.
	Dimensions param.Field[int64] `json:"dimensions"`
	// The format to return the embeddings in. Can be either `float` or
	// [`base64`](https://pypi.org/project/pybase64/).
	EncodingFormat param.Field[EmbeddingNewParamsEncodingFormat] `json:"encoding_format"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (EmbeddingNewParams) MarshalJSON

func (r EmbeddingNewParams) MarshalJSON() (data []byte, err error)

type EmbeddingNewParamsEncodingFormat

type EmbeddingNewParamsEncodingFormat string

The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).

const (
	EmbeddingNewParamsEncodingFormatFloat  EmbeddingNewParamsEncodingFormat = "float"
	EmbeddingNewParamsEncodingFormatBase64 EmbeddingNewParamsEncodingFormat = "base64"
)

func (EmbeddingNewParamsEncodingFormat) IsKnown

type EmbeddingNewParamsInputArrayOfStrings

type EmbeddingNewParamsInputArrayOfStrings []string

func (EmbeddingNewParamsInputArrayOfStrings) ImplementsEmbeddingNewParamsInputUnion

func (r EmbeddingNewParamsInputArrayOfStrings) ImplementsEmbeddingNewParamsInputUnion()

type EmbeddingNewParamsInputArrayOfTokenArrays

type EmbeddingNewParamsInputArrayOfTokenArrays [][]int64

func (EmbeddingNewParamsInputArrayOfTokenArrays) ImplementsEmbeddingNewParamsInputUnion

func (r EmbeddingNewParamsInputArrayOfTokenArrays) ImplementsEmbeddingNewParamsInputUnion()

type EmbeddingNewParamsInputArrayOfTokens

type EmbeddingNewParamsInputArrayOfTokens []int64

func (EmbeddingNewParamsInputArrayOfTokens) ImplementsEmbeddingNewParamsInputUnion

func (r EmbeddingNewParamsInputArrayOfTokens) ImplementsEmbeddingNewParamsInputUnion()

type EmbeddingNewParamsInputUnion

type EmbeddingNewParamsInputUnion interface {
	ImplementsEmbeddingNewParamsInputUnion()
}

Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.

Satisfied by shared.UnionString, EmbeddingNewParamsInputArrayOfStrings, EmbeddingNewParamsInputArrayOfTokens, EmbeddingNewParamsInputArrayOfTokenArrays.

type EmbeddingNewParamsModel

type EmbeddingNewParamsModel string
const (
	EmbeddingNewParamsModelTextEmbeddingAda002 EmbeddingNewParamsModel = "text-embedding-ada-002"
	EmbeddingNewParamsModelTextEmbedding3Small EmbeddingNewParamsModel = "text-embedding-3-small"
	EmbeddingNewParamsModelTextEmbedding3Large EmbeddingNewParamsModel = "text-embedding-3-large"
)

func (EmbeddingNewParamsModel) IsKnown

func (r EmbeddingNewParamsModel) IsKnown() bool

type EmbeddingObject

type EmbeddingObject string

The object type, which is always "embedding".

const (
	EmbeddingObjectEmbedding EmbeddingObject = "embedding"
)

func (EmbeddingObject) IsKnown

func (r EmbeddingObject) IsKnown() bool

type EmbeddingService

type EmbeddingService struct {
	Options []option.RequestOption
}

EmbeddingService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewEmbeddingService method.

func NewEmbeddingService

func NewEmbeddingService(opts ...option.RequestOption) (r *EmbeddingService)

NewEmbeddingService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*EmbeddingService) New

Creates an embedding vector representing the input text.

type Error

type Error = apierror.Error

type ErrorObject

type ErrorObject = shared.ErrorObject

This is an alias to an internal type.

type FileChunkingStrategy

type FileChunkingStrategy struct {
	// Always `static`.
	Type   FileChunkingStrategyType   `json:"type,required"`
	Static StaticFileChunkingStrategy `json:"static"`
	JSON   fileChunkingStrategyJSON   `json:"-"`
	// contains filtered or unexported fields
}

The strategy used to chunk the file.

func (FileChunkingStrategy) AsUnion

AsUnion returns a FileChunkingStrategyUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject.

func (*FileChunkingStrategy) UnmarshalJSON

func (r *FileChunkingStrategy) UnmarshalJSON(data []byte) (err error)

type FileChunkingStrategyParam

type FileChunkingStrategyParam struct {
	// Always `auto`.
	Type   param.Field[FileChunkingStrategyParamType]   `json:"type,required"`
	Static param.Field[StaticFileChunkingStrategyParam] `json:"static"`
}

The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.

func (FileChunkingStrategyParam) MarshalJSON

func (r FileChunkingStrategyParam) MarshalJSON() (data []byte, err error)

type FileChunkingStrategyParamType

type FileChunkingStrategyParamType string

Always `auto`.

const (
	FileChunkingStrategyParamTypeAuto   FileChunkingStrategyParamType = "auto"
	FileChunkingStrategyParamTypeStatic FileChunkingStrategyParamType = "static"
)

func (FileChunkingStrategyParamType) IsKnown

func (r FileChunkingStrategyParamType) IsKnown() bool

type FileChunkingStrategyParamUnion

type FileChunkingStrategyParamUnion interface {
	// contains filtered or unexported methods
}

The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.

Satisfied by AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam, FileChunkingStrategyParam.

type FileChunkingStrategyType

type FileChunkingStrategyType string

Always `static`.

const (
	FileChunkingStrategyTypeStatic FileChunkingStrategyType = "static"
	FileChunkingStrategyTypeOther  FileChunkingStrategyType = "other"
)

func (FileChunkingStrategyType) IsKnown

func (r FileChunkingStrategyType) IsKnown() bool

type FileChunkingStrategyUnion

type FileChunkingStrategyUnion interface {
	// contains filtered or unexported methods
}

The strategy used to chunk the file.

Union satisfied by StaticFileChunkingStrategyObject or OtherFileChunkingStrategyObject.

type FileCitationAnnotation

type FileCitationAnnotation struct {
	EndIndex     int64                              `json:"end_index,required"`
	FileCitation FileCitationAnnotationFileCitation `json:"file_citation,required"`
	StartIndex   int64                              `json:"start_index,required"`
	// The text in the message content that needs to be replaced.
	Text string `json:"text,required"`
	// Always `file_citation`.
	Type FileCitationAnnotationType `json:"type,required"`
	JSON fileCitationAnnotationJSON `json:"-"`
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

func (*FileCitationAnnotation) UnmarshalJSON

func (r *FileCitationAnnotation) UnmarshalJSON(data []byte) (err error)

type FileCitationAnnotationFileCitation

type FileCitationAnnotationFileCitation struct {
	// The ID of the specific File the citation is from.
	FileID string                                 `json:"file_id,required"`
	JSON   fileCitationAnnotationFileCitationJSON `json:"-"`
}

func (*FileCitationAnnotationFileCitation) UnmarshalJSON

func (r *FileCitationAnnotationFileCitation) UnmarshalJSON(data []byte) (err error)

type FileCitationAnnotationType

type FileCitationAnnotationType string

Always `file_citation`.

const (
	FileCitationAnnotationTypeFileCitation FileCitationAnnotationType = "file_citation"
)

func (FileCitationAnnotationType) IsKnown

func (r FileCitationAnnotationType) IsKnown() bool

type FileCitationDeltaAnnotation

type FileCitationDeltaAnnotation struct {
	// The index of the annotation in the text content part.
	Index int64 `json:"index,required"`
	// Always `file_citation`.
	Type         FileCitationDeltaAnnotationType         `json:"type,required"`
	EndIndex     int64                                   `json:"end_index"`
	FileCitation FileCitationDeltaAnnotationFileCitation `json:"file_citation"`
	StartIndex   int64                                   `json:"start_index"`
	// The text in the message content that needs to be replaced.
	Text string                          `json:"text"`
	JSON fileCitationDeltaAnnotationJSON `json:"-"`
}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

func (*FileCitationDeltaAnnotation) UnmarshalJSON

func (r *FileCitationDeltaAnnotation) UnmarshalJSON(data []byte) (err error)

type FileCitationDeltaAnnotationFileCitation

type FileCitationDeltaAnnotationFileCitation struct {
	// The ID of the specific File the citation is from.
	FileID string `json:"file_id"`
	// The specific quote in the file.
	Quote string                                      `json:"quote"`
	JSON  fileCitationDeltaAnnotationFileCitationJSON `json:"-"`
}

func (*FileCitationDeltaAnnotationFileCitation) UnmarshalJSON

func (r *FileCitationDeltaAnnotationFileCitation) UnmarshalJSON(data []byte) (err error)

type FileCitationDeltaAnnotationType

type FileCitationDeltaAnnotationType string

Always `file_citation`.

const (
	FileCitationDeltaAnnotationTypeFileCitation FileCitationDeltaAnnotationType = "file_citation"
)

func (FileCitationDeltaAnnotationType) IsKnown

type FileDeleted

type FileDeleted struct {
	ID      string            `json:"id,required"`
	Deleted bool              `json:"deleted,required"`
	Object  FileDeletedObject `json:"object,required"`
	JSON    fileDeletedJSON   `json:"-"`
}

func (*FileDeleted) UnmarshalJSON

func (r *FileDeleted) UnmarshalJSON(data []byte) (err error)

type FileDeletedObject

type FileDeletedObject string
const (
	FileDeletedObjectFile FileDeletedObject = "file"
)

func (FileDeletedObject) IsKnown

func (r FileDeletedObject) IsKnown() bool

type FileListParams

type FileListParams struct {
	// Only return files with the given purpose.
	Purpose param.Field[string] `query:"purpose"`
}

func (FileListParams) URLQuery

func (r FileListParams) URLQuery() (v url.Values)

URLQuery serializes FileListParams's query parameters as `url.Values`.

type FileNewParams

type FileNewParams struct {
	// The File object (not file name) to be uploaded.
	File param.Field[io.Reader] `json:"file,required" format:"binary"`
	// The intended purpose of the uploaded file.
	//
	// Use "assistants" for
	// [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
	// [Message](https://platform.openai.com/docs/api-reference/messages) files,
	// "vision" for Assistants image file inputs, "batch" for
	// [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for
	// [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
	Purpose param.Field[FilePurpose] `json:"purpose,required"`
}

func (FileNewParams) MarshalMultipart

func (r FileNewParams) MarshalMultipart() (data []byte, contentType string, err error)

type FileObject

type FileObject struct {
	// The file identifier, which can be referenced in the API endpoints.
	ID string `json:"id,required"`
	// The size of the file, in bytes.
	Bytes int64 `json:"bytes,required"`
	// The Unix timestamp (in seconds) for when the file was created.
	CreatedAt int64 `json:"created_at,required"`
	// The name of the file.
	Filename string `json:"filename,required"`
	// The object type, which is always `file`.
	Object FileObjectObject `json:"object,required"`
	// The intended purpose of the file. Supported values are `assistants`,
	// `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`
	// and `vision`.
	Purpose FileObjectPurpose `json:"purpose,required"`
	// Deprecated. The current status of the file, which can be either `uploaded`,
	// `processed`, or `error`.
	Status FileObjectStatus `json:"status,required"`
	// Deprecated. For details on why a fine-tuning training file failed validation,
	// see the `error` field on `fine_tuning.job`.
	StatusDetails string         `json:"status_details"`
	JSON          fileObjectJSON `json:"-"`
}

The `File` object represents a document that has been uploaded to OpenAI.

func (*FileObject) UnmarshalJSON

func (r *FileObject) UnmarshalJSON(data []byte) (err error)

type FileObjectObject

type FileObjectObject string

The object type, which is always `file`.

const (
	FileObjectObjectFile FileObjectObject = "file"
)

func (FileObjectObject) IsKnown

func (r FileObjectObject) IsKnown() bool

type FileObjectPurpose

type FileObjectPurpose string

The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.

const (
	FileObjectPurposeAssistants       FileObjectPurpose = "assistants"
	FileObjectPurposeAssistantsOutput FileObjectPurpose = "assistants_output"
	FileObjectPurposeBatch            FileObjectPurpose = "batch"
	FileObjectPurposeBatchOutput      FileObjectPurpose = "batch_output"
	FileObjectPurposeFineTune         FileObjectPurpose = "fine-tune"
	FileObjectPurposeFineTuneResults  FileObjectPurpose = "fine-tune-results"
	FileObjectPurposeVision           FileObjectPurpose = "vision"
)

func (FileObjectPurpose) IsKnown

func (r FileObjectPurpose) IsKnown() bool

type FileObjectStatus

type FileObjectStatus string

Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.

const (
	FileObjectStatusUploaded  FileObjectStatus = "uploaded"
	FileObjectStatusProcessed FileObjectStatus = "processed"
	FileObjectStatusError     FileObjectStatus = "error"
)

func (FileObjectStatus) IsKnown

func (r FileObjectStatus) IsKnown() bool

type FilePathAnnotation

type FilePathAnnotation struct {
	EndIndex   int64                      `json:"end_index,required"`
	FilePath   FilePathAnnotationFilePath `json:"file_path,required"`
	StartIndex int64                      `json:"start_index,required"`
	// The text in the message content that needs to be replaced.
	Text string `json:"text,required"`
	// Always `file_path`.
	Type FilePathAnnotationType `json:"type,required"`
	JSON filePathAnnotationJSON `json:"-"`
}

A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.

func (*FilePathAnnotation) UnmarshalJSON

func (r *FilePathAnnotation) UnmarshalJSON(data []byte) (err error)

type FilePathAnnotationFilePath

type FilePathAnnotationFilePath struct {
	// The ID of the file that was generated.
	FileID string                         `json:"file_id,required"`
	JSON   filePathAnnotationFilePathJSON `json:"-"`
}

func (*FilePathAnnotationFilePath) UnmarshalJSON

func (r *FilePathAnnotationFilePath) UnmarshalJSON(data []byte) (err error)

type FilePathAnnotationType

type FilePathAnnotationType string

Always `file_path`.

const (
	FilePathAnnotationTypeFilePath FilePathAnnotationType = "file_path"
)

func (FilePathAnnotationType) IsKnown

func (r FilePathAnnotationType) IsKnown() bool

type FilePathDeltaAnnotation

type FilePathDeltaAnnotation struct {
	// The index of the annotation in the text content part.
	Index int64 `json:"index,required"`
	// Always `file_path`.
	Type       FilePathDeltaAnnotationType     `json:"type,required"`
	EndIndex   int64                           `json:"end_index"`
	FilePath   FilePathDeltaAnnotationFilePath `json:"file_path"`
	StartIndex int64                           `json:"start_index"`
	// The text in the message content that needs to be replaced.
	Text string                      `json:"text"`
	JSON filePathDeltaAnnotationJSON `json:"-"`
}

A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.

func (*FilePathDeltaAnnotation) UnmarshalJSON

func (r *FilePathDeltaAnnotation) UnmarshalJSON(data []byte) (err error)

type FilePathDeltaAnnotationFilePath

type FilePathDeltaAnnotationFilePath struct {
	// The ID of the file that was generated.
	FileID string                              `json:"file_id"`
	JSON   filePathDeltaAnnotationFilePathJSON `json:"-"`
}

func (*FilePathDeltaAnnotationFilePath) UnmarshalJSON

func (r *FilePathDeltaAnnotationFilePath) UnmarshalJSON(data []byte) (err error)

type FilePathDeltaAnnotationType

type FilePathDeltaAnnotationType string

Always `file_path`.

const (
	FilePathDeltaAnnotationTypeFilePath FilePathDeltaAnnotationType = "file_path"
)

func (FilePathDeltaAnnotationType) IsKnown

func (r FilePathDeltaAnnotationType) IsKnown() bool

type FilePurpose

type FilePurpose string

The intended purpose of the uploaded file.

Use "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Message](https://platform.openai.com/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).

const (
	FilePurposeAssistants FilePurpose = "assistants"
	FilePurposeBatch      FilePurpose = "batch"
	FilePurposeFineTune   FilePurpose = "fine-tune"
	FilePurposeVision     FilePurpose = "vision"
)

func (FilePurpose) IsKnown

func (r FilePurpose) IsKnown() bool

type FileSearchTool

type FileSearchTool struct {
	// The type of tool being defined: `file_search`
	Type FileSearchToolType `json:"type,required"`
	// Overrides for the file search tool.
	FileSearch FileSearchToolFileSearch `json:"file_search"`
	JSON       fileSearchToolJSON       `json:"-"`
}

func (*FileSearchTool) UnmarshalJSON

func (r *FileSearchTool) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCall

type FileSearchToolCall struct {
	// The ID of the tool call object.
	ID string `json:"id,required"`
	// For now, this is always going to be an empty object.
	FileSearch FileSearchToolCallFileSearch `json:"file_search,required"`
	// The type of tool call. This is always going to be `file_search` for this type of
	// tool call.
	Type FileSearchToolCallType `json:"type,required"`
	JSON fileSearchToolCallJSON `json:"-"`
}

func (*FileSearchToolCall) UnmarshalJSON

func (r *FileSearchToolCall) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallDelta

type FileSearchToolCallDelta struct {
	// For now, this is always going to be an empty object.
	FileSearch interface{} `json:"file_search,required"`
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The type of tool call. This is always going to be `file_search` for this type of
	// tool call.
	Type FileSearchToolCallDeltaType `json:"type,required"`
	// The ID of the tool call object.
	ID   string                      `json:"id"`
	JSON fileSearchToolCallDeltaJSON `json:"-"`
}

func (*FileSearchToolCallDelta) UnmarshalJSON

func (r *FileSearchToolCallDelta) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallDeltaType

type FileSearchToolCallDeltaType string

The type of tool call. This is always going to be `file_search` for this type of tool call.

const (
	FileSearchToolCallDeltaTypeFileSearch FileSearchToolCallDeltaType = "file_search"
)

func (FileSearchToolCallDeltaType) IsKnown

func (r FileSearchToolCallDeltaType) IsKnown() bool

type FileSearchToolCallFileSearch

type FileSearchToolCallFileSearch struct {
	// The ranking options for the file search.
	RankingOptions FileSearchToolCallFileSearchRankingOptions `json:"ranking_options"`
	// The results of the file search.
	Results []FileSearchToolCallFileSearchResult `json:"results"`
	JSON    fileSearchToolCallFileSearchJSON     `json:"-"`
}

For now, this is always going to be an empty object.

func (*FileSearchToolCallFileSearch) UnmarshalJSON

func (r *FileSearchToolCallFileSearch) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallFileSearchRankingOptions

type FileSearchToolCallFileSearchRankingOptions struct {
	// The ranker used for the file search.
	Ranker FileSearchToolCallFileSearchRankingOptionsRanker `json:"ranker,required"`
	// The score threshold for the file search. All values must be a floating point
	// number between 0 and 1.
	ScoreThreshold float64                                        `json:"score_threshold,required"`
	JSON           fileSearchToolCallFileSearchRankingOptionsJSON `json:"-"`
}

The ranking options for the file search.

func (*FileSearchToolCallFileSearchRankingOptions) UnmarshalJSON

func (r *FileSearchToolCallFileSearchRankingOptions) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallFileSearchRankingOptionsRanker

type FileSearchToolCallFileSearchRankingOptionsRanker string

The ranker used for the file search.

const (
	FileSearchToolCallFileSearchRankingOptionsRankerDefault2024_08_21 FileSearchToolCallFileSearchRankingOptionsRanker = "default_2024_08_21"
)

func (FileSearchToolCallFileSearchRankingOptionsRanker) IsKnown

type FileSearchToolCallFileSearchResult

type FileSearchToolCallFileSearchResult struct {
	// The ID of the file that result was found in.
	FileID string `json:"file_id,required"`
	// The name of the file that result was found in.
	FileName string `json:"file_name,required"`
	// The score of the result. All values must be a floating point number between 0
	// and 1.
	Score float64 `json:"score,required"`
	// The content of the result that was found. The content is only included if
	// requested via the include query parameter.
	Content []FileSearchToolCallFileSearchResultsContent `json:"content"`
	JSON    fileSearchToolCallFileSearchResultJSON       `json:"-"`
}

A result instance of the file search.

func (*FileSearchToolCallFileSearchResult) UnmarshalJSON

func (r *FileSearchToolCallFileSearchResult) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallFileSearchResultsContent

type FileSearchToolCallFileSearchResultsContent struct {
	// The text content of the file.
	Text string `json:"text"`
	// The type of the content.
	Type FileSearchToolCallFileSearchResultsContentType `json:"type"`
	JSON fileSearchToolCallFileSearchResultsContentJSON `json:"-"`
}

func (*FileSearchToolCallFileSearchResultsContent) UnmarshalJSON

func (r *FileSearchToolCallFileSearchResultsContent) UnmarshalJSON(data []byte) (err error)

type FileSearchToolCallFileSearchResultsContentType

type FileSearchToolCallFileSearchResultsContentType string

The type of the content.

const (
	FileSearchToolCallFileSearchResultsContentTypeText FileSearchToolCallFileSearchResultsContentType = "text"
)

func (FileSearchToolCallFileSearchResultsContentType) IsKnown

type FileSearchToolCallType

type FileSearchToolCallType string

The type of tool call. This is always going to be `file_search` for this type of tool call.

const (
	FileSearchToolCallTypeFileSearch FileSearchToolCallType = "file_search"
)

func (FileSearchToolCallType) IsKnown

func (r FileSearchToolCallType) IsKnown() bool

type FileSearchToolFileSearch

type FileSearchToolFileSearch struct {
	// The maximum number of results the file search tool should output. The default is
	// 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
	// 1 and 50 inclusive.
	//
	// Note that the file search tool may output fewer than `max_num_results` results.
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	MaxNumResults int64 `json:"max_num_results"`
	// The ranking options for the file search. If not specified, the file search tool
	// will use the `auto` ranker and a score_threshold of 0.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	RankingOptions FileSearchToolFileSearchRankingOptions `json:"ranking_options"`
	JSON           fileSearchToolFileSearchJSON           `json:"-"`
}

Overrides for the file search tool.

func (*FileSearchToolFileSearch) UnmarshalJSON

func (r *FileSearchToolFileSearch) UnmarshalJSON(data []byte) (err error)

type FileSearchToolFileSearchParam

type FileSearchToolFileSearchParam struct {
	// The maximum number of results the file search tool should output. The default is
	// 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
	// 1 and 50 inclusive.
	//
	// Note that the file search tool may output fewer than `max_num_results` results.
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	MaxNumResults param.Field[int64] `json:"max_num_results"`
	// The ranking options for the file search. If not specified, the file search tool
	// will use the `auto` ranker and a score_threshold of 0.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
	// for more information.
	RankingOptions param.Field[FileSearchToolFileSearchRankingOptionsParam] `json:"ranking_options"`
}

Overrides for the file search tool.

func (FileSearchToolFileSearchParam) MarshalJSON

func (r FileSearchToolFileSearchParam) MarshalJSON() (data []byte, err error)

type FileSearchToolFileSearchRankingOptions

type FileSearchToolFileSearchRankingOptions struct {
	// The score threshold for the file search. All values must be a floating point
	// number between 0 and 1.
	ScoreThreshold float64 `json:"score_threshold,required"`
	// The ranker to use for the file search. If not specified will use the `auto`
	// ranker.
	Ranker FileSearchToolFileSearchRankingOptionsRanker `json:"ranker"`
	JSON   fileSearchToolFileSearchRankingOptionsJSON   `json:"-"`
}

The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.

See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.

func (*FileSearchToolFileSearchRankingOptions) UnmarshalJSON

func (r *FileSearchToolFileSearchRankingOptions) UnmarshalJSON(data []byte) (err error)

type FileSearchToolFileSearchRankingOptionsParam

type FileSearchToolFileSearchRankingOptionsParam struct {
	// The score threshold for the file search. All values must be a floating point
	// number between 0 and 1.
	ScoreThreshold param.Field[float64] `json:"score_threshold,required"`
	// The ranker to use for the file search. If not specified will use the `auto`
	// ranker.
	Ranker param.Field[FileSearchToolFileSearchRankingOptionsRanker] `json:"ranker"`
}

The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.

See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.

func (FileSearchToolFileSearchRankingOptionsParam) MarshalJSON

func (r FileSearchToolFileSearchRankingOptionsParam) MarshalJSON() (data []byte, err error)

type FileSearchToolFileSearchRankingOptionsRanker

type FileSearchToolFileSearchRankingOptionsRanker string

The ranker to use for the file search. If not specified will use the `auto` ranker.

const (
	FileSearchToolFileSearchRankingOptionsRankerAuto              FileSearchToolFileSearchRankingOptionsRanker = "auto"
	FileSearchToolFileSearchRankingOptionsRankerDefault2024_08_21 FileSearchToolFileSearchRankingOptionsRanker = "default_2024_08_21"
)

func (FileSearchToolFileSearchRankingOptionsRanker) IsKnown

type FileSearchToolParam

type FileSearchToolParam struct {
	// The type of tool being defined: `file_search`
	Type param.Field[FileSearchToolType] `json:"type,required"`
	// Overrides for the file search tool.
	FileSearch param.Field[FileSearchToolFileSearchParam] `json:"file_search"`
}

func (FileSearchToolParam) MarshalJSON

func (r FileSearchToolParam) MarshalJSON() (data []byte, err error)

type FileSearchToolType

type FileSearchToolType string

The type of tool being defined: `file_search`

const (
	FileSearchToolTypeFileSearch FileSearchToolType = "file_search"
)

func (FileSearchToolType) IsKnown

func (r FileSearchToolType) IsKnown() bool

type FileService

type FileService struct {
	Options []option.RequestOption
}

FileService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewFileService method instead.

func NewFileService

func NewFileService(opts ...option.RequestOption) (r *FileService)

NewFileService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*FileService) Content

func (r *FileService) Content(ctx context.Context, fileID string, opts ...option.RequestOption) (res *http.Response, err error)

Returns the contents of the specified file.

func (*FileService) Delete

func (r *FileService) Delete(ctx context.Context, fileID string, opts ...option.RequestOption) (res *FileDeleted, err error)

Delete a file.

func (*FileService) Get

func (r *FileService) Get(ctx context.Context, fileID string, opts ...option.RequestOption) (res *FileObject, err error)

Returns information about a specific file.

func (*FileService) List

func (r *FileService) List(ctx context.Context, query FileListParams, opts ...option.RequestOption) (res *pagination.Page[FileObject], err error)

Returns a list of files that belong to the user's organization.

func (*FileService) ListAutoPaging

Returns a list of files that belong to the user's organization.

func (*FileService) New

func (r *FileService) New(ctx context.Context, body FileNewParams, opts ...option.RequestOption) (res *FileObject, err error)

Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.

The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for details.

The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models.

The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input).

Please [contact us](https://help.openai.com/) if you need to increase these storage limits.

type FineTuningJob

type FineTuningJob struct {
	// The object identifier, which can be referenced in the API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the fine-tuning job was created.
	CreatedAt int64 `json:"created_at,required"`
	// For fine-tuning jobs that have `failed`, this will contain more information on
	// the cause of the failure.
	Error FineTuningJobError `json:"error,required,nullable"`
	// The name of the fine-tuned model that is being created. The value will be null
	// if the fine-tuning job is still running.
	FineTunedModel string `json:"fine_tuned_model,required,nullable"`
	// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The
	// value will be null if the fine-tuning job is still running.
	FinishedAt int64 `json:"finished_at,required,nullable"`
	// The hyperparameters used for the fine-tuning job. See the
	// [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for
	// more details.
	Hyperparameters FineTuningJobHyperparameters `json:"hyperparameters,required"`
	// The base model that is being fine-tuned.
	Model string `json:"model,required"`
	// The object type, which is always "fine_tuning.job".
	Object FineTuningJobObject `json:"object,required"`
	// The organization that owns the fine-tuning job.
	OrganizationID string `json:"organization_id,required"`
	// The compiled results file ID(s) for the fine-tuning job. You can retrieve the
	// results with the
	// [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
	ResultFiles []string `json:"result_files,required"`
	// The seed used for the fine-tuning job.
	Seed int64 `json:"seed,required"`
	// The current status of the fine-tuning job, which can be either
	// `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
	Status FineTuningJobStatus `json:"status,required"`
	// The total number of billable tokens processed by this fine-tuning job. The value
	// will be null if the fine-tuning job is still running.
	TrainedTokens int64 `json:"trained_tokens,required,nullable"`
	// The file ID used for training. You can retrieve the training data with the
	// [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
	TrainingFile string `json:"training_file,required"`
	// The file ID used for validation. You can retrieve the validation results with
	// the
	// [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
	ValidationFile string `json:"validation_file,required,nullable"`
	// The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
	// finish. The value will be null if the fine-tuning job is not running.
	EstimatedFinish int64 `json:"estimated_finish,nullable"`
	// A list of integrations to enable for this fine-tuning job.
	Integrations []FineTuningJobWandbIntegrationObject `json:"integrations,nullable"`
	JSON         fineTuningJobJSON                     `json:"-"`
}

The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.

func (*FineTuningJob) UnmarshalJSON

func (r *FineTuningJob) UnmarshalJSON(data []byte) (err error)

type FineTuningJobCheckpoint

type FineTuningJobCheckpoint struct {
	// The checkpoint identifier, which can be referenced in the API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the checkpoint was created.
	CreatedAt int64 `json:"created_at,required"`
	// The name of the fine-tuned checkpoint model that is created.
	FineTunedModelCheckpoint string `json:"fine_tuned_model_checkpoint,required"`
	// The name of the fine-tuning job that this checkpoint was created from.
	FineTuningJobID string `json:"fine_tuning_job_id,required"`
	// Metrics at the step number during the fine-tuning job.
	Metrics FineTuningJobCheckpointMetrics `json:"metrics,required"`
	// The object type, which is always "fine_tuning.job.checkpoint".
	Object FineTuningJobCheckpointObject `json:"object,required"`
	// The step number that the checkpoint was created at.
	StepNumber int64                       `json:"step_number,required"`
	JSON       fineTuningJobCheckpointJSON `json:"-"`
}

The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.

func (*FineTuningJobCheckpoint) UnmarshalJSON

func (r *FineTuningJobCheckpoint) UnmarshalJSON(data []byte) (err error)

type FineTuningJobCheckpointListParams

type FineTuningJobCheckpointListParams struct {
	// Identifier for the last checkpoint ID from the previous pagination request.
	After param.Field[string] `query:"after"`
	// Number of checkpoints to retrieve.
	Limit param.Field[int64] `query:"limit"`
}

func (FineTuningJobCheckpointListParams) URLQuery

func (r FineTuningJobCheckpointListParams) URLQuery() (v url.Values)

URLQuery serializes FineTuningJobCheckpointListParams's query parameters as `url.Values`.

type FineTuningJobCheckpointMetrics

type FineTuningJobCheckpointMetrics struct {
	FullValidLoss              float64                            `json:"full_valid_loss"`
	FullValidMeanTokenAccuracy float64                            `json:"full_valid_mean_token_accuracy"`
	Step                       float64                            `json:"step"`
	TrainLoss                  float64                            `json:"train_loss"`
	TrainMeanTokenAccuracy     float64                            `json:"train_mean_token_accuracy"`
	ValidLoss                  float64                            `json:"valid_loss"`
	ValidMeanTokenAccuracy     float64                            `json:"valid_mean_token_accuracy"`
	JSON                       fineTuningJobCheckpointMetricsJSON `json:"-"`
}

Metrics at the step number during the fine-tuning job.

func (*FineTuningJobCheckpointMetrics) UnmarshalJSON

func (r *FineTuningJobCheckpointMetrics) UnmarshalJSON(data []byte) (err error)

type FineTuningJobCheckpointObject

type FineTuningJobCheckpointObject string

The object type, which is always "fine_tuning.job.checkpoint".

const (
	FineTuningJobCheckpointObjectFineTuningJobCheckpoint FineTuningJobCheckpointObject = "fine_tuning.job.checkpoint"
)

func (FineTuningJobCheckpointObject) IsKnown

func (r FineTuningJobCheckpointObject) IsKnown() bool

type FineTuningJobCheckpointService

type FineTuningJobCheckpointService struct {
	Options []option.RequestOption
}

FineTuningJobCheckpointService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewFineTuningJobCheckpointService method instead.

func NewFineTuningJobCheckpointService

func NewFineTuningJobCheckpointService(opts ...option.RequestOption) (r *FineTuningJobCheckpointService)

NewFineTuningJobCheckpointService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*FineTuningJobCheckpointService) List

List checkpoints for a fine-tuning job.

func (*FineTuningJobCheckpointService) ListAutoPaging

List checkpoints for a fine-tuning job.

type FineTuningJobError

type FineTuningJobError struct {
	// A machine-readable error code.
	Code string `json:"code,required"`
	// A human-readable error message.
	Message string `json:"message,required"`
	// The parameter that was invalid, usually `training_file` or `validation_file`.
	// This field will be null if the failure was not parameter-specific.
	Param string                 `json:"param,required,nullable"`
	JSON  fineTuningJobErrorJSON `json:"-"`
}

For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.

func (*FineTuningJobError) UnmarshalJSON

func (r *FineTuningJobError) UnmarshalJSON(data []byte) (err error)

type FineTuningJobEvent

type FineTuningJobEvent struct {
	ID        string                   `json:"id,required"`
	CreatedAt int64                    `json:"created_at,required"`
	Level     FineTuningJobEventLevel  `json:"level,required"`
	Message   string                   `json:"message,required"`
	Object    FineTuningJobEventObject `json:"object,required"`
	JSON      fineTuningJobEventJSON   `json:"-"`
}

Fine-tuning job event object

func (*FineTuningJobEvent) UnmarshalJSON

func (r *FineTuningJobEvent) UnmarshalJSON(data []byte) (err error)

type FineTuningJobEventLevel

type FineTuningJobEventLevel string
const (
	FineTuningJobEventLevelInfo  FineTuningJobEventLevel = "info"
	FineTuningJobEventLevelWarn  FineTuningJobEventLevel = "warn"
	FineTuningJobEventLevelError FineTuningJobEventLevel = "error"
)

func (FineTuningJobEventLevel) IsKnown

func (r FineTuningJobEventLevel) IsKnown() bool

type FineTuningJobEventObject

type FineTuningJobEventObject string
const (
	FineTuningJobEventObjectFineTuningJobEvent FineTuningJobEventObject = "fine_tuning.job.event"
)

func (FineTuningJobEventObject) IsKnown

func (r FineTuningJobEventObject) IsKnown() bool

type FineTuningJobHyperparameters

type FineTuningJobHyperparameters struct {
	// The number of epochs to train the model for. An epoch refers to one full cycle
	// through the training dataset. "auto" decides the optimal number of epochs based
	// on the size of the dataset. If setting the number manually, we support any
	// number between 1 and 50 epochs.
	NEpochs FineTuningJobHyperparametersNEpochsUnion `json:"n_epochs,required"`
	JSON    fineTuningJobHyperparametersJSON         `json:"-"`
}

The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.

func (*FineTuningJobHyperparameters) UnmarshalJSON

func (r *FineTuningJobHyperparameters) UnmarshalJSON(data []byte) (err error)

type FineTuningJobHyperparametersNEpochsString

type FineTuningJobHyperparametersNEpochsString string
const (
	FineTuningJobHyperparametersNEpochsStringAuto FineTuningJobHyperparametersNEpochsString = "auto"
)

func (FineTuningJobHyperparametersNEpochsString) ImplementsFineTuningJobHyperparametersNEpochsUnion

func (r FineTuningJobHyperparametersNEpochsString) ImplementsFineTuningJobHyperparametersNEpochsUnion()

func (FineTuningJobHyperparametersNEpochsString) IsKnown

type FineTuningJobHyperparametersNEpochsUnion

type FineTuningJobHyperparametersNEpochsUnion interface {
	ImplementsFineTuningJobHyperparametersNEpochsUnion()
}

The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.

Union satisfied by FineTuningJobHyperparametersNEpochsString or shared.UnionInt.

type FineTuningJobListEventsParams

type FineTuningJobListEventsParams struct {
	// Identifier for the last event from the previous pagination request.
	After param.Field[string] `query:"after"`
	// Number of events to retrieve.
	Limit param.Field[int64] `query:"limit"`
}

func (FineTuningJobListEventsParams) URLQuery

func (r FineTuningJobListEventsParams) URLQuery() (v url.Values)

URLQuery serializes FineTuningJobListEventsParams's query parameters as `url.Values`.

type FineTuningJobListParams

type FineTuningJobListParams struct {
	// Identifier for the last job from the previous pagination request.
	After param.Field[string] `query:"after"`
	// Number of fine-tuning jobs to retrieve.
	Limit param.Field[int64] `query:"limit"`
}

func (FineTuningJobListParams) URLQuery

func (r FineTuningJobListParams) URLQuery() (v url.Values)

URLQuery serializes FineTuningJobListParams's query parameters as `url.Values`.

type FineTuningJobNewParams

type FineTuningJobNewParams struct {
	// The name of the model to fine-tune. You can select one of the
	// [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
	Model param.Field[FineTuningJobNewParamsModel] `json:"model,required"`
	// The ID of an uploaded file that contains training data.
	//
	// See [upload file](https://platform.openai.com/docs/api-reference/files/create)
	// for how to upload a file.
	//
	// Your dataset must be formatted as a JSONL file. Additionally, you must upload
	// your file with the purpose `fine-tune`.
	//
	// The contents of the file should differ depending on if the model uses the
	// [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
	// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
	// format.
	//
	// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
	// for more details.
	TrainingFile param.Field[string] `json:"training_file,required"`
	// The hyperparameters used for the fine-tuning job.
	Hyperparameters param.Field[FineTuningJobNewParamsHyperparameters] `json:"hyperparameters"`
	// A list of integrations to enable for your fine-tuning job.
	Integrations param.Field[[]FineTuningJobNewParamsIntegration] `json:"integrations"`
	// The seed controls the reproducibility of the job. Passing in the same seed and
	// job parameters should produce the same results, but may differ in rare cases. If
	// a seed is not specified, one will be generated for you.
	Seed param.Field[int64] `json:"seed"`
	// A string of up to 64 characters that will be added to your fine-tuned model
	// name.
	//
	// For example, a `suffix` of "custom-model-name" would produce a model name like
	// `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
	Suffix param.Field[string] `json:"suffix"`
	// The ID of an uploaded file that contains validation data.
	//
	// If you provide this file, the data is used to generate validation metrics
	// periodically during fine-tuning. These metrics can be viewed in the fine-tuning
	// results file. The same data should not be present in both train and validation
	// files.
	//
	// Your dataset must be formatted as a JSONL file. You must upload your file with
	// the purpose `fine-tune`.
	//
	// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
	// for more details.
	ValidationFile param.Field[string] `json:"validation_file"`
}

func (FineTuningJobNewParams) MarshalJSON

func (r FineTuningJobNewParams) MarshalJSON() (data []byte, err error)

type FineTuningJobNewParamsHyperparameters

type FineTuningJobNewParamsHyperparameters struct {
	// Number of examples in each batch. A larger batch size means that model
	// parameters are updated less frequently, but with lower variance.
	BatchSize param.Field[FineTuningJobNewParamsHyperparametersBatchSizeUnion] `json:"batch_size"`
	// Scaling factor for the learning rate. A smaller learning rate may be useful to
	// avoid overfitting.
	LearningRateMultiplier param.Field[FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion] `json:"learning_rate_multiplier"`
	// The number of epochs to train the model for. An epoch refers to one full cycle
	// through the training dataset.
	NEpochs param.Field[FineTuningJobNewParamsHyperparametersNEpochsUnion] `json:"n_epochs"`
}

The hyperparameters used for the fine-tuning job.

func (FineTuningJobNewParamsHyperparameters) MarshalJSON

func (r FineTuningJobNewParamsHyperparameters) MarshalJSON() (data []byte, err error)

type FineTuningJobNewParamsHyperparametersBatchSizeString

type FineTuningJobNewParamsHyperparametersBatchSizeString string
const (
	FineTuningJobNewParamsHyperparametersBatchSizeStringAuto FineTuningJobNewParamsHyperparametersBatchSizeString = "auto"
)

func (FineTuningJobNewParamsHyperparametersBatchSizeString) ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion

func (r FineTuningJobNewParamsHyperparametersBatchSizeString) ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion()

func (FineTuningJobNewParamsHyperparametersBatchSizeString) IsKnown

type FineTuningJobNewParamsHyperparametersBatchSizeUnion

type FineTuningJobNewParamsHyperparametersBatchSizeUnion interface {
	ImplementsFineTuningJobNewParamsHyperparametersBatchSizeUnion()
}

Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.

Satisfied by FineTuningJobNewParamsHyperparametersBatchSizeString, shared.UnionInt.

type FineTuningJobNewParamsHyperparametersLearningRateMultiplierString

type FineTuningJobNewParamsHyperparametersLearningRateMultiplierString string
const (
	FineTuningJobNewParamsHyperparametersLearningRateMultiplierStringAuto FineTuningJobNewParamsHyperparametersLearningRateMultiplierString = "auto"
)

func (FineTuningJobNewParamsHyperparametersLearningRateMultiplierString) ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion

func (r FineTuningJobNewParamsHyperparametersLearningRateMultiplierString) ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion()

func (FineTuningJobNewParamsHyperparametersLearningRateMultiplierString) IsKnown

type FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion

type FineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion interface {
	ImplementsFineTuningJobNewParamsHyperparametersLearningRateMultiplierUnion()
}

Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.

Satisfied by FineTuningJobNewParamsHyperparametersLearningRateMultiplierString, shared.UnionFloat.

type FineTuningJobNewParamsHyperparametersNEpochsString

type FineTuningJobNewParamsHyperparametersNEpochsString string
const (
	FineTuningJobNewParamsHyperparametersNEpochsStringAuto FineTuningJobNewParamsHyperparametersNEpochsString = "auto"
)

func (FineTuningJobNewParamsHyperparametersNEpochsString) ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion

func (r FineTuningJobNewParamsHyperparametersNEpochsString) ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion()

func (FineTuningJobNewParamsHyperparametersNEpochsString) IsKnown

type FineTuningJobNewParamsHyperparametersNEpochsUnion

type FineTuningJobNewParamsHyperparametersNEpochsUnion interface {
	ImplementsFineTuningJobNewParamsHyperparametersNEpochsUnion()
}

The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.

Satisfied by FineTuningJobNewParamsHyperparametersNEpochsString, shared.UnionInt.

type FineTuningJobNewParamsIntegration

type FineTuningJobNewParamsIntegration struct {
	// The type of integration to enable. Currently, only "wandb" (Weights and Biases)
	// is supported.
	Type param.Field[FineTuningJobNewParamsIntegrationsType] `json:"type,required"`
	// The settings for your integration with Weights and Biases. This payload
	// specifies the project that metrics will be sent to. Optionally, you can set an
	// explicit display name for your run, add tags to your run, and set a default
	// entity (team, username, etc) to be associated with your run.
	Wandb param.Field[FineTuningJobNewParamsIntegrationsWandb] `json:"wandb,required"`
}

func (FineTuningJobNewParamsIntegration) MarshalJSON

func (r FineTuningJobNewParamsIntegration) MarshalJSON() (data []byte, err error)

type FineTuningJobNewParamsIntegrationsType

type FineTuningJobNewParamsIntegrationsType string

The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported.

const (
	FineTuningJobNewParamsIntegrationsTypeWandb FineTuningJobNewParamsIntegrationsType = "wandb"
)

func (FineTuningJobNewParamsIntegrationsType) IsKnown

type FineTuningJobNewParamsIntegrationsWandb

type FineTuningJobNewParamsIntegrationsWandb struct {
	// The name of the project that the new run will be created under.
	Project param.Field[string] `json:"project,required"`
	// The entity to use for the run. This allows you to set the team or username of
	// the WandB user that you would like associated with the run. If not set, the
	// default entity for the registered WandB API key is used.
	Entity param.Field[string] `json:"entity"`
	// A display name to set for the run. If not set, we will use the Job ID as the
	// name.
	Name param.Field[string] `json:"name"`
	// A list of tags to be attached to the newly created run. These tags are passed
	// through directly to WandB. Some default tags are generated by OpenAI:
	// "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
	Tags param.Field[[]string] `json:"tags"`
}

The settings for your integration with Weights and Biases. This payload specifies the project that metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags to your run, and set a default entity (team, username, etc) to be associated with your run.

func (FineTuningJobNewParamsIntegrationsWandb) MarshalJSON

func (r FineTuningJobNewParamsIntegrationsWandb) MarshalJSON() (data []byte, err error)

type FineTuningJobNewParamsModel

type FineTuningJobNewParamsModel string
const (
	FineTuningJobNewParamsModelBabbage002  FineTuningJobNewParamsModel = "babbage-002"
	FineTuningJobNewParamsModelDavinci002  FineTuningJobNewParamsModel = "davinci-002"
	FineTuningJobNewParamsModelGPT3_5Turbo FineTuningJobNewParamsModel = "gpt-3.5-turbo"
	FineTuningJobNewParamsModelGPT4oMini   FineTuningJobNewParamsModel = "gpt-4o-mini"
)

func (FineTuningJobNewParamsModel) IsKnown

func (r FineTuningJobNewParamsModel) IsKnown() bool

type FineTuningJobObject

type FineTuningJobObject string

The object type, which is always "fine_tuning.job".

const (
	FineTuningJobObjectFineTuningJob FineTuningJobObject = "fine_tuning.job"
)

func (FineTuningJobObject) IsKnown

func (r FineTuningJobObject) IsKnown() bool

type FineTuningJobService

type FineTuningJobService struct {
	Options     []option.RequestOption
	Checkpoints *FineTuningJobCheckpointService
}

FineTuningJobService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewFineTuningJobService method instead.

func NewFineTuningJobService

func NewFineTuningJobService(opts ...option.RequestOption) (r *FineTuningJobService)

NewFineTuningJobService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*FineTuningJobService) Cancel

func (r *FineTuningJobService) Cancel(ctx context.Context, fineTuningJobID string, opts ...option.RequestOption) (res *FineTuningJob, err error)

Immediately cancel a fine-tune job.

func (*FineTuningJobService) Get

func (r *FineTuningJobService) Get(ctx context.Context, fineTuningJobID string, opts ...option.RequestOption) (res *FineTuningJob, err error)

Get info about a fine-tuning job.

[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

func (*FineTuningJobService) List

List your organization's fine-tuning jobs.

func (*FineTuningJobService) ListAutoPaging

List your organization's fine-tuning jobs.

func (*FineTuningJobService) ListEvents

Get status updates for a fine-tuning job.

func (*FineTuningJobService) ListEventsAutoPaging

Get status updates for a fine-tuning job.

func (*FineTuningJobService) New

Creates a fine-tuning job which begins the process of creating a new model from a given dataset.

Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.

[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

type FineTuningJobStatus

type FineTuningJobStatus string

The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.

const (
	FineTuningJobStatusValidatingFiles FineTuningJobStatus = "validating_files"
	FineTuningJobStatusQueued          FineTuningJobStatus = "queued"
	FineTuningJobStatusRunning         FineTuningJobStatus = "running"
	FineTuningJobStatusSucceeded       FineTuningJobStatus = "succeeded"
	FineTuningJobStatusFailed          FineTuningJobStatus = "failed"
	FineTuningJobStatusCancelled       FineTuningJobStatus = "cancelled"
)

func (FineTuningJobStatus) IsKnown

func (r FineTuningJobStatus) IsKnown() bool

type FineTuningJobWandbIntegration

type FineTuningJobWandbIntegration struct {
	// The name of the project that the new run will be created under.
	Project string `json:"project,required"`
	// The entity to use for the run. This allows you to set the team or username of
	// the WandB user that you would like associated with the run. If not set, the
	// default entity for the registered WandB API key is used.
	Entity string `json:"entity,nullable"`
	// A display name to set for the run. If not set, we will use the Job ID as the
	// name.
	Name string `json:"name,nullable"`
	// A list of tags to be attached to the newly created run. These tags are passed
	// through directly to WandB. Some default tags are generated by OpenAI:
	// "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
	Tags []string                          `json:"tags"`
	JSON fineTuningJobWandbIntegrationJSON `json:"-"`
}

The settings for your integration with Weights and Biases. This payload specifies the project that metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags to your run, and set a default entity (team, username, etc) to be associated with your run.

func (*FineTuningJobWandbIntegration) UnmarshalJSON

func (r *FineTuningJobWandbIntegration) UnmarshalJSON(data []byte) (err error)

type FineTuningJobWandbIntegrationObject

type FineTuningJobWandbIntegrationObject struct {
	// The type of the integration being enabled for the fine-tuning job
	Type FineTuningJobWandbIntegrationObjectType `json:"type,required"`
	// The settings for your integration with Weights and Biases. This payload
	// specifies the project that metrics will be sent to. Optionally, you can set an
	// explicit display name for your run, add tags to your run, and set a default
	// entity (team, username, etc) to be associated with your run.
	Wandb FineTuningJobWandbIntegration           `json:"wandb,required"`
	JSON  fineTuningJobWandbIntegrationObjectJSON `json:"-"`
}

func (*FineTuningJobWandbIntegrationObject) UnmarshalJSON

func (r *FineTuningJobWandbIntegrationObject) UnmarshalJSON(data []byte) (err error)

type FineTuningJobWandbIntegrationObjectType

type FineTuningJobWandbIntegrationObjectType string

The type of the integration being enabled for the fine-tuning job

const (
	FineTuningJobWandbIntegrationObjectTypeWandb FineTuningJobWandbIntegrationObjectType = "wandb"
)

func (FineTuningJobWandbIntegrationObjectType) IsKnown

type FineTuningService

type FineTuningService struct {
	Options []option.RequestOption
	Jobs    *FineTuningJobService
}

FineTuningService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewFineTuningService method instead.

func NewFineTuningService

func NewFineTuningService(opts ...option.RequestOption) (r *FineTuningService)

NewFineTuningService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

type FinishedChatCompletionToolCall

type FinishedChatCompletionToolCall struct {
	ChatCompletionMessageToolCallFunction
	Index int
}

type FunctionDefinition

type FunctionDefinition = shared.FunctionDefinition

This is an alias to an internal type.

type FunctionDefinitionParam

type FunctionDefinitionParam = shared.FunctionDefinitionParam

This is an alias to an internal type.

type FunctionParameters

type FunctionParameters = shared.FunctionParameters

The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.

Omitting `parameters` defines a function with an empty parameter list.

This is an alias to an internal type.

type FunctionTool

type FunctionTool struct {
	Function shared.FunctionDefinition `json:"function,required"`
	// The type of tool being defined: `function`
	Type FunctionToolType `json:"type,required"`
	JSON functionToolJSON `json:"-"`
}

func (*FunctionTool) UnmarshalJSON

func (r *FunctionTool) UnmarshalJSON(data []byte) (err error)

type FunctionToolCall

type FunctionToolCall struct {
	// The ID of the tool call object.
	ID string `json:"id,required"`
	// The definition of the function that was called.
	Function FunctionToolCallFunction `json:"function,required"`
	// The type of tool call. This is always going to be `function` for this type of
	// tool call.
	Type FunctionToolCallType `json:"type,required"`
	JSON functionToolCallJSON `json:"-"`
}

func (*FunctionToolCall) UnmarshalJSON

func (r *FunctionToolCall) UnmarshalJSON(data []byte) (err error)

type FunctionToolCallDelta

type FunctionToolCallDelta struct {
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The type of tool call. This is always going to be `function` for this type of
	// tool call.
	Type FunctionToolCallDeltaType `json:"type,required"`
	// The ID of the tool call object.
	ID string `json:"id"`
	// The definition of the function that was called.
	Function FunctionToolCallDeltaFunction `json:"function"`
	JSON     functionToolCallDeltaJSON     `json:"-"`
}

func (*FunctionToolCallDelta) UnmarshalJSON

func (r *FunctionToolCallDelta) UnmarshalJSON(data []byte) (err error)

type FunctionToolCallDeltaFunction

type FunctionToolCallDeltaFunction struct {
	// The arguments passed to the function.
	Arguments string `json:"arguments"`
	// The name of the function.
	Name string `json:"name"`
	// The output of the function. This will be `null` if the outputs have not been
	// [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
	// yet.
	Output string                            `json:"output,nullable"`
	JSON   functionToolCallDeltaFunctionJSON `json:"-"`
}

The definition of the function that was called.

func (*FunctionToolCallDeltaFunction) UnmarshalJSON

func (r *FunctionToolCallDeltaFunction) UnmarshalJSON(data []byte) (err error)

type FunctionToolCallDeltaType

type FunctionToolCallDeltaType string

The type of tool call. This is always going to be `function` for this type of tool call.

const (
	FunctionToolCallDeltaTypeFunction FunctionToolCallDeltaType = "function"
)

func (FunctionToolCallDeltaType) IsKnown

func (r FunctionToolCallDeltaType) IsKnown() bool

type FunctionToolCallFunction

type FunctionToolCallFunction struct {
	// The arguments passed to the function.
	Arguments string `json:"arguments,required"`
	// The name of the function.
	Name string `json:"name,required"`
	// The output of the function. This will be `null` if the outputs have not been
	// [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
	// yet.
	Output string                       `json:"output,required,nullable"`
	JSON   functionToolCallFunctionJSON `json:"-"`
}

The definition of the function that was called.

func (*FunctionToolCallFunction) UnmarshalJSON

func (r *FunctionToolCallFunction) UnmarshalJSON(data []byte) (err error)

type FunctionToolCallType

type FunctionToolCallType string

The type of tool call. This is always going to be `function` for this type of tool call.

const (
	FunctionToolCallTypeFunction FunctionToolCallType = "function"
)

func (FunctionToolCallType) IsKnown

func (r FunctionToolCallType) IsKnown() bool

type FunctionToolParam

type FunctionToolParam struct {
	Function param.Field[shared.FunctionDefinitionParam] `json:"function,required"`
	// The type of tool being defined: `function`
	Type param.Field[FunctionToolType] `json:"type,required"`
}

func (FunctionToolParam) MarshalJSON

func (r FunctionToolParam) MarshalJSON() (data []byte, err error)

type FunctionToolType

type FunctionToolType string

The type of tool being defined: `function`

const (
	FunctionToolTypeFunction FunctionToolType = "function"
)

func (FunctionToolType) IsKnown

func (r FunctionToolType) IsKnown() bool

type Image

type Image struct {
	// The base64-encoded JSON of the generated image, if `response_format` is
	// `b64_json`.
	B64JSON string `json:"b64_json"`
	// The prompt that was used to generate the image, if there was any revision to the
	// prompt.
	RevisedPrompt string `json:"revised_prompt"`
	// The URL of the generated image, if `response_format` is `url` (default).
	URL  string    `json:"url"`
	JSON imageJSON `json:"-"`
}

Represents the url or the content of an image generated by the OpenAI API.

func (*Image) UnmarshalJSON

func (r *Image) UnmarshalJSON(data []byte) (err error)

type ImageEditParams

type ImageEditParams struct {
	// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
	// is not provided, image must have transparency, which will be used as the mask.
	Image param.Field[io.Reader] `json:"image,required" format:"binary"`
	// A text description of the desired image(s). The maximum length is 1000
	// characters.
	Prompt param.Field[string] `json:"prompt,required"`
	// An additional image whose fully transparent areas (e.g. where alpha is zero)
	// indicate where `image` should be edited. Must be a valid PNG file, less than
	// 4MB, and have the same dimensions as `image`.
	Mask param.Field[io.Reader] `json:"mask" format:"binary"`
	// The model to use for image generation. Only `dall-e-2` is supported at this
	// time.
	Model param.Field[ImageModel] `json:"model"`
	// The number of images to generate. Must be between 1 and 10.
	N param.Field[int64] `json:"n"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated.
	ResponseFormat param.Field[ImageEditParamsResponseFormat] `json:"response_format"`
	// The size of the generated images. Must be one of `256x256`, `512x512`, or
	// `1024x1024`.
	Size param.Field[ImageEditParamsSize] `json:"size"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (ImageEditParams) MarshalMultipart

func (r ImageEditParams) MarshalMultipart() (data []byte, contentType string, err error)

type ImageEditParamsResponseFormat

type ImageEditParamsResponseFormat string

The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated.

const (
	ImageEditParamsResponseFormatURL     ImageEditParamsResponseFormat = "url"
	ImageEditParamsResponseFormatB64JSON ImageEditParamsResponseFormat = "b64_json"
)

func (ImageEditParamsResponseFormat) IsKnown

func (r ImageEditParamsResponseFormat) IsKnown() bool

type ImageEditParamsSize

type ImageEditParamsSize string

The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.

const (
	ImageEditParamsSize256x256   ImageEditParamsSize = "256x256"
	ImageEditParamsSize512x512   ImageEditParamsSize = "512x512"
	ImageEditParamsSize1024x1024 ImageEditParamsSize = "1024x1024"
)

func (ImageEditParamsSize) IsKnown

func (r ImageEditParamsSize) IsKnown() bool

type ImageFile

type ImageFile struct {
	// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
	// in the message content. Set `purpose="vision"` when uploading the File if you
	// need to later display the file content.
	FileID string `json:"file_id,required"`
	// Specifies the detail level of the image if specified by the user. `low` uses
	// fewer tokens, you can opt in to high resolution using `high`.
	Detail ImageFileDetail `json:"detail"`
	JSON   imageFileJSON   `json:"-"`
}

func (*ImageFile) UnmarshalJSON

func (r *ImageFile) UnmarshalJSON(data []byte) (err error)

type ImageFileContentBlock

type ImageFileContentBlock struct {
	ImageFile ImageFile `json:"image_file,required"`
	// Always `image_file`.
	Type ImageFileContentBlockType `json:"type,required"`
	JSON imageFileContentBlockJSON `json:"-"`
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (*ImageFileContentBlock) UnmarshalJSON

func (r *ImageFileContentBlock) UnmarshalJSON(data []byte) (err error)

type ImageFileContentBlockParam

type ImageFileContentBlockParam struct {
	ImageFile param.Field[ImageFileParam] `json:"image_file,required"`
	// Always `image_file`.
	Type param.Field[ImageFileContentBlockType] `json:"type,required"`
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (ImageFileContentBlockParam) MarshalJSON

func (r ImageFileContentBlockParam) MarshalJSON() (data []byte, err error)

type ImageFileContentBlockType

type ImageFileContentBlockType string

Always `image_file`.

const (
	ImageFileContentBlockTypeImageFile ImageFileContentBlockType = "image_file"
)

func (ImageFileContentBlockType) IsKnown

func (r ImageFileContentBlockType) IsKnown() bool

type ImageFileDelta

type ImageFileDelta struct {
	// Specifies the detail level of the image if specified by the user. `low` uses
	// fewer tokens, you can opt in to high resolution using `high`.
	Detail ImageFileDeltaDetail `json:"detail"`
	// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
	// in the message content. Set `purpose="vision"` when uploading the File if you
	// need to later display the file content.
	FileID string             `json:"file_id"`
	JSON   imageFileDeltaJSON `json:"-"`
}

func (*ImageFileDelta) UnmarshalJSON

func (r *ImageFileDelta) UnmarshalJSON(data []byte) (err error)

type ImageFileDeltaBlock

type ImageFileDeltaBlock struct {
	// The index of the content part in the message.
	Index int64 `json:"index,required"`
	// Always `image_file`.
	Type      ImageFileDeltaBlockType `json:"type,required"`
	ImageFile ImageFileDelta          `json:"image_file"`
	JSON      imageFileDeltaBlockJSON `json:"-"`
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (*ImageFileDeltaBlock) UnmarshalJSON

func (r *ImageFileDeltaBlock) UnmarshalJSON(data []byte) (err error)

type ImageFileDeltaBlockType

type ImageFileDeltaBlockType string

Always `image_file`.

const (
	ImageFileDeltaBlockTypeImageFile ImageFileDeltaBlockType = "image_file"
)

func (ImageFileDeltaBlockType) IsKnown

func (r ImageFileDeltaBlockType) IsKnown() bool

type ImageFileDeltaDetail

type ImageFileDeltaDetail string

Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.

const (
	ImageFileDeltaDetailAuto ImageFileDeltaDetail = "auto"
	ImageFileDeltaDetailLow  ImageFileDeltaDetail = "low"
	ImageFileDeltaDetailHigh ImageFileDeltaDetail = "high"
)

func (ImageFileDeltaDetail) IsKnown

func (r ImageFileDeltaDetail) IsKnown() bool

type ImageFileDetail

type ImageFileDetail string

Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.

const (
	ImageFileDetailAuto ImageFileDetail = "auto"
	ImageFileDetailLow  ImageFileDetail = "low"
	ImageFileDetailHigh ImageFileDetail = "high"
)

func (ImageFileDetail) IsKnown

func (r ImageFileDetail) IsKnown() bool

type ImageFileParam

type ImageFileParam struct {
	// The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
	// in the message content. Set `purpose="vision"` when uploading the File if you
	// need to later display the file content.
	FileID param.Field[string] `json:"file_id,required"`
	// Specifies the detail level of the image if specified by the user. `low` uses
	// fewer tokens, you can opt in to high resolution using `high`.
	Detail param.Field[ImageFileDetail] `json:"detail"`
}

func (ImageFileParam) MarshalJSON

func (r ImageFileParam) MarshalJSON() (data []byte, err error)

type ImageGenerateParams

type ImageGenerateParams struct {
	// A text description of the desired image(s). The maximum length is 1000
	// characters for `dall-e-2` and 4000 characters for `dall-e-3`.
	Prompt param.Field[string] `json:"prompt,required"`
	// The model to use for image generation.
	Model param.Field[ImageModel] `json:"model"`
	// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
	// `n=1` is supported.
	N param.Field[int64] `json:"n"`
	// The quality of the image that will be generated. `hd` creates images with finer
	// details and greater consistency across the image. This param is only supported
	// for `dall-e-3`.
	Quality param.Field[ImageGenerateParamsQuality] `json:"quality"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated.
	ResponseFormat param.Field[ImageGenerateParamsResponseFormat] `json:"response_format"`
	// The size of the generated images. Must be one of `256x256`, `512x512`, or
	// `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
	// `1024x1792` for `dall-e-3` models.
	Size param.Field[ImageGenerateParamsSize] `json:"size"`
	// The style of the generated images. Must be one of `vivid` or `natural`. Vivid
	// causes the model to lean towards generating hyper-real and dramatic images.
	// Natural causes the model to produce more natural, less hyper-real looking
	// images. This param is only supported for `dall-e-3`.
	Style param.Field[ImageGenerateParamsStyle] `json:"style"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (ImageGenerateParams) MarshalJSON

func (r ImageGenerateParams) MarshalJSON() (data []byte, err error)

type ImageGenerateParamsQuality

type ImageGenerateParamsQuality string

The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`.

const (
	ImageGenerateParamsQualityStandard ImageGenerateParamsQuality = "standard"
	ImageGenerateParamsQualityHD       ImageGenerateParamsQuality = "hd"
)

func (ImageGenerateParamsQuality) IsKnown

func (r ImageGenerateParamsQuality) IsKnown() bool

type ImageGenerateParamsResponseFormat

type ImageGenerateParamsResponseFormat string

The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated.

const (
	ImageGenerateParamsResponseFormatURL     ImageGenerateParamsResponseFormat = "url"
	ImageGenerateParamsResponseFormatB64JSON ImageGenerateParamsResponseFormat = "b64_json"
)

func (ImageGenerateParamsResponseFormat) IsKnown

type ImageGenerateParamsSize

type ImageGenerateParamsSize string

The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.

const (
	ImageGenerateParamsSize256x256   ImageGenerateParamsSize = "256x256"
	ImageGenerateParamsSize512x512   ImageGenerateParamsSize = "512x512"
	ImageGenerateParamsSize1024x1024 ImageGenerateParamsSize = "1024x1024"
	ImageGenerateParamsSize1792x1024 ImageGenerateParamsSize = "1792x1024"
	ImageGenerateParamsSize1024x1792 ImageGenerateParamsSize = "1024x1792"
)

func (ImageGenerateParamsSize) IsKnown

func (r ImageGenerateParamsSize) IsKnown() bool

type ImageGenerateParamsStyle

type ImageGenerateParamsStyle string

The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`.

const (
	ImageGenerateParamsStyleVivid   ImageGenerateParamsStyle = "vivid"
	ImageGenerateParamsStyleNatural ImageGenerateParamsStyle = "natural"
)

func (ImageGenerateParamsStyle) IsKnown

func (r ImageGenerateParamsStyle) IsKnown() bool

type ImageModel

type ImageModel = string
const (
	ImageModelDallE2 ImageModel = "dall-e-2"
	ImageModelDallE3 ImageModel = "dall-e-3"
)

type ImageNewVariationParams

type ImageNewVariationParams struct {
	// The image to use as the basis for the variation(s). Must be a valid PNG file,
	// less than 4MB, and square.
	Image param.Field[io.Reader] `json:"image,required" format:"binary"`
	// The model to use for image generation. Only `dall-e-2` is supported at this
	// time.
	Model param.Field[ImageModel] `json:"model"`
	// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
	// `n=1` is supported.
	N param.Field[int64] `json:"n"`
	// The format in which the generated images are returned. Must be one of `url` or
	// `b64_json`. URLs are only valid for 60 minutes after the image has been
	// generated.
	ResponseFormat param.Field[ImageNewVariationParamsResponseFormat] `json:"response_format"`
	// The size of the generated images. Must be one of `256x256`, `512x512`, or
	// `1024x1024`.
	Size param.Field[ImageNewVariationParamsSize] `json:"size"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
	User param.Field[string] `json:"user"`
}

func (ImageNewVariationParams) MarshalMultipart

func (r ImageNewVariationParams) MarshalMultipart() (data []byte, contentType string, err error)

type ImageNewVariationParamsResponseFormat

type ImageNewVariationParamsResponseFormat string

The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated.

const (
	ImageNewVariationParamsResponseFormatURL     ImageNewVariationParamsResponseFormat = "url"
	ImageNewVariationParamsResponseFormatB64JSON ImageNewVariationParamsResponseFormat = "b64_json"
)

func (ImageNewVariationParamsResponseFormat) IsKnown

type ImageNewVariationParamsSize

type ImageNewVariationParamsSize string

The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.

const (
	ImageNewVariationParamsSize256x256   ImageNewVariationParamsSize = "256x256"
	ImageNewVariationParamsSize512x512   ImageNewVariationParamsSize = "512x512"
	ImageNewVariationParamsSize1024x1024 ImageNewVariationParamsSize = "1024x1024"
)

func (ImageNewVariationParamsSize) IsKnown

func (r ImageNewVariationParamsSize) IsKnown() bool

type ImageService

type ImageService struct {
	Options []option.RequestOption
}

ImageService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewImageService method.

func NewImageService

func NewImageService(opts ...option.RequestOption) (r *ImageService)

NewImageService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*ImageService) Edit

func (r *ImageService) Edit(ctx context.Context, body ImageEditParams, opts ...option.RequestOption) (res *ImagesResponse, err error)

Creates an edited or extended image given an original image and a prompt.

func (*ImageService) Generate

func (r *ImageService) Generate(ctx context.Context, body ImageGenerateParams, opts ...option.RequestOption) (res *ImagesResponse, err error)

Creates an image given a prompt.

func (*ImageService) NewVariation

func (r *ImageService) NewVariation(ctx context.Context, body ImageNewVariationParams, opts ...option.RequestOption) (res *ImagesResponse, err error)

Creates a variation of a given image.

type ImageURL

type ImageURL struct {
	// The external URL of the image; must be one of the supported image types:
	// jpeg, jpg, png, gif, webp.
	URL string `json:"url,required" format:"uri"`
	// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
	// to high resolution using `high`. Default value is `auto`
	Detail ImageURLDetail `json:"detail"`
	JSON   imageURLJSON   `json:"-"`
}

func (*ImageURL) UnmarshalJSON

func (r *ImageURL) UnmarshalJSON(data []byte) (err error)

type ImageURLContentBlock

type ImageURLContentBlock struct {
	ImageURL ImageURL `json:"image_url,required"`
	// The type of the content part.
	Type ImageURLContentBlockType `json:"type,required"`
	JSON imageURLContentBlockJSON `json:"-"`
}

References an image URL in the content of a message.

func (*ImageURLContentBlock) UnmarshalJSON

func (r *ImageURLContentBlock) UnmarshalJSON(data []byte) (err error)

type ImageURLContentBlockParam

type ImageURLContentBlockParam struct {
	ImageURL param.Field[ImageURLParam] `json:"image_url,required"`
	// The type of the content part.
	Type param.Field[ImageURLContentBlockType] `json:"type,required"`
}

References an image URL in the content of a message.

func (ImageURLContentBlockParam) MarshalJSON

func (r ImageURLContentBlockParam) MarshalJSON() (data []byte, err error)

type ImageURLContentBlockType

type ImageURLContentBlockType string

The type of the content part.

const (
	ImageURLContentBlockTypeImageURL ImageURLContentBlockType = "image_url"
)

func (ImageURLContentBlockType) IsKnown

func (r ImageURLContentBlockType) IsKnown() bool

type ImageURLDelta

type ImageURLDelta struct {
	// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
	// to high resolution using `high`.
	Detail ImageURLDeltaDetail `json:"detail"`
	// The URL of the image; must be one of the supported image types: jpeg, jpg,
	// png, gif, webp.
	URL  string            `json:"url"`
	JSON imageURLDeltaJSON `json:"-"`
}

func (*ImageURLDelta) UnmarshalJSON

func (r *ImageURLDelta) UnmarshalJSON(data []byte) (err error)

type ImageURLDeltaBlock

type ImageURLDeltaBlock struct {
	// The index of the content part in the message.
	Index int64 `json:"index,required"`
	// Always `image_url`.
	Type     ImageURLDeltaBlockType `json:"type,required"`
	ImageURL ImageURLDelta          `json:"image_url"`
	JSON     imageURLDeltaBlockJSON `json:"-"`
}

References an image URL in the content of a message.

func (*ImageURLDeltaBlock) UnmarshalJSON

func (r *ImageURLDeltaBlock) UnmarshalJSON(data []byte) (err error)

type ImageURLDeltaBlockType

type ImageURLDeltaBlockType string

Always `image_url`.

const (
	ImageURLDeltaBlockTypeImageURL ImageURLDeltaBlockType = "image_url"
)

func (ImageURLDeltaBlockType) IsKnown

func (r ImageURLDeltaBlockType) IsKnown() bool

type ImageURLDeltaDetail

type ImageURLDeltaDetail string

Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.

const (
	ImageURLDeltaDetailAuto ImageURLDeltaDetail = "auto"
	ImageURLDeltaDetailLow  ImageURLDeltaDetail = "low"
	ImageURLDeltaDetailHigh ImageURLDeltaDetail = "high"
)

func (ImageURLDeltaDetail) IsKnown

func (r ImageURLDeltaDetail) IsKnown() bool

type ImageURLDetail

type ImageURLDetail string

Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`

const (
	ImageURLDetailAuto ImageURLDetail = "auto"
	ImageURLDetailLow  ImageURLDetail = "low"
	ImageURLDetailHigh ImageURLDetail = "high"
)

func (ImageURLDetail) IsKnown

func (r ImageURLDetail) IsKnown() bool

type ImageURLParam

type ImageURLParam struct {
	// The external URL of the image; must be one of the supported image types:
	// jpeg, jpg, png, gif, webp.
	URL param.Field[string] `json:"url,required" format:"uri"`
	// Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
	// to high resolution using `high`. Default value is `auto`
	Detail param.Field[ImageURLDetail] `json:"detail"`
}

func (ImageURLParam) MarshalJSON

func (r ImageURLParam) MarshalJSON() (data []byte, err error)

type ImagesResponse

type ImagesResponse struct {
	Created int64              `json:"created,required"`
	Data    []Image            `json:"data,required"`
	JSON    imagesResponseJSON `json:"-"`
}

func (*ImagesResponse) UnmarshalJSON

func (r *ImagesResponse) UnmarshalJSON(data []byte) (err error)

type Message

type Message struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// If applicable, the ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) that
	// authored this message.
	AssistantID string `json:"assistant_id,required,nullable"`
	// A list of files attached to the message, and the tools they were added to.
	Attachments []MessageAttachment `json:"attachments,required,nullable"`
	// The Unix timestamp (in seconds) for when the message was completed.
	CompletedAt int64 `json:"completed_at,required,nullable"`
	// The content of the message in array of text and/or images.
	Content []MessageContent `json:"content,required"`
	// The Unix timestamp (in seconds) for when the message was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the message was marked as incomplete.
	IncompleteAt int64 `json:"incomplete_at,required,nullable"`
	// On an incomplete message, details about why the message is incomplete.
	IncompleteDetails MessageIncompleteDetails `json:"incomplete_details,required,nullable"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The object type, which is always `thread.message`.
	Object MessageObject `json:"object,required"`
	// The entity that produced the message. One of `user` or `assistant`.
	Role MessageRole `json:"role,required"`
	// The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
	// associated with the creation of this message. Value is `null` when messages are
	// created manually using the create message or create thread endpoints.
	RunID string `json:"run_id,required,nullable"`
	// The status of the message, which can be either `in_progress`, `incomplete`, or
	// `completed`.
	Status MessageStatus `json:"status,required"`
	// The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
	// this message belongs to.
	ThreadID string      `json:"thread_id,required"`
	JSON     messageJSON `json:"-"`
}

Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads).

func (*Message) UnmarshalJSON

func (r *Message) UnmarshalJSON(data []byte) (err error)

type MessageAttachment

type MessageAttachment struct {
	// The ID of the file to attach to the message.
	FileID string `json:"file_id"`
	// The tools to add this file to.
	Tools []MessageAttachmentsTool `json:"tools"`
	JSON  messageAttachmentJSON    `json:"-"`
}

func (*MessageAttachment) UnmarshalJSON

func (r *MessageAttachment) UnmarshalJSON(data []byte) (err error)

type MessageAttachmentsTool

type MessageAttachmentsTool struct {
	// The type of tool being defined: `code_interpreter`
	Type MessageAttachmentsToolsType `json:"type,required"`
	JSON messageAttachmentsToolJSON  `json:"-"`
	// contains filtered or unexported fields
}

func (MessageAttachmentsTool) AsUnion

AsUnion returns a MessageAttachmentsToolsUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterTool, MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly.

func (*MessageAttachmentsTool) UnmarshalJSON

func (r *MessageAttachmentsTool) UnmarshalJSON(data []byte) (err error)

type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly

type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly struct {
	// The type of tool being defined: `file_search`
	Type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType `json:"type,required"`
	JSON messageAttachmentsToolsAssistantToolsFileSearchTypeOnlyJSON `json:"-"`
}

func (*MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly) UnmarshalJSON

type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType

type MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType string

The type of tool being defined: `file_search`

const (
	MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyTypeFileSearch MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType = "file_search"
)

func (MessageAttachmentsToolsAssistantToolsFileSearchTypeOnlyType) IsKnown

type MessageAttachmentsToolsType

type MessageAttachmentsToolsType string

The type of tool being defined: `code_interpreter`

const (
	MessageAttachmentsToolsTypeCodeInterpreter MessageAttachmentsToolsType = "code_interpreter"
	MessageAttachmentsToolsTypeFileSearch      MessageAttachmentsToolsType = "file_search"
)

func (MessageAttachmentsToolsType) IsKnown

func (r MessageAttachmentsToolsType) IsKnown() bool

type MessageAttachmentsToolsUnion

type MessageAttachmentsToolsUnion interface {
	// contains filtered or unexported methods
}

Union satisfied by CodeInterpreterTool or MessageAttachmentsToolsAssistantToolsFileSearchTypeOnly.

type MessageContent

type MessageContent struct {
	// Always `image_file`.
	Type      MessageContentType `json:"type,required"`
	ImageFile ImageFile          `json:"image_file"`
	ImageURL  ImageURL           `json:"image_url"`
	Text      Text               `json:"text"`
	Refusal   string             `json:"refusal"`
	JSON      messageContentJSON `json:"-"`
	// contains filtered or unexported fields
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (MessageContent) AsUnion

func (r MessageContent) AsUnion() MessageContentUnion

AsUnion returns a MessageContentUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock.

func (*MessageContent) UnmarshalJSON

func (r *MessageContent) UnmarshalJSON(data []byte) (err error)

type MessageContentDelta

type MessageContentDelta struct {
	// The index of the content part in the message.
	Index int64 `json:"index,required"`
	// Always `image_file`.
	Type      MessageContentDeltaType `json:"type,required"`
	ImageFile ImageFileDelta          `json:"image_file"`
	Text      TextDelta               `json:"text"`
	Refusal   string                  `json:"refusal"`
	ImageURL  ImageURLDelta           `json:"image_url"`
	JSON      messageContentDeltaJSON `json:"-"`
	// contains filtered or unexported fields
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (MessageContentDelta) AsUnion

AsUnion returns a MessageContentDeltaUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock.

func (*MessageContentDelta) UnmarshalJSON

func (r *MessageContentDelta) UnmarshalJSON(data []byte) (err error)

type MessageContentDeltaType

type MessageContentDeltaType string

Always `image_file`.

const (
	MessageContentDeltaTypeImageFile MessageContentDeltaType = "image_file"
	MessageContentDeltaTypeText      MessageContentDeltaType = "text"
	MessageContentDeltaTypeRefusal   MessageContentDeltaType = "refusal"
	MessageContentDeltaTypeImageURL  MessageContentDeltaType = "image_url"
)

func (MessageContentDeltaType) IsKnown

func (r MessageContentDeltaType) IsKnown() bool

type MessageContentDeltaUnion

type MessageContentDeltaUnion interface {
	// contains filtered or unexported methods
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

Union satisfied by ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock or ImageURLDeltaBlock.

type MessageContentPartParam

type MessageContentPartParam struct {
	// Always `image_file`.
	Type      param.Field[MessageContentPartParamType] `json:"type,required"`
	ImageFile param.Field[ImageFileParam]              `json:"image_file"`
	ImageURL  param.Field[ImageURLParam]               `json:"image_url"`
	// Text content to be sent to the model
	Text param.Field[string] `json:"text"`
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

func (MessageContentPartParam) MarshalJSON

func (r MessageContentPartParam) MarshalJSON() (data []byte, err error)

type MessageContentPartParamType

type MessageContentPartParamType string

Always `image_file`.

const (
	MessageContentPartParamTypeImageFile MessageContentPartParamType = "image_file"
	MessageContentPartParamTypeImageURL  MessageContentPartParamType = "image_url"
	MessageContentPartParamTypeText      MessageContentPartParamType = "text"
)

func (MessageContentPartParamType) IsKnown

func (r MessageContentPartParamType) IsKnown() bool

type MessageContentPartParamUnion

type MessageContentPartParamUnion interface {
	// contains filtered or unexported methods
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

Satisfied by ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam, MessageContentPartParam.

type MessageContentType

type MessageContentType string

Always `image_file`.

const (
	MessageContentTypeImageFile MessageContentType = "image_file"
	MessageContentTypeImageURL  MessageContentType = "image_url"
	MessageContentTypeText      MessageContentType = "text"
	MessageContentTypeRefusal   MessageContentType = "refusal"
)

func (MessageContentType) IsKnown

func (r MessageContentType) IsKnown() bool

type MessageContentUnion

type MessageContentUnion interface {
	// contains filtered or unexported methods
}

References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.

Union satisfied by ImageFileContentBlock, ImageURLContentBlock, TextContentBlock or RefusalContentBlock.

type MessageCreationStepDetails

type MessageCreationStepDetails struct {
	MessageCreation MessageCreationStepDetailsMessageCreation `json:"message_creation,required"`
	// Always `message_creation`.
	Type MessageCreationStepDetailsType `json:"type,required"`
	JSON messageCreationStepDetailsJSON `json:"-"`
}

Details of the message creation by the run step.

func (*MessageCreationStepDetails) UnmarshalJSON

func (r *MessageCreationStepDetails) UnmarshalJSON(data []byte) (err error)

type MessageCreationStepDetailsMessageCreation

type MessageCreationStepDetailsMessageCreation struct {
	// The ID of the message that was created by this run step.
	MessageID string                                        `json:"message_id,required"`
	JSON      messageCreationStepDetailsMessageCreationJSON `json:"-"`
}

func (*MessageCreationStepDetailsMessageCreation) UnmarshalJSON

func (r *MessageCreationStepDetailsMessageCreation) UnmarshalJSON(data []byte) (err error)

type MessageCreationStepDetailsType

type MessageCreationStepDetailsType string

Always `message_creation`.

const (
	MessageCreationStepDetailsTypeMessageCreation MessageCreationStepDetailsType = "message_creation"
)

func (MessageCreationStepDetailsType) IsKnown

type MessageDeleted

type MessageDeleted struct {
	ID      string               `json:"id,required"`
	Deleted bool                 `json:"deleted,required"`
	Object  MessageDeletedObject `json:"object,required"`
	JSON    messageDeletedJSON   `json:"-"`
}

func (*MessageDeleted) UnmarshalJSON

func (r *MessageDeleted) UnmarshalJSON(data []byte) (err error)

type MessageDeletedObject

type MessageDeletedObject string
const (
	MessageDeletedObjectThreadMessageDeleted MessageDeletedObject = "thread.message.deleted"
)

func (MessageDeletedObject) IsKnown

func (r MessageDeletedObject) IsKnown() bool

type MessageDelta

type MessageDelta struct {
	// The content of the message in array of text and/or images.
	Content []MessageContentDelta `json:"content"`
	// The entity that produced the message. One of `user` or `assistant`.
	Role MessageDeltaRole `json:"role"`
	JSON messageDeltaJSON `json:"-"`
}

The delta containing the fields that have changed on the Message.

func (*MessageDelta) UnmarshalJSON

func (r *MessageDelta) UnmarshalJSON(data []byte) (err error)

type MessageDeltaEvent

type MessageDeltaEvent struct {
	// The identifier of the message, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The delta containing the fields that have changed on the Message.
	Delta MessageDelta `json:"delta,required"`
	// The object type, which is always `thread.message.delta`.
	Object MessageDeltaEventObject `json:"object,required"`
	JSON   messageDeltaEventJSON   `json:"-"`
}

Represents a message delta, i.e., any changed fields on a message during streaming.

func (*MessageDeltaEvent) UnmarshalJSON

func (r *MessageDeltaEvent) UnmarshalJSON(data []byte) (err error)

type MessageDeltaEventObject

type MessageDeltaEventObject string

The object type, which is always `thread.message.delta`.

const (
	MessageDeltaEventObjectThreadMessageDelta MessageDeltaEventObject = "thread.message.delta"
)

func (MessageDeltaEventObject) IsKnown

func (r MessageDeltaEventObject) IsKnown() bool

type MessageDeltaRole

type MessageDeltaRole string

The entity that produced the message. One of `user` or `assistant`.

const (
	MessageDeltaRoleUser      MessageDeltaRole = "user"
	MessageDeltaRoleAssistant MessageDeltaRole = "assistant"
)

func (MessageDeltaRole) IsKnown

func (r MessageDeltaRole) IsKnown() bool

type MessageIncompleteDetails

type MessageIncompleteDetails struct {
	// The reason the message is incomplete.
	Reason MessageIncompleteDetailsReason `json:"reason,required"`
	JSON   messageIncompleteDetailsJSON   `json:"-"`
}

On an incomplete message, details about why the message is incomplete.

func (*MessageIncompleteDetails) UnmarshalJSON

func (r *MessageIncompleteDetails) UnmarshalJSON(data []byte) (err error)

type MessageIncompleteDetailsReason

type MessageIncompleteDetailsReason string

The reason the message is incomplete.

const (
	MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter"
	MessageIncompleteDetailsReasonMaxTokens     MessageIncompleteDetailsReason = "max_tokens"
	MessageIncompleteDetailsReasonRunCancelled  MessageIncompleteDetailsReason = "run_cancelled"
	MessageIncompleteDetailsReasonRunExpired    MessageIncompleteDetailsReason = "run_expired"
	MessageIncompleteDetailsReasonRunFailed     MessageIncompleteDetailsReason = "run_failed"
)

func (MessageIncompleteDetailsReason) IsKnown

type MessageObject

type MessageObject string

The object type, which is always `thread.message`.

const (
	MessageObjectThreadMessage MessageObject = "thread.message"
)

func (MessageObject) IsKnown

func (r MessageObject) IsKnown() bool

type MessageRole

type MessageRole string

The entity that produced the message. One of `user` or `assistant`.

const (
	MessageRoleUser      MessageRole = "user"
	MessageRoleAssistant MessageRole = "assistant"
)

func (MessageRole) IsKnown

func (r MessageRole) IsKnown() bool

type MessageStatus

type MessageStatus string

The status of the message, which can be either `in_progress`, `incomplete`, or `completed`.

const (
	MessageStatusInProgress MessageStatus = "in_progress"
	MessageStatusIncomplete MessageStatus = "incomplete"
	MessageStatusCompleted  MessageStatus = "completed"
)

func (MessageStatus) IsKnown

func (r MessageStatus) IsKnown() bool

type Model

type Model struct {
	// The model identifier, which can be referenced in the API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) when the model was created.
	Created int64 `json:"created,required"`
	// The object type, which is always "model".
	Object ModelObject `json:"object,required"`
	// The organization that owns the model.
	OwnedBy string    `json:"owned_by,required"`
	JSON    modelJSON `json:"-"`
}

Describes an OpenAI model offering that can be used with the API.

func (*Model) UnmarshalJSON

func (r *Model) UnmarshalJSON(data []byte) (err error)

type ModelDeleted

type ModelDeleted struct {
	ID      string           `json:"id,required"`
	Deleted bool             `json:"deleted,required"`
	Object  string           `json:"object,required"`
	JSON    modelDeletedJSON `json:"-"`
}

func (*ModelDeleted) UnmarshalJSON

func (r *ModelDeleted) UnmarshalJSON(data []byte) (err error)

type ModelObject

type ModelObject string

The object type, which is always "model".

const (
	ModelObjectModel ModelObject = "model"
)

func (ModelObject) IsKnown

func (r ModelObject) IsKnown() bool

type ModelService

type ModelService struct {
	Options []option.RequestOption
}

ModelService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; instead, use the NewModelService method.

func NewModelService

func NewModelService(opts ...option.RequestOption) (r *ModelService)

NewModelService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*ModelService) Delete

func (r *ModelService) Delete(ctx context.Context, model string, opts ...option.RequestOption) (res *ModelDeleted, err error)

Delete a fine-tuned model. You must have the Owner role in your organization to delete a model.

func (*ModelService) Get

func (r *ModelService) Get(ctx context.Context, model string, opts ...option.RequestOption) (res *Model, err error)

Retrieves a model instance, providing basic information about the model such as the owner and permissioning.

func (*ModelService) List

func (r *ModelService) List(ctx context.Context, opts ...option.RequestOption) (res *pagination.Page[Model], err error)

Lists the currently available models, and provides basic information about each one such as the owner and availability.

func (*ModelService) ListAutoPaging

func (r *ModelService) ListAutoPaging(ctx context.Context, opts ...option.RequestOption) *pagination.PageAutoPager[Model]

Lists the currently available models, and provides basic information about each one such as the owner and availability.

type Moderation

type Moderation struct {
	// A list of the categories, and whether they are flagged or not.
	Categories ModerationCategories `json:"categories,required"`
	// A list of the categories along with their scores as predicted by the model.
	CategoryScores ModerationCategoryScores `json:"category_scores,required"`
	// Whether any of the below categories are flagged.
	Flagged bool           `json:"flagged,required"`
	JSON    moderationJSON `json:"-"`
}

func (*Moderation) UnmarshalJSON

func (r *Moderation) UnmarshalJSON(data []byte) (err error)

type ModerationCategories

type ModerationCategories struct {
	// Content that expresses, incites, or promotes harassing language towards any
	// target.
	Harassment bool `json:"harassment,required"`
	// Harassment content that also includes violence or serious harm towards any
	// target.
	HarassmentThreatening bool `json:"harassment/threatening,required"`
	// Content that expresses, incites, or promotes hate based on race, gender,
	// ethnicity, religion, nationality, sexual orientation, disability status, or
	// caste. Hateful content aimed at non-protected groups (e.g., chess players) is
	// harassment.
	Hate bool `json:"hate,required"`
	// Hateful content that also includes violence or serious harm towards the targeted
	// group based on race, gender, ethnicity, religion, nationality, sexual
	// orientation, disability status, or caste.
	HateThreatening bool `json:"hate/threatening,required"`
	// Content that promotes, encourages, or depicts acts of self-harm, such as
	// suicide, cutting, and eating disorders.
	SelfHarm bool `json:"self-harm,required"`
	// Content that encourages performing acts of self-harm, such as suicide, cutting,
	// and eating disorders, or that gives instructions or advice on how to commit such
	// acts.
	SelfHarmInstructions bool `json:"self-harm/instructions,required"`
	// Content where the speaker expresses that they are engaging or intend to engage
	// in acts of self-harm, such as suicide, cutting, and eating disorders.
	SelfHarmIntent bool `json:"self-harm/intent,required"`
	// Content meant to arouse sexual excitement, such as the description of sexual
	// activity, or that promotes sexual services (excluding sex education and
	// wellness).
	Sexual bool `json:"sexual,required"`
	// Sexual content that includes an individual who is under 18 years old.
	SexualMinors bool `json:"sexual/minors,required"`
	// Content that depicts death, violence, or physical injury.
	Violence bool `json:"violence,required"`
	// Content that depicts death, violence, or physical injury in graphic detail.
	ViolenceGraphic bool                     `json:"violence/graphic,required"`
	JSON            moderationCategoriesJSON `json:"-"`
}

A list of the categories, and whether they are flagged or not.

func (*ModerationCategories) UnmarshalJSON

func (r *ModerationCategories) UnmarshalJSON(data []byte) (err error)

type ModerationCategoryScores

type ModerationCategoryScores struct {
	// The score for the category 'harassment'.
	Harassment float64 `json:"harassment,required"`
	// The score for the category 'harassment/threatening'.
	HarassmentThreatening float64 `json:"harassment/threatening,required"`
	// The score for the category 'hate'.
	Hate float64 `json:"hate,required"`
	// The score for the category 'hate/threatening'.
	HateThreatening float64 `json:"hate/threatening,required"`
	// The score for the category 'self-harm'.
	SelfHarm float64 `json:"self-harm,required"`
	// The score for the category 'self-harm/instructions'.
	SelfHarmInstructions float64 `json:"self-harm/instructions,required"`
	// The score for the category 'self-harm/intent'.
	SelfHarmIntent float64 `json:"self-harm/intent,required"`
	// The score for the category 'sexual'.
	Sexual float64 `json:"sexual,required"`
	// The score for the category 'sexual/minors'.
	SexualMinors float64 `json:"sexual/minors,required"`
	// The score for the category 'violence'.
	Violence float64 `json:"violence,required"`
	// The score for the category 'violence/graphic'.
	ViolenceGraphic float64                      `json:"violence/graphic,required"`
	JSON            moderationCategoryScoresJSON `json:"-"`
}

A list of the categories along with their scores as predicted by model.

func (*ModerationCategoryScores) UnmarshalJSON

func (r *ModerationCategoryScores) UnmarshalJSON(data []byte) (err error)

type ModerationModel

type ModerationModel = string
const (
	ModerationModelTextModerationLatest ModerationModel = "text-moderation-latest"
	ModerationModelTextModerationStable ModerationModel = "text-moderation-stable"
)

type ModerationNewParams

type ModerationNewParams struct {
	// The input text to classify
	Input param.Field[ModerationNewParamsInputUnion] `json:"input,required"`
	// Two content moderation models are available: `text-moderation-stable` and
	// `text-moderation-latest`.
	//
	// The default is `text-moderation-latest` which will be automatically upgraded
	// over time. This ensures you are always using our most accurate model. If you use
	// `text-moderation-stable`, we will provide advanced notice before updating the
	// model. Accuracy of `text-moderation-stable` may be slightly lower than for
	// `text-moderation-latest`.
	Model param.Field[ModerationModel] `json:"model"`
}

func (ModerationNewParams) MarshalJSON

func (r ModerationNewParams) MarshalJSON() (data []byte, err error)

type ModerationNewParamsInputArray

type ModerationNewParamsInputArray []string

func (ModerationNewParamsInputArray) ImplementsModerationNewParamsInputUnion

func (r ModerationNewParamsInputArray) ImplementsModerationNewParamsInputUnion()

type ModerationNewParamsInputUnion

type ModerationNewParamsInputUnion interface {
	ImplementsModerationNewParamsInputUnion()
}

The input text to classify

Satisfied by shared.UnionString, ModerationNewParamsInputArray.

type ModerationNewResponse

type ModerationNewResponse struct {
	// The unique identifier for the moderation request.
	ID string `json:"id,required"`
	// The model used to generate the moderation results.
	Model string `json:"model,required"`
	// A list of moderation objects.
	Results []Moderation              `json:"results,required"`
	JSON    moderationNewResponseJSON `json:"-"`
}

Represents if a given text input is potentially harmful.

func (*ModerationNewResponse) UnmarshalJSON

func (r *ModerationNewResponse) UnmarshalJSON(data []byte) (err error)

type ModerationService

type ModerationService struct {
	Options []option.RequestOption
}

ModerationService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly; use the NewModerationService method instead.

func NewModerationService

func NewModerationService(opts ...option.RequestOption) (r *ModerationService)

NewModerationService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*ModerationService) New

Classifies if text is potentially harmful.

type OtherFileChunkingStrategyObject

type OtherFileChunkingStrategyObject struct {
	// Always `other`.
	Type OtherFileChunkingStrategyObjectType `json:"type,required"`
	JSON otherFileChunkingStrategyObjectJSON `json:"-"`
}

This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.

func (*OtherFileChunkingStrategyObject) UnmarshalJSON

func (r *OtherFileChunkingStrategyObject) UnmarshalJSON(data []byte) (err error)

type OtherFileChunkingStrategyObjectType

type OtherFileChunkingStrategyObjectType string

Always `other`.

const (
	OtherFileChunkingStrategyObjectTypeOther OtherFileChunkingStrategyObjectType = "other"
)

func (OtherFileChunkingStrategyObjectType) IsKnown

type RefusalContentBlock

type RefusalContentBlock struct {
	Refusal string `json:"refusal,required"`
	// Always `refusal`.
	Type RefusalContentBlockType `json:"type,required"`
	JSON refusalContentBlockJSON `json:"-"`
}

The refusal content generated by the assistant.

func (*RefusalContentBlock) UnmarshalJSON

func (r *RefusalContentBlock) UnmarshalJSON(data []byte) (err error)

type RefusalContentBlockType

type RefusalContentBlockType string

Always `refusal`.

const (
	RefusalContentBlockTypeRefusal RefusalContentBlockType = "refusal"
)

func (RefusalContentBlockType) IsKnown

func (r RefusalContentBlockType) IsKnown() bool

type RefusalDeltaBlock

type RefusalDeltaBlock struct {
	// The index of the refusal part in the message.
	Index int64 `json:"index,required"`
	// Always `refusal`.
	Type    RefusalDeltaBlockType `json:"type,required"`
	Refusal string                `json:"refusal"`
	JSON    refusalDeltaBlockJSON `json:"-"`
}

The refusal content that is part of a message.

func (*RefusalDeltaBlock) UnmarshalJSON

func (r *RefusalDeltaBlock) UnmarshalJSON(data []byte) (err error)

type RefusalDeltaBlockType

type RefusalDeltaBlockType string

Always `refusal`.

const (
	RefusalDeltaBlockTypeRefusal RefusalDeltaBlockType = "refusal"
)

func (RefusalDeltaBlockType) IsKnown

func (r RefusalDeltaBlockType) IsKnown() bool

type RequiredActionFunctionToolCall

type RequiredActionFunctionToolCall struct {
	// The ID of the tool call. This ID must be referenced when you submit the tool
	// outputs using the
	// [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
	// endpoint.
	ID string `json:"id,required"`
	// The function definition.
	Function RequiredActionFunctionToolCallFunction `json:"function,required"`
	// The type of tool call the output is required for. For now, this is always
	// `function`.
	Type RequiredActionFunctionToolCallType `json:"type,required"`
	JSON requiredActionFunctionToolCallJSON `json:"-"`
}

Tool call objects

func (*RequiredActionFunctionToolCall) UnmarshalJSON

func (r *RequiredActionFunctionToolCall) UnmarshalJSON(data []byte) (err error)

type RequiredActionFunctionToolCallFunction

type RequiredActionFunctionToolCallFunction struct {
	// The arguments that the model expects you to pass to the function.
	Arguments string `json:"arguments,required"`
	// The name of the function.
	Name string                                     `json:"name,required"`
	JSON requiredActionFunctionToolCallFunctionJSON `json:"-"`
}

The function definition.

func (*RequiredActionFunctionToolCallFunction) UnmarshalJSON

func (r *RequiredActionFunctionToolCallFunction) UnmarshalJSON(data []byte) (err error)

type RequiredActionFunctionToolCallType

type RequiredActionFunctionToolCallType string

The type of tool call the output is required for. For now, this is always `function`.

const (
	RequiredActionFunctionToolCallTypeFunction RequiredActionFunctionToolCallType = "function"
)

func (RequiredActionFunctionToolCallType) IsKnown

type ResponseFormatJSONObjectParam

type ResponseFormatJSONObjectParam = shared.ResponseFormatJSONObjectParam

This is an alias to an internal type.

type ResponseFormatJSONObjectType

type ResponseFormatJSONObjectType = shared.ResponseFormatJSONObjectType

The type of response format being defined: `json_object`

This is an alias to an internal type.

type ResponseFormatJSONSchemaJSONSchemaParam

type ResponseFormatJSONSchemaJSONSchemaParam = shared.ResponseFormatJSONSchemaJSONSchemaParam

This is an alias to an internal type.

type ResponseFormatJSONSchemaParam

type ResponseFormatJSONSchemaParam = shared.ResponseFormatJSONSchemaParam

This is an alias to an internal type.

type ResponseFormatJSONSchemaType

type ResponseFormatJSONSchemaType = shared.ResponseFormatJSONSchemaType

The type of response format being defined: `json_schema`

This is an alias to an internal type.

type ResponseFormatTextParam

type ResponseFormatTextParam = shared.ResponseFormatTextParam

This is an alias to an internal type.

type ResponseFormatTextType

type ResponseFormatTextType = shared.ResponseFormatTextType

The type of response format being defined: `text`

This is an alias to an internal type.

type Run

type Run struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// execution of this run.
	AssistantID string `json:"assistant_id,required"`
	// The Unix timestamp (in seconds) for when the run was cancelled.
	CancelledAt int64 `json:"cancelled_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run was completed.
	CompletedAt int64 `json:"completed_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the run will expire.
	ExpiresAt int64 `json:"expires_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run failed.
	FailedAt int64 `json:"failed_at,required,nullable"`
	// Details on why the run is incomplete. Will be `null` if the run is not
	// incomplete.
	IncompleteDetails RunIncompleteDetails `json:"incomplete_details,required,nullable"`
	// The instructions that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Instructions string `json:"instructions,required"`
	// The last error associated with this run. Will be `null` if there are no errors.
	LastError RunLastError `json:"last_error,required,nullable"`
	// The maximum number of completion tokens specified to have been used over the
	// course of the run.
	MaxCompletionTokens int64 `json:"max_completion_tokens,required,nullable"`
	// The maximum number of prompt tokens specified to have been used over the course
	// of the run.
	MaxPromptTokens int64 `json:"max_prompt_tokens,required,nullable"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The model that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Model string `json:"model,required"`
	// The object type, which is always `thread.run`.
	Object RunObject `json:"object,required"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
	// during tool use.
	ParallelToolCalls bool `json:"parallel_tool_calls,required"`
	// Details on the action required to continue the run. Will be `null` if no action
	// is required.
	RequiredAction RunRequiredAction `json:"required_action,required,nullable"`
	// The Unix timestamp (in seconds) for when the run was started.
	StartedAt int64 `json:"started_at,required,nullable"`
	// The status of the run, which can be either `queued`, `in_progress`,
	// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
	// `incomplete`, or `expired`.
	Status RunStatus `json:"status,required"`
	// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
	// that was executed on as a part of this run.
	ThreadID string `json:"thread_id,required"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice AssistantToolChoiceOptionUnion `json:"tool_choice,required,nullable"`
	// The list of tools that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Tools []AssistantTool `json:"tools,required"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy RunTruncationStrategy `json:"truncation_strategy,required,nullable"`
	// Usage statistics related to the run. This value will be `null` if the run is not
	// in a terminal state (e.g. `in_progress`, `queued`, etc.).
	Usage RunUsage `json:"usage,required,nullable"`
	// The sampling temperature used for this run. If not set, defaults to 1.
	Temperature float64 `json:"temperature,nullable"`
	// The nucleus sampling value used for this run. If not set, defaults to 1.
	TopP float64 `json:"top_p,nullable"`
	JSON runJSON `json:"-"`
}

Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads).

func (*Run) UnmarshalJSON

func (r *Run) UnmarshalJSON(data []byte) (err error)

type RunIncompleteDetails

type RunIncompleteDetails struct {
	// The reason why the run is incomplete. This will point to which specific token
	// limit was reached over the course of the run.
	Reason RunIncompleteDetailsReason `json:"reason"`
	JSON   runIncompleteDetailsJSON   `json:"-"`
}

Details on why the run is incomplete. Will be `null` if the run is not incomplete.

func (*RunIncompleteDetails) UnmarshalJSON

func (r *RunIncompleteDetails) UnmarshalJSON(data []byte) (err error)

type RunIncompleteDetailsReason

type RunIncompleteDetailsReason string

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

const (
	RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
	RunIncompleteDetailsReasonMaxPromptTokens     RunIncompleteDetailsReason = "max_prompt_tokens"
)

func (RunIncompleteDetailsReason) IsKnown

func (r RunIncompleteDetailsReason) IsKnown() bool

type RunLastError

type RunLastError struct {
	// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
	Code RunLastErrorCode `json:"code,required"`
	// A human-readable description of the error.
	Message string           `json:"message,required"`
	JSON    runLastErrorJSON `json:"-"`
}

The last error associated with this run. Will be `null` if there are no errors.

func (*RunLastError) UnmarshalJSON

func (r *RunLastError) UnmarshalJSON(data []byte) (err error)

type RunLastErrorCode

type RunLastErrorCode string

One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.

const (
	RunLastErrorCodeServerError       RunLastErrorCode = "server_error"
	RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
	RunLastErrorCodeInvalidPrompt     RunLastErrorCode = "invalid_prompt"
)

func (RunLastErrorCode) IsKnown

func (r RunLastErrorCode) IsKnown() bool

type RunObject

type RunObject string

The object type, which is always `thread.run`.

const (
	RunObjectThreadRun RunObject = "thread.run"
)

func (RunObject) IsKnown

func (r RunObject) IsKnown() bool

type RunRequiredAction

type RunRequiredAction struct {
	// Details on the tool outputs needed for this run to continue.
	SubmitToolOutputs RunRequiredActionSubmitToolOutputs `json:"submit_tool_outputs,required"`
	// For now, this is always `submit_tool_outputs`.
	Type RunRequiredActionType `json:"type,required"`
	JSON runRequiredActionJSON `json:"-"`
}

Details on the action required to continue the run. Will be `null` if no action is required.

func (*RunRequiredAction) UnmarshalJSON

func (r *RunRequiredAction) UnmarshalJSON(data []byte) (err error)

type RunRequiredActionSubmitToolOutputs

type RunRequiredActionSubmitToolOutputs struct {
	// A list of the relevant tool calls.
	ToolCalls []RequiredActionFunctionToolCall       `json:"tool_calls,required"`
	JSON      runRequiredActionSubmitToolOutputsJSON `json:"-"`
}

Details on the tool outputs needed for this run to continue.

func (*RunRequiredActionSubmitToolOutputs) UnmarshalJSON

func (r *RunRequiredActionSubmitToolOutputs) UnmarshalJSON(data []byte) (err error)

type RunRequiredActionType

type RunRequiredActionType string

For now, this is always `submit_tool_outputs`.

const (
	RunRequiredActionTypeSubmitToolOutputs RunRequiredActionType = "submit_tool_outputs"
)

func (RunRequiredActionType) IsKnown

func (r RunRequiredActionType) IsKnown() bool

type RunStatus

type RunStatus string

The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.

const (
	RunStatusQueued         RunStatus = "queued"
	RunStatusInProgress     RunStatus = "in_progress"
	RunStatusRequiresAction RunStatus = "requires_action"
	RunStatusCancelling     RunStatus = "cancelling"
	RunStatusCancelled      RunStatus = "cancelled"
	RunStatusFailed         RunStatus = "failed"
	RunStatusCompleted      RunStatus = "completed"
	RunStatusIncomplete     RunStatus = "incomplete"
	RunStatusExpired        RunStatus = "expired"
)

func (RunStatus) IsKnown

func (r RunStatus) IsKnown() bool

type RunStep

type RunStep struct {
	// The identifier of the run step, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants)
	// associated with the run step.
	AssistantID string `json:"assistant_id,required"`
	// The Unix timestamp (in seconds) for when the run step was cancelled.
	CancelledAt int64 `json:"cancelled_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run step completed.
	CompletedAt int64 `json:"completed_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run step was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the run step expired. A step is
	// considered expired if the parent run is expired.
	ExpiredAt int64 `json:"expired_at,required,nullable"`
	// The Unix timestamp (in seconds) for when the run step failed.
	FailedAt int64 `json:"failed_at,required,nullable"`
	// The last error associated with this run step. Will be `null` if there are no
	// errors.
	LastError RunStepLastError `json:"last_error,required,nullable"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The object type, which is always `thread.run.step`.
	Object RunStepObject `json:"object,required"`
	// The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that
	// this run step is a part of.
	RunID string `json:"run_id,required"`
	// The status of the run step, which can be either `in_progress`, `cancelled`,
	// `failed`, `completed`, or `expired`.
	Status RunStepStatus `json:"status,required"`
	// The details of the run step.
	StepDetails RunStepStepDetails `json:"step_details,required"`
	// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
	// that was run.
	ThreadID string `json:"thread_id,required"`
	// The type of run step, which can be either `message_creation` or `tool_calls`.
	Type RunStepType `json:"type,required"`
	// Usage statistics related to the run step. This value will be `null` while the
	// run step's status is `in_progress`.
	Usage RunStepUsage `json:"usage,required,nullable"`
	JSON  runStepJSON  `json:"-"`
}

Represents a step in execution of a run.

func (*RunStep) UnmarshalJSON

func (r *RunStep) UnmarshalJSON(data []byte) (err error)

type RunStepDelta

type RunStepDelta struct {
	// The details of the run step.
	StepDetails RunStepDeltaStepDetails `json:"step_details"`
	JSON        runStepDeltaJSON        `json:"-"`
}

The delta containing the fields that have changed on the run step.

func (*RunStepDelta) UnmarshalJSON

func (r *RunStepDelta) UnmarshalJSON(data []byte) (err error)

type RunStepDeltaEvent

type RunStepDeltaEvent struct {
	// The identifier of the run step, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The delta containing the fields that have changed on the run step.
	Delta RunStepDelta `json:"delta,required"`
	// The object type, which is always `thread.run.step.delta`.
	Object RunStepDeltaEventObject `json:"object,required"`
	JSON   runStepDeltaEventJSON   `json:"-"`
}

Represents a run step delta i.e. any changed fields on a run step during streaming.

func (*RunStepDeltaEvent) UnmarshalJSON

func (r *RunStepDeltaEvent) UnmarshalJSON(data []byte) (err error)

type RunStepDeltaEventObject

type RunStepDeltaEventObject string

The object type, which is always `thread.run.step.delta`.

const (
	RunStepDeltaEventObjectThreadRunStepDelta RunStepDeltaEventObject = "thread.run.step.delta"
)

func (RunStepDeltaEventObject) IsKnown

func (r RunStepDeltaEventObject) IsKnown() bool

type RunStepDeltaMessageDelta

type RunStepDeltaMessageDelta struct {
	// Always `message_creation`.
	Type            RunStepDeltaMessageDeltaType            `json:"type,required"`
	MessageCreation RunStepDeltaMessageDeltaMessageCreation `json:"message_creation"`
	JSON            runStepDeltaMessageDeltaJSON            `json:"-"`
}

Details of the message creation by the run step.

func (*RunStepDeltaMessageDelta) UnmarshalJSON

func (r *RunStepDeltaMessageDelta) UnmarshalJSON(data []byte) (err error)

type RunStepDeltaMessageDeltaMessageCreation

type RunStepDeltaMessageDeltaMessageCreation struct {
	// The ID of the message that was created by this run step.
	MessageID string                                      `json:"message_id"`
	JSON      runStepDeltaMessageDeltaMessageCreationJSON `json:"-"`
}

func (*RunStepDeltaMessageDeltaMessageCreation) UnmarshalJSON

func (r *RunStepDeltaMessageDeltaMessageCreation) UnmarshalJSON(data []byte) (err error)

type RunStepDeltaMessageDeltaType

type RunStepDeltaMessageDeltaType string

Always `message_creation`.

const (
	RunStepDeltaMessageDeltaTypeMessageCreation RunStepDeltaMessageDeltaType = "message_creation"
)

func (RunStepDeltaMessageDeltaType) IsKnown

func (r RunStepDeltaMessageDeltaType) IsKnown() bool

type RunStepDeltaStepDetails

type RunStepDeltaStepDetails struct {
	// Always `message_creation`.
	Type RunStepDeltaStepDetailsType `json:"type,required"`
	// This field can have the runtime type of
	// [RunStepDeltaMessageDeltaMessageCreation].
	MessageCreation interface{} `json:"message_creation,required"`
	// This field can have the runtime type of [[]ToolCallDelta].
	ToolCalls interface{}                 `json:"tool_calls,required"`
	JSON      runStepDeltaStepDetailsJSON `json:"-"`
	// contains filtered or unexported fields
}

The details of the run step.

func (RunStepDeltaStepDetails) AsUnion

AsUnion returns a RunStepDeltaStepDetailsUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are RunStepDeltaMessageDelta, ToolCallDeltaObject.

func (*RunStepDeltaStepDetails) UnmarshalJSON

func (r *RunStepDeltaStepDetails) UnmarshalJSON(data []byte) (err error)

type RunStepDeltaStepDetailsType

type RunStepDeltaStepDetailsType string

Always `message_creation`.

const (
	RunStepDeltaStepDetailsTypeMessageCreation RunStepDeltaStepDetailsType = "message_creation"
	RunStepDeltaStepDetailsTypeToolCalls       RunStepDeltaStepDetailsType = "tool_calls"
)

func (RunStepDeltaStepDetailsType) IsKnown

func (r RunStepDeltaStepDetailsType) IsKnown() bool

type RunStepDeltaStepDetailsUnion

type RunStepDeltaStepDetailsUnion interface {
	// contains filtered or unexported methods
}

The details of the run step.

Union satisfied by RunStepDeltaMessageDelta or ToolCallDeltaObject.

type RunStepInclude

type RunStepInclude string
const (
	RunStepIncludeStepDetailsToolCallsFileSearchResultsContent RunStepInclude = "step_details.tool_calls[*].file_search.results[*].content"
)

func (RunStepInclude) IsKnown

func (r RunStepInclude) IsKnown() bool

type RunStepLastError

type RunStepLastError struct {
	// One of `server_error` or `rate_limit_exceeded`.
	Code RunStepLastErrorCode `json:"code,required"`
	// A human-readable description of the error.
	Message string               `json:"message,required"`
	JSON    runStepLastErrorJSON `json:"-"`
}

The last error associated with this run step. Will be `null` if there are no errors.

func (*RunStepLastError) UnmarshalJSON

func (r *RunStepLastError) UnmarshalJSON(data []byte) (err error)

type RunStepLastErrorCode

type RunStepLastErrorCode string

One of `server_error` or `rate_limit_exceeded`.

const (
	RunStepLastErrorCodeServerError       RunStepLastErrorCode = "server_error"
	RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
)

func (RunStepLastErrorCode) IsKnown

func (r RunStepLastErrorCode) IsKnown() bool

type RunStepObject

type RunStepObject string

The object type, which is always `thread.run.step`.

const (
	RunStepObjectThreadRunStep RunStepObject = "thread.run.step"
)

func (RunStepObject) IsKnown

func (r RunStepObject) IsKnown() bool

type RunStepStatus

type RunStepStatus string

The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.

const (
	RunStepStatusInProgress RunStepStatus = "in_progress"
	RunStepStatusCancelled  RunStepStatus = "cancelled"
	RunStepStatusFailed     RunStepStatus = "failed"
	RunStepStatusCompleted  RunStepStatus = "completed"
	RunStepStatusExpired    RunStepStatus = "expired"
)

func (RunStepStatus) IsKnown

func (r RunStepStatus) IsKnown() bool

type RunStepStepDetails

type RunStepStepDetails struct {
	// Always `message_creation`.
	Type RunStepStepDetailsType `json:"type,required"`
	// This field can have the runtime type of
	// [MessageCreationStepDetailsMessageCreation].
	MessageCreation interface{} `json:"message_creation,required"`
	// This field can have the runtime type of [[]ToolCall].
	ToolCalls interface{}            `json:"tool_calls,required"`
	JSON      runStepStepDetailsJSON `json:"-"`
	// contains filtered or unexported fields
}

The details of the run step.

func (RunStepStepDetails) AsUnion

AsUnion returns a RunStepStepDetailsUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are MessageCreationStepDetails, ToolCallsStepDetails.

func (*RunStepStepDetails) UnmarshalJSON

func (r *RunStepStepDetails) UnmarshalJSON(data []byte) (err error)

type RunStepStepDetailsType

type RunStepStepDetailsType string

Always `message_creation`.

const (
	RunStepStepDetailsTypeMessageCreation RunStepStepDetailsType = "message_creation"
	RunStepStepDetailsTypeToolCalls       RunStepStepDetailsType = "tool_calls"
)

func (RunStepStepDetailsType) IsKnown

func (r RunStepStepDetailsType) IsKnown() bool

type RunStepStepDetailsUnion

type RunStepStepDetailsUnion interface {
	// contains filtered or unexported methods
}

The details of the run step.

Union satisfied by MessageCreationStepDetails or ToolCallsStepDetails.

type RunStepType

type RunStepType string

The type of run step, which can be either `message_creation` or `tool_calls`.

const (
	RunStepTypeMessageCreation RunStepType = "message_creation"
	RunStepTypeToolCalls       RunStepType = "tool_calls"
)

func (RunStepType) IsKnown

func (r RunStepType) IsKnown() bool

type RunStepUsage

type RunStepUsage struct {
	// Number of completion tokens used over the course of the run step.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of prompt tokens used over the course of the run step.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used (prompt + completion).
	TotalTokens int64            `json:"total_tokens,required"`
	JSON        runStepUsageJSON `json:"-"`
}

Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`.

func (*RunStepUsage) UnmarshalJSON

func (r *RunStepUsage) UnmarshalJSON(data []byte) (err error)

type RunTruncationStrategy

type RunTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	Type RunTruncationStrategyType `json:"type,required"`
	// The number of most recent messages from the thread when constructing the context
	// for the run.
	LastMessages int64                     `json:"last_messages,nullable"`
	JSON         runTruncationStrategyJSON `json:"-"`
}

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

func (*RunTruncationStrategy) UnmarshalJSON

func (r *RunTruncationStrategy) UnmarshalJSON(data []byte) (err error)

type RunTruncationStrategyType

type RunTruncationStrategyType string

The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.

const (
	RunTruncationStrategyTypeAuto         RunTruncationStrategyType = "auto"
	RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
)

func (RunTruncationStrategyType) IsKnown

func (r RunTruncationStrategyType) IsKnown() bool

type RunUsage

type RunUsage struct {
	// Number of completion tokens used over the course of the run.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of prompt tokens used over the course of the run.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used (prompt + completion).
	TotalTokens int64        `json:"total_tokens,required"`
	JSON        runUsageJSON `json:"-"`
}

Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).

func (*RunUsage) UnmarshalJSON

func (r *RunUsage) UnmarshalJSON(data []byte) (err error)

type SpeechModel

type SpeechModel = string
const (
	SpeechModelTTS1   SpeechModel = "tts-1"
	SpeechModelTTS1HD SpeechModel = "tts-1-hd"
)

type StaticFileChunkingStrategy

type StaticFileChunkingStrategy struct {
	// The number of tokens that overlap between chunks. The default value is `400`.
	//
	// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
	ChunkOverlapTokens int64 `json:"chunk_overlap_tokens,required"`
	// The maximum number of tokens in each chunk. The default value is `800`. The
	// minimum value is `100` and the maximum value is `4096`.
	MaxChunkSizeTokens int64                          `json:"max_chunk_size_tokens,required"`
	JSON               staticFileChunkingStrategyJSON `json:"-"`
}

func (*StaticFileChunkingStrategy) UnmarshalJSON

func (r *StaticFileChunkingStrategy) UnmarshalJSON(data []byte) (err error)

type StaticFileChunkingStrategyObject

type StaticFileChunkingStrategyObject struct {
	Static StaticFileChunkingStrategy `json:"static,required"`
	// Always `static`.
	Type StaticFileChunkingStrategyObjectType `json:"type,required"`
	JSON staticFileChunkingStrategyObjectJSON `json:"-"`
}

func (*StaticFileChunkingStrategyObject) UnmarshalJSON

func (r *StaticFileChunkingStrategyObject) UnmarshalJSON(data []byte) (err error)

type StaticFileChunkingStrategyObjectType

type StaticFileChunkingStrategyObjectType string

Always `static`.

const (
	StaticFileChunkingStrategyObjectTypeStatic StaticFileChunkingStrategyObjectType = "static"
)

func (StaticFileChunkingStrategyObjectType) IsKnown

type StaticFileChunkingStrategyParam

type StaticFileChunkingStrategyParam struct {
	// The number of tokens that overlap between chunks. The default value is `400`.
	//
	// Note that the overlap must not exceed half of `max_chunk_size_tokens`.
	ChunkOverlapTokens param.Field[int64] `json:"chunk_overlap_tokens,required"`
	// The maximum number of tokens in each chunk. The default value is `800`. The
	// minimum value is `100` and the maximum value is `4096`.
	MaxChunkSizeTokens param.Field[int64] `json:"max_chunk_size_tokens,required"`
}

func (StaticFileChunkingStrategyParam) MarshalJSON

func (r StaticFileChunkingStrategyParam) MarshalJSON() (data []byte, err error)

type Text

type Text struct {
	Annotations []Annotation `json:"annotations,required"`
	// The data that makes up the text.
	Value string   `json:"value,required"`
	JSON  textJSON `json:"-"`
}

func (*Text) UnmarshalJSON

func (r *Text) UnmarshalJSON(data []byte) (err error)

type TextContentBlock

type TextContentBlock struct {
	Text Text `json:"text,required"`
	// Always `text`.
	Type TextContentBlockType `json:"type,required"`
	JSON textContentBlockJSON `json:"-"`
}

The text content that is part of a message.

func (*TextContentBlock) UnmarshalJSON

func (r *TextContentBlock) UnmarshalJSON(data []byte) (err error)

type TextContentBlockParam

type TextContentBlockParam struct {
	// Text content to be sent to the model
	Text param.Field[string] `json:"text,required"`
	// Always `text`.
	Type param.Field[TextContentBlockParamType] `json:"type,required"`
}

The text content that is part of a message.

func (TextContentBlockParam) MarshalJSON

func (r TextContentBlockParam) MarshalJSON() (data []byte, err error)

type TextContentBlockParamType

type TextContentBlockParamType string

Always `text`.

const (
	TextContentBlockParamTypeText TextContentBlockParamType = "text"
)

func (TextContentBlockParamType) IsKnown

func (r TextContentBlockParamType) IsKnown() bool

type TextContentBlockType

type TextContentBlockType string

Always `text`.

const (
	TextContentBlockTypeText TextContentBlockType = "text"
)

func (TextContentBlockType) IsKnown

func (r TextContentBlockType) IsKnown() bool

type TextDelta

type TextDelta struct {
	Annotations []AnnotationDelta `json:"annotations"`
	// The data that makes up the text.
	Value string        `json:"value"`
	JSON  textDeltaJSON `json:"-"`
}

func (*TextDelta) UnmarshalJSON

func (r *TextDelta) UnmarshalJSON(data []byte) (err error)

type TextDeltaBlock

type TextDeltaBlock struct {
	// The index of the content part in the message.
	Index int64 `json:"index,required"`
	// Always `text`.
	Type TextDeltaBlockType `json:"type,required"`
	Text TextDelta          `json:"text"`
	JSON textDeltaBlockJSON `json:"-"`
}

The text content that is part of a message.

func (*TextDeltaBlock) UnmarshalJSON

func (r *TextDeltaBlock) UnmarshalJSON(data []byte) (err error)

type TextDeltaBlockType

type TextDeltaBlockType string

Always `text`.

const (
	TextDeltaBlockTypeText TextDeltaBlockType = "text"
)

func (TextDeltaBlockType) IsKnown

func (r TextDeltaBlockType) IsKnown() bool

type Thread

type Thread struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the thread was created.
	CreatedAt int64 `json:"created_at,required"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The object type, which is always `thread`.
	Object ThreadObject `json:"object,required"`
	// A set of resources that are made available to the assistant's tools in this
	// thread. The resources are specific to the type of tool. For example, the
	// `code_interpreter` tool requires a list of file IDs, while the `file_search`
	// tool requires a list of vector store IDs.
	ToolResources ThreadToolResources `json:"tool_resources,required,nullable"`
	JSON          threadJSON          `json:"-"`
}

Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages).

func (*Thread) UnmarshalJSON

func (r *Thread) UnmarshalJSON(data []byte) (err error)

type ThreadDeleted

type ThreadDeleted struct {
	ID      string              `json:"id,required"`
	Deleted bool                `json:"deleted,required"`
	Object  ThreadDeletedObject `json:"object,required"`
	JSON    threadDeletedJSON   `json:"-"`
}

func (*ThreadDeleted) UnmarshalJSON

func (r *ThreadDeleted) UnmarshalJSON(data []byte) (err error)

type ThreadDeletedObject

type ThreadDeletedObject string
const (
	ThreadDeletedObjectThreadDeleted ThreadDeletedObject = "thread.deleted"
)

func (ThreadDeletedObject) IsKnown

func (r ThreadDeletedObject) IsKnown() bool

type ThreadObject

type ThreadObject string

The object type, which is always `thread`.

const (
	ThreadObjectThread ThreadObject = "thread"
)

func (ThreadObject) IsKnown

func (r ThreadObject) IsKnown() bool

type ThreadRunParamsResponseFormat

type ThreadRunParamsResponseFormat struct {
	// The type of response format being defined: `text`
	Type       param.Field[ThreadRunParamsResponseFormatType] `json:"type,required"`
	JSONSchema param.Field[interface{}]                       `json:"json_schema,required"`
}

An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.

**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.

func (ThreadRunParamsResponseFormat) ImplementsThreadRunParamsResponseFormatUnion

func (r ThreadRunParamsResponseFormat) ImplementsThreadRunParamsResponseFormatUnion()

func (ThreadRunParamsResponseFormat) MarshalJSON

func (r ThreadRunParamsResponseFormat) MarshalJSON() (data []byte, err error)

type ThreadRunParamsResponseFormatType

type ThreadRunParamsResponseFormatType string

The type of response format being defined: `text`, `json_object`, or `json_schema`.

const (
	ThreadRunParamsResponseFormatTypeText       ThreadRunParamsResponseFormatType = "text"
	ThreadRunParamsResponseFormatTypeJSONObject ThreadRunParamsResponseFormatType = "json_object"
	ThreadRunParamsResponseFormatTypeJSONSchema ThreadRunParamsResponseFormatType = "json_schema"
)

func (ThreadRunParamsResponseFormatType) IsKnown

type ThreadRunParamsResponseFormatUnion

type ThreadRunParamsResponseFormatUnion interface {
	ImplementsThreadRunParamsResponseFormatUnion()
}

An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.

**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.

Satisfied by shared.ResponseFormatTextParam, shared.ResponseFormatJSONObjectParam, shared.ResponseFormatJSONSchemaParam, ThreadRunParamsResponseFormat.

type ThreadToolResources

type ThreadToolResources struct {
	CodeInterpreter ThreadToolResourcesCodeInterpreter `json:"code_interpreter"`
	FileSearch      ThreadToolResourcesFileSearch      `json:"file_search"`
	JSON            threadToolResourcesJSON            `json:"-"`
}

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.

func (*ThreadToolResources) UnmarshalJSON

func (r *ThreadToolResources) UnmarshalJSON(data []byte) (err error)

type ThreadToolResourcesCodeInterpreter

type ThreadToolResourcesCodeInterpreter struct {
	// A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
	// available to the `code_interpreter` tool. There can be a maximum of 20 files
	// associated with the tool.
	FileIDs []string                               `json:"file_ids"`
	JSON    threadToolResourcesCodeInterpreterJSON `json:"-"`
}

func (*ThreadToolResourcesCodeInterpreter) UnmarshalJSON

func (r *ThreadToolResourcesCodeInterpreter) UnmarshalJSON(data []byte) (err error)

type ThreadToolResourcesFileSearch

type ThreadToolResourcesFileSearch struct {
	// The
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// attached to this thread. There can be a maximum of 1 vector store attached to
	// the thread.
	VectorStoreIDs []string                          `json:"vector_store_ids"`
	JSON           threadToolResourcesFileSearchJSON `json:"-"`
}

func (*ThreadToolResourcesFileSearch) UnmarshalJSON

func (r *ThreadToolResourcesFileSearch) UnmarshalJSON(data []byte) (err error)

type ToolCall

type ToolCall struct {
	// The ID of the tool call.
	ID string `json:"id,required"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type ToolCallType `json:"type,required"`
	// This field can have the runtime type of
	// [CodeInterpreterToolCallCodeInterpreter].
	CodeInterpreter interface{} `json:"code_interpreter,required"`
	// This field can have the runtime type of [FileSearchToolCallFileSearch].
	FileSearch interface{} `json:"file_search,required"`
	// This field can have the runtime type of [FunctionToolCallFunction].
	Function interface{}  `json:"function,required"`
	JSON     toolCallJSON `json:"-"`
	// contains filtered or unexported fields
}

Details of the Code Interpreter tool call the run step was involved in.

func (ToolCall) AsUnion

func (r ToolCall) AsUnion() ToolCallUnion

AsUnion returns a ToolCallUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterToolCall, FileSearchToolCall, FunctionToolCall.

func (*ToolCall) UnmarshalJSON

func (r *ToolCall) UnmarshalJSON(data []byte) (err error)

type ToolCallDelta

type ToolCallDelta struct {
	// The index of the tool call in the tool calls array.
	Index int64 `json:"index,required"`
	// The ID of the tool call.
	ID string `json:"id"`
	// The type of tool call. This is always going to be `code_interpreter` for this
	// type of tool call.
	Type ToolCallDeltaType `json:"type,required"`
	// This field can have the runtime type of
	// [CodeInterpreterToolCallDeltaCodeInterpreter].
	CodeInterpreter interface{} `json:"code_interpreter,required"`
	// This field can have the runtime type of [interface{}].
	FileSearch interface{} `json:"file_search,required"`
	// This field can have the runtime type of [FunctionToolCallDeltaFunction].
	Function interface{}       `json:"function,required"`
	JSON     toolCallDeltaJSON `json:"-"`
	// contains filtered or unexported fields
}

Details of the Code Interpreter tool call the run step was involved in.

func (ToolCallDelta) AsUnion

func (r ToolCallDelta) AsUnion() ToolCallDeltaUnion

AsUnion returns a ToolCallDeltaUnion interface which you can cast to the specific types for more type safety.

Possible runtime types of the union are CodeInterpreterToolCallDelta, FileSearchToolCallDelta, FunctionToolCallDelta.

func (*ToolCallDelta) UnmarshalJSON

func (r *ToolCallDelta) UnmarshalJSON(data []byte) (err error)

type ToolCallDeltaObject

type ToolCallDeltaObject struct {
	// Always `tool_calls`.
	Type ToolCallDeltaObjectType `json:"type,required"`
	// An array of tool calls the run step was involved in. These can be associated
	// with one of three types of tools: `code_interpreter`, `file_search`, or
	// `function`.
	ToolCalls []ToolCallDelta         `json:"tool_calls"`
	JSON      toolCallDeltaObjectJSON `json:"-"`
}

Details of the tool call.

func (*ToolCallDeltaObject) UnmarshalJSON

func (r *ToolCallDeltaObject) UnmarshalJSON(data []byte) (err error)

type ToolCallDeltaObjectType

type ToolCallDeltaObjectType string

Always `tool_calls`.

const (
	ToolCallDeltaObjectTypeToolCalls ToolCallDeltaObjectType = "tool_calls"
)

func (ToolCallDeltaObjectType) IsKnown

func (r ToolCallDeltaObjectType) IsKnown() bool

type ToolCallDeltaType

type ToolCallDeltaType string

The type of tool call: `code_interpreter`, `file_search`, or `function`.

const (
	ToolCallDeltaTypeCodeInterpreter ToolCallDeltaType = "code_interpreter"
	ToolCallDeltaTypeFileSearch      ToolCallDeltaType = "file_search"
	ToolCallDeltaTypeFunction        ToolCallDeltaType = "function"
)

func (ToolCallDeltaType) IsKnown

func (r ToolCallDeltaType) IsKnown() bool

type ToolCallDeltaUnion

type ToolCallDeltaUnion interface {
	// contains filtered or unexported methods
}

Details of the Code Interpreter tool call the run step was involved in.

Union satisfied by CodeInterpreterToolCallDelta, FileSearchToolCallDelta or FunctionToolCallDelta.

type ToolCallType

type ToolCallType string

The type of tool call: `code_interpreter`, `file_search`, or `function`.

const (
	ToolCallTypeCodeInterpreter ToolCallType = "code_interpreter"
	ToolCallTypeFileSearch      ToolCallType = "file_search"
	ToolCallTypeFunction        ToolCallType = "function"
)

func (ToolCallType) IsKnown

func (r ToolCallType) IsKnown() bool

type ToolCallUnion

type ToolCallUnion interface {
	// contains filtered or unexported methods
}

Details of the Code Interpreter tool call the run step was involved in.

Union satisfied by CodeInterpreterToolCall, FileSearchToolCall or FunctionToolCall.

type ToolCallsStepDetails

type ToolCallsStepDetails struct {
	// An array of tool calls the run step was involved in. These can be associated
	// with one of three types of tools: `code_interpreter`, `file_search`, or
	// `function`.
	ToolCalls []ToolCall `json:"tool_calls,required"`
	// Always `tool_calls`.
	Type ToolCallsStepDetailsType `json:"type,required"`
	JSON toolCallsStepDetailsJSON `json:"-"`
}

Details of the tool call.

func (*ToolCallsStepDetails) UnmarshalJSON

func (r *ToolCallsStepDetails) UnmarshalJSON(data []byte) (err error)

type ToolCallsStepDetailsType

type ToolCallsStepDetailsType string

Always `tool_calls`.

const (
	ToolCallsStepDetailsTypeToolCalls ToolCallsStepDetailsType = "tool_calls"
)

func (ToolCallsStepDetailsType) IsKnown

func (r ToolCallsStepDetailsType) IsKnown() bool

type Transcription

type Transcription struct {
	// The transcribed text.
	Text string            `json:"text,required"`
	JSON transcriptionJSON `json:"-"`
}

Represents a transcription response returned by model, based on the provided input.

func (*Transcription) UnmarshalJSON

func (r *Transcription) UnmarshalJSON(data []byte) (err error)

type Translation

type Translation struct {
	Text string          `json:"text,required"`
	JSON translationJSON `json:"-"`
}

func (*Translation) UnmarshalJSON

func (r *Translation) UnmarshalJSON(data []byte) (err error)

type Upload

type Upload struct {
	// The Upload unique identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The intended number of bytes to be uploaded.
	Bytes int64 `json:"bytes,required"`
	// The Unix timestamp (in seconds) for when the Upload was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the Upload expires.
	ExpiresAt int64 `json:"expires_at,required"`
	// The name of the file to be uploaded.
	Filename string `json:"filename,required"`
	// The object type, which is always "upload".
	Object UploadObject `json:"object,required"`
	// The intended purpose of the file.
	// [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose)
	// for acceptable values.
	Purpose string `json:"purpose,required"`
	// The status of the Upload.
	Status UploadStatus `json:"status,required"`
	// The ready File object after the Upload is completed.
	File FileObject `json:"file,nullable"`
	JSON uploadJSON `json:"-"`
}

The Upload object can accept byte chunks in the form of Parts.

func (*Upload) UnmarshalJSON

func (r *Upload) UnmarshalJSON(data []byte) (err error)

type UploadCompleteParams

type UploadCompleteParams struct {
	// The ordered list of Part IDs.
	PartIDs param.Field[[]string] `json:"part_ids,required"`
	// The optional md5 checksum for the file contents to verify if the bytes uploaded
	// matches what you expect.
	Md5 param.Field[string] `json:"md5"`
}

func (UploadCompleteParams) MarshalJSON

func (r UploadCompleteParams) MarshalJSON() (data []byte, err error)

type UploadNewParams

type UploadNewParams struct {
	// The number of bytes in the file you are uploading.
	Bytes param.Field[int64] `json:"bytes,required"`
	// The name of the file to upload.
	Filename param.Field[string] `json:"filename,required"`
	// The MIME type of the file.
	//
	// This must fall within the supported MIME types for your file purpose. See the
	// supported MIME types for assistants and vision.
	MimeType param.Field[string] `json:"mime_type,required"`
	// The intended purpose of the uploaded file.
	//
	// See the
	// [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
	Purpose param.Field[FilePurpose] `json:"purpose,required"`
}

func (UploadNewParams) MarshalJSON

func (r UploadNewParams) MarshalJSON() (data []byte, err error)

type UploadObject

type UploadObject string

The object type, which is always "upload".

const (
	UploadObjectUpload UploadObject = "upload"
)

func (UploadObject) IsKnown

func (r UploadObject) IsKnown() bool

type UploadPart

type UploadPart struct {
	// The upload Part unique identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the Part was created.
	CreatedAt int64 `json:"created_at,required"`
	// The object type, which is always `upload.part`.
	Object UploadPartObject `json:"object,required"`
	// The ID of the Upload object that this Part was added to.
	UploadID string         `json:"upload_id,required"`
	JSON     uploadPartJSON `json:"-"`
}

The upload Part represents a chunk of bytes we can add to an Upload object.

func (*UploadPart) UnmarshalJSON

func (r *UploadPart) UnmarshalJSON(data []byte) (err error)

type UploadPartNewParams

type UploadPartNewParams struct {
	// The chunk of bytes for this Part.
	Data param.Field[io.Reader] `json:"data,required" format:"binary"`
}

func (UploadPartNewParams) MarshalMultipart

func (r UploadPartNewParams) MarshalMultipart() (data []byte, contentType string, err error)

type UploadPartObject

type UploadPartObject string

The object type, which is always `upload.part`.

const (
	UploadPartObjectUploadPart UploadPartObject = "upload.part"
)

func (UploadPartObject) IsKnown

func (r UploadPartObject) IsKnown() bool

type UploadPartService

type UploadPartService struct {
	Options []option.RequestOption
}

UploadPartService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly, and instead use the NewUploadPartService method instead.

func NewUploadPartService

func NewUploadPartService(opts ...option.RequestOption) (r *UploadPartService)

NewUploadPartService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*UploadPartService) New

func (r *UploadPartService) New(ctx context.Context, uploadID string, body UploadPartNewParams, opts ...option.RequestOption) (res *UploadPart, err error)

Adds a [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload.

Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.

It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).

type UploadService

type UploadService struct {
	Options []option.RequestOption
	Parts   *UploadPartService
}

UploadService contains methods and other services that help with interacting with the openai API.

Note, unlike clients, this service does not read variables from the environment automatically. You should not instantiate this service directly, and instead use the NewUploadService method instead.

func NewUploadService

func NewUploadService(opts ...option.RequestOption) (r *UploadService)

NewUploadService generates a new service that applies the given options to each request. These options are applied after the parent client's options (if there is one), and before any request-specific options.

func (*UploadService) Cancel

func (r *UploadService) Cancel(ctx context.Context, uploadID string, opts ...option.RequestOption) (res *Upload, err error)

Cancels the Upload. No Parts may be added after an Upload is cancelled.

func (*UploadService) Complete

func (r *UploadService) Complete(ctx context.Context, uploadID string, body UploadCompleteParams, opts ...option.RequestOption) (res *Upload, err error)

Completes the [Upload](https://platform.openai.com/docs/api-reference/uploads/object).

Within the returned Upload object, there is a nested [File](https://platform.openai.com/docs/api-reference/files/object) object that is ready to use in the rest of the platform.

You can specify the order of the Parts by passing in an ordered list of the Part IDs.

The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.

func (*UploadService) New

func (r *UploadService) New(ctx context.Context, body UploadNewParams, opts ...option.RequestOption) (res *Upload, err error)

Creates an intermediate [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object that you can add [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires an hour after you create it.

Once you complete the Upload, we will create a [File](https://platform.openai.com/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.

For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:

- [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)

For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](https://platform.openai.com/docs/api-reference/files/create).

type UploadStatus

type UploadStatus string

The status of the Upload.

const (
	UploadStatusPending   UploadStatus = "pending"
	UploadStatusCompleted UploadStatus = "completed"
	UploadStatusCancelled UploadStatus = "cancelled"
	UploadStatusExpired   UploadStatus = "expired"
)

func (UploadStatus) IsKnown

func (r UploadStatus) IsKnown() bool

type VectorStore

type VectorStore struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the vector store was created.
	CreatedAt  int64                 `json:"created_at,required"`
	FileCounts VectorStoreFileCounts `json:"file_counts,required"`
	// The Unix timestamp (in seconds) for when the vector store was last active.
	LastActiveAt int64 `json:"last_active_at,required,nullable"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format. Keys
	// can be a maximum of 64 characters long and values can be a maximum of 512
	// characters long.
	Metadata interface{} `json:"metadata,required,nullable"`
	// The name of the vector store.
	Name string `json:"name,required"`
	// The object type, which is always `vector_store`.
	Object VectorStoreObject `json:"object,required"`
	// The status of the vector store, which can be either `expired`, `in_progress`, or
	// `completed`. A status of `completed` indicates that the vector store is ready
	// for use.
	Status VectorStoreStatus `json:"status,required"`
	// The total number of bytes used by the files in the vector store.
	UsageBytes int64 `json:"usage_bytes,required"`
	// The expiration policy for a vector store.
	ExpiresAfter VectorStoreExpiresAfter `json:"expires_after"`
	// The Unix timestamp (in seconds) for when the vector store will expire.
	ExpiresAt int64           `json:"expires_at,nullable"`
	JSON      vectorStoreJSON `json:"-"`
}

A vector store is a collection of processed files that can be used by the `file_search` tool.

func (*VectorStore) UnmarshalJSON

func (r *VectorStore) UnmarshalJSON(data []byte) (err error)

type VectorStoreDeleted

type VectorStoreDeleted struct {
	ID      string                   `json:"id,required"`
	Deleted bool                     `json:"deleted,required"`
	Object  VectorStoreDeletedObject `json:"object,required"`
	JSON    vectorStoreDeletedJSON   `json:"-"`
}

func (*VectorStoreDeleted) UnmarshalJSON

func (r *VectorStoreDeleted) UnmarshalJSON(data []byte) (err error)

type VectorStoreDeletedObject

type VectorStoreDeletedObject string
const (
	VectorStoreDeletedObjectVectorStoreDeleted VectorStoreDeletedObject = "vector_store.deleted"
)

func (VectorStoreDeletedObject) IsKnown

func (r VectorStoreDeletedObject) IsKnown() bool

type VectorStoreExpiresAfter

type VectorStoreExpiresAfter struct {
	// Anchor timestamp after which the expiration policy applies. Supported anchors:
	// `last_active_at`.
	Anchor VectorStoreExpiresAfterAnchor `json:"anchor,required"`
	// The number of days after the anchor time that the vector store will expire.
	Days int64                       `json:"days,required"`
	JSON vectorStoreExpiresAfterJSON `json:"-"`
}

The expiration policy for a vector store.

func (*VectorStoreExpiresAfter) UnmarshalJSON

func (r *VectorStoreExpiresAfter) UnmarshalJSON(data []byte) (err error)

type VectorStoreExpiresAfterAnchor

// VectorStoreExpiresAfterAnchor is the anchor timestamp after which the
// expiration policy applies. Supported anchors: `last_active_at`.
type VectorStoreExpiresAfterAnchor string

Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.

const (
	// The expiration window is measured from the vector store's last-active time.
	VectorStoreExpiresAfterAnchorLastActiveAt VectorStoreExpiresAfterAnchor = "last_active_at"
)

func (VectorStoreExpiresAfterAnchor) IsKnown

func (r VectorStoreExpiresAfterAnchor) IsKnown() bool

type VectorStoreFile

// VectorStoreFile describes a single file attached to a vector store.
type VectorStoreFile struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the vector store file was created.
	CreatedAt int64 `json:"created_at,required"`
	// The last error associated with this vector store file. Will be `null` if there
	// are no errors.
	LastError VectorStoreFileLastError `json:"last_error,required,nullable"`
	// The object type, which is always `vector_store.file`.
	Object VectorStoreFileObject `json:"object,required"`
	// The status of the vector store file, which can be either `in_progress`,
	// `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
	// vector store file is ready for use.
	Status VectorStoreFileStatus `json:"status,required"`
	// The total vector store usage in bytes. Note that this may be different from the
	// original file size.
	UsageBytes int64 `json:"usage_bytes,required"`
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// that the [File](https://platform.openai.com/docs/api-reference/files) is
	// attached to.
	VectorStoreID string `json:"vector_store_id,required"`
	// The strategy used to chunk the file.
	ChunkingStrategy FileChunkingStrategy `json:"chunking_strategy"`
	JSON             vectorStoreFileJSON  `json:"-"`
}

A file attached to a vector store.

func (*VectorStoreFile) UnmarshalJSON

func (r *VectorStoreFile) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileBatch

// VectorStoreFileBatch describes a batch of files attached to a vector store.
type VectorStoreFileBatch struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The Unix timestamp (in seconds) for when the vector store files batch was
	// created.
	CreatedAt  int64                          `json:"created_at,required"`
	// Counts of the batch's files broken down by processing status.
	FileCounts VectorStoreFileBatchFileCounts `json:"file_counts,required"`
	// The object type, which is always `vector_store.files_batch`.
	Object VectorStoreFileBatchObject `json:"object,required"`
	// The status of the vector store files batch, which can be either `in_progress`,
	// `completed`, `cancelled` or `failed`.
	Status VectorStoreFileBatchStatus `json:"status,required"`
	// The ID of the
	// [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
	// that the [File](https://platform.openai.com/docs/api-reference/files) is
	// attached to.
	VectorStoreID string                   `json:"vector_store_id,required"`
	JSON          vectorStoreFileBatchJSON `json:"-"`
}

A batch of files attached to a vector store.

func (*VectorStoreFileBatch) UnmarshalJSON

func (r *VectorStoreFileBatch) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileBatchFileCounts

// VectorStoreFileBatchFileCounts breaks down a file batch by processing status.
type VectorStoreFileBatchFileCounts struct {
	// The number of files that were cancelled.
	Cancelled int64 `json:"cancelled,required"`
	// The number of files that have been processed.
	Completed int64 `json:"completed,required"`
	// The number of files that have failed to process.
	Failed int64 `json:"failed,required"`
	// The number of files that are currently being processed.
	InProgress int64 `json:"in_progress,required"`
	// The total number of files.
	Total int64                              `json:"total,required"`
	JSON  vectorStoreFileBatchFileCountsJSON `json:"-"`
}

func (*VectorStoreFileBatchFileCounts) UnmarshalJSON

func (r *VectorStoreFileBatchFileCounts) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileBatchObject

// VectorStoreFileBatchObject is the object type of a file batch; its only known
// value is `vector_store.files_batch`.
type VectorStoreFileBatchObject string

The object type, which is always `vector_store.files_batch`.

const (
	// Note: the wire value is `vector_store.files_batch` (plural "files").
	VectorStoreFileBatchObjectVectorStoreFilesBatch VectorStoreFileBatchObject = "vector_store.files_batch"
)

func (VectorStoreFileBatchObject) IsKnown

func (r VectorStoreFileBatchObject) IsKnown() bool

type VectorStoreFileBatchStatus

// VectorStoreFileBatchStatus is the processing status of a vector store files
// batch: `in_progress`, `completed`, `cancelled` or `failed`.
type VectorStoreFileBatchStatus string

The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.

// Known values for VectorStoreFileBatchStatus.
const (
	VectorStoreFileBatchStatusInProgress VectorStoreFileBatchStatus = "in_progress"
	VectorStoreFileBatchStatusCompleted  VectorStoreFileBatchStatus = "completed"
	VectorStoreFileBatchStatusCancelled  VectorStoreFileBatchStatus = "cancelled"
	VectorStoreFileBatchStatusFailed     VectorStoreFileBatchStatus = "failed"
)

func (VectorStoreFileBatchStatus) IsKnown

func (r VectorStoreFileBatchStatus) IsKnown() bool

type VectorStoreFileCounts

// VectorStoreFileCounts summarizes a vector store's files by processing status.
type VectorStoreFileCounts struct {
	// The number of files that were cancelled.
	Cancelled int64 `json:"cancelled,required"`
	// The number of files that have been successfully processed.
	Completed int64 `json:"completed,required"`
	// The number of files that have failed to process.
	Failed int64 `json:"failed,required"`
	// The number of files that are currently being processed.
	InProgress int64 `json:"in_progress,required"`
	// The total number of files.
	Total int64                     `json:"total,required"`
	JSON  vectorStoreFileCountsJSON `json:"-"`
}

func (*VectorStoreFileCounts) UnmarshalJSON

func (r *VectorStoreFileCounts) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileDeleted

type VectorStoreFileDeleted struct {
	// The identifier of the deleted vector store file.
	ID      string                       `json:"id,required"`
	// Whether the vector store file was successfully deleted.
	Deleted bool                         `json:"deleted,required"`
	// The object type, which is always `vector_store.file.deleted`.
	Object  VectorStoreFileDeletedObject `json:"object,required"`
	JSON    vectorStoreFileDeletedJSON   `json:"-"`
}

func (*VectorStoreFileDeleted) UnmarshalJSON

func (r *VectorStoreFileDeleted) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileDeletedObject

// VectorStoreFileDeletedObject is the object type of a vector store file
// deletion response; its only known value is `vector_store.file.deleted`.
type VectorStoreFileDeletedObject string
const (
	VectorStoreFileDeletedObjectVectorStoreFileDeleted VectorStoreFileDeletedObject = "vector_store.file.deleted"
)

func (VectorStoreFileDeletedObject) IsKnown

func (r VectorStoreFileDeletedObject) IsKnown() bool

type VectorStoreFileLastError

// VectorStoreFileLastError is the last error associated with a vector store
// file; it is `null` when the file has no errors.
type VectorStoreFileLastError struct {
	// One of `server_error`, `unsupported_file`, or `invalid_file` (see the
	// VectorStoreFileLastErrorCode constants).
	Code VectorStoreFileLastErrorCode `json:"code,required"`
	// A human-readable description of the error.
	Message string                       `json:"message,required"`
	JSON    vectorStoreFileLastErrorJSON `json:"-"`
}

The last error associated with this vector store file. Will be `null` if there are no errors.

func (*VectorStoreFileLastError) UnmarshalJSON

func (r *VectorStoreFileLastError) UnmarshalJSON(data []byte) (err error)

type VectorStoreFileLastErrorCode

// VectorStoreFileLastErrorCode identifies why a vector store file failed to
// process: one of `server_error`, `unsupported_file`, or `invalid_file`.
type VectorStoreFileLastErrorCode string

One of `server_error`, `unsupported_file`, or `invalid_file`.

// Known values for VectorStoreFileLastError.Code.
const (
	VectorStoreFileLastErrorCodeServerError     VectorStoreFileLastErrorCode = "server_error"
	VectorStoreFileLastErrorCodeUnsupportedFile VectorStoreFileLastErrorCode = "unsupported_file"
	VectorStoreFileLastErrorCodeInvalidFile     VectorStoreFileLastErrorCode = "invalid_file"
)

func (VectorStoreFileLastErrorCode) IsKnown

func (r VectorStoreFileLastErrorCode) IsKnown() bool

type VectorStoreFileObject

// VectorStoreFileObject is the object type of a vector store file; always
// `vector_store.file`.
type VectorStoreFileObject string

The object type, which is always `vector_store.file`.

// The only known value for VectorStoreFileObject.
const (
	VectorStoreFileObjectVectorStoreFile VectorStoreFileObject = "vector_store.file"
)

func (VectorStoreFileObject) IsKnown

func (r VectorStoreFileObject) IsKnown() bool

type VectorStoreFileStatus

// VectorStoreFileStatus is the processing status of a vector store file:
// `in_progress`, `completed`, `cancelled`, or `failed`. A file is ready for use
// once its status is `completed`.
type VectorStoreFileStatus string

The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.

// Known values for VectorStoreFileStatus.
const (
	VectorStoreFileStatusInProgress VectorStoreFileStatus = "in_progress"
	VectorStoreFileStatusCompleted  VectorStoreFileStatus = "completed"
	VectorStoreFileStatusCancelled  VectorStoreFileStatus = "cancelled"
	VectorStoreFileStatusFailed     VectorStoreFileStatus = "failed"
)

func (VectorStoreFileStatus) IsKnown

func (r VectorStoreFileStatus) IsKnown() bool

type VectorStoreObject

// VectorStoreObject is the object type of a vector store; always `vector_store`.
type VectorStoreObject string

The object type, which is always `vector_store`.

// The only known value for VectorStoreObject.
const (
	VectorStoreObjectVectorStore VectorStoreObject = "vector_store"
)

func (VectorStoreObject) IsKnown

func (r VectorStoreObject) IsKnown() bool

type VectorStoreStatus

// VectorStoreStatus is the status of a vector store: `expired`, `in_progress`,
// or `completed`. A `completed` vector store is ready for use.
type VectorStoreStatus string

The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.

// Known values for VectorStoreStatus.
const (
	VectorStoreStatusExpired    VectorStoreStatus = "expired"
	VectorStoreStatusInProgress VectorStoreStatus = "in_progress"
	VectorStoreStatusCompleted  VectorStoreStatus = "completed"
)

func (VectorStoreStatus) IsKnown

func (r VectorStoreStatus) IsKnown() bool

Directories

Path Synopsis
Package azure provides configuration options so you can connect and use Azure OpenAI using the [openai.Client].
Package azure provides configuration options so you can connect and use Azure OpenAI using the [openai.Client].
packages

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL