Documentation ¶
Index ¶
- func GetBaseChatRequestApiFormatEnumStringValues() []string
- func GetBaseChatResponseApiFormatEnumStringValues() []string
- func GetChatContentTypeEnumStringValues() []string
- func GetCohereChatResponseFinishReasonEnumStringValues() []string
- func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string
- func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string
- func GetCohereMessageRoleEnumStringValues() []string
- func GetEmbedTextDetailsInputTypeEnumStringValues() []string
- func GetEmbedTextDetailsTruncateEnumStringValues() []string
- func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string
- func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string
- func GetServingModeServingTypeEnumStringValues() []string
- func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string
- func GetSummarizeTextDetailsFormatEnumStringValues() []string
- func GetSummarizeTextDetailsLengthEnumStringValues() []string
- type BaseChatRequest
- type BaseChatRequestApiFormatEnum
- type BaseChatResponse
- type BaseChatResponseApiFormatEnum
- type ChatChoice
- type ChatContent
- type ChatContentTypeEnum
- type ChatDetails
- type ChatRequest
- func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
- func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, ...) (http.Request, error)
- func (request ChatRequest) RetryPolicy() *common.RetryPolicy
- func (request ChatRequest) String() string
- func (request ChatRequest) ValidateEnumValue() (bool, error)
- type ChatResponse
- type ChatResult
- type Choice
- type Citation
- type CohereChatRequest
- type CohereChatResponse
- type CohereChatResponseFinishReasonEnum
- type CohereLlmInferenceRequest
- type CohereLlmInferenceRequestReturnLikelihoodsEnum
- type CohereLlmInferenceRequestTruncateEnum
- type CohereLlmInferenceResponse
- type CohereMessage
- type CohereMessageRoleEnum
- type DedicatedServingMode
- type EmbedTextDetails
- type EmbedTextDetailsInputTypeEnum
- type EmbedTextDetailsTruncateEnum
- type EmbedTextRequest
- func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
- func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, ...) (http.Request, error)
- func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy
- func (request EmbedTextRequest) String() string
- func (request EmbedTextRequest) ValidateEnumValue() (bool, error)
- type EmbedTextResponse
- type EmbedTextResult
- type GenerateTextDetails
- type GenerateTextRequest
- func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
- func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, ...) (http.Request, error)
- func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy
- func (request GenerateTextRequest) String() string
- func (request GenerateTextRequest) ValidateEnumValue() (bool, error)
- type GenerateTextResponse
- type GenerateTextResult
- type GeneratedText
- type GenerativeAiInferenceClient
- func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)
- func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)
- func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)
- func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider
- func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)
- func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)
- func (client *GenerativeAiInferenceClient) SetRegion(region string)
- func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)
- type GenericChatRequest
- type GenericChatResponse
- type LlamaLlmInferenceRequest
- type LlamaLlmInferenceResponse
- type LlmInferenceRequest
- type LlmInferenceRequestRuntimeTypeEnum
- type LlmInferenceResponse
- type LlmInferenceResponseRuntimeTypeEnum
- type Logprobs
- type Message
- type OnDemandServingMode
- type SearchQuery
- type ServingMode
- type ServingModeServingTypeEnum
- type SummarizeTextDetails
- type SummarizeTextDetailsExtractivenessEnum
- type SummarizeTextDetailsFormatEnum
- type SummarizeTextDetailsLengthEnum
- type SummarizeTextRequest
- func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
- func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, ...) (http.Request, error)
- func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy
- func (request SummarizeTextRequest) String() string
- func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)
- type SummarizeTextResponse
- type SummarizeTextResult
- type TextContent
- type TokenLikelihood
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GetBaseChatRequestApiFormatEnumStringValues ¶ added in v65.63.2
func GetBaseChatRequestApiFormatEnumStringValues() []string
GetBaseChatRequestApiFormatEnumStringValues Enumerates the set of string values for BaseChatRequestApiFormatEnum
func GetBaseChatResponseApiFormatEnumStringValues ¶ added in v65.63.2
func GetBaseChatResponseApiFormatEnumStringValues() []string
GetBaseChatResponseApiFormatEnumStringValues Enumerates the set of string values for BaseChatResponseApiFormatEnum
func GetChatContentTypeEnumStringValues ¶ added in v65.63.2
func GetChatContentTypeEnumStringValues() []string
GetChatContentTypeEnumStringValues Enumerates the set of string values for ChatContentTypeEnum
func GetCohereChatResponseFinishReasonEnumStringValues ¶ added in v65.63.2
func GetCohereChatResponseFinishReasonEnumStringValues() []string
GetCohereChatResponseFinishReasonEnumStringValues Enumerates the set of string values for CohereChatResponseFinishReasonEnum
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues ¶
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string
GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestReturnLikelihoodsEnum
func GetCohereLlmInferenceRequestTruncateEnumStringValues ¶
func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string
GetCohereLlmInferenceRequestTruncateEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestTruncateEnum
func GetCohereMessageRoleEnumStringValues ¶ added in v65.63.2
func GetCohereMessageRoleEnumStringValues() []string
GetCohereMessageRoleEnumStringValues Enumerates the set of string values for CohereMessageRoleEnum
func GetEmbedTextDetailsInputTypeEnumStringValues ¶
func GetEmbedTextDetailsInputTypeEnumStringValues() []string
GetEmbedTextDetailsInputTypeEnumStringValues Enumerates the set of string values for EmbedTextDetailsInputTypeEnum
func GetEmbedTextDetailsTruncateEnumStringValues ¶
func GetEmbedTextDetailsTruncateEnumStringValues() []string
GetEmbedTextDetailsTruncateEnumStringValues Enumerates the set of string values for EmbedTextDetailsTruncateEnum
func GetLlmInferenceRequestRuntimeTypeEnumStringValues ¶
func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string
GetLlmInferenceRequestRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceRequestRuntimeTypeEnum
func GetLlmInferenceResponseRuntimeTypeEnumStringValues ¶
func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string
GetLlmInferenceResponseRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceResponseRuntimeTypeEnum
func GetServingModeServingTypeEnumStringValues ¶
func GetServingModeServingTypeEnumStringValues() []string
GetServingModeServingTypeEnumStringValues Enumerates the set of string values for ServingModeServingTypeEnum
func GetSummarizeTextDetailsExtractivenessEnumStringValues ¶
func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string
GetSummarizeTextDetailsExtractivenessEnumStringValues Enumerates the set of string values for SummarizeTextDetailsExtractivenessEnum
func GetSummarizeTextDetailsFormatEnumStringValues ¶
func GetSummarizeTextDetailsFormatEnumStringValues() []string
GetSummarizeTextDetailsFormatEnumStringValues Enumerates the set of string values for SummarizeTextDetailsFormatEnum
func GetSummarizeTextDetailsLengthEnumStringValues ¶
func GetSummarizeTextDetailsLengthEnumStringValues() []string
GetSummarizeTextDetailsLengthEnumStringValues Enumerates the set of string values for SummarizeTextDetailsLengthEnum
Types ¶
type BaseChatRequest ¶ added in v65.63.2
type BaseChatRequest interface { }
BaseChatRequest Base class for chat inference requests
type BaseChatRequestApiFormatEnum ¶ added in v65.63.2
type BaseChatRequestApiFormatEnum string
BaseChatRequestApiFormatEnum Enum with underlying type: string
const (
    BaseChatRequestApiFormatCohere  BaseChatRequestApiFormatEnum = "COHERE"
    BaseChatRequestApiFormatGeneric BaseChatRequestApiFormatEnum = "GENERIC"
)
Set of constants representing the allowable values for BaseChatRequestApiFormatEnum
func GetBaseChatRequestApiFormatEnumValues ¶ added in v65.63.2
func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum
GetBaseChatRequestApiFormatEnumValues Enumerates the set of values for BaseChatRequestApiFormatEnum
func GetMappingBaseChatRequestApiFormatEnum ¶ added in v65.63.2
func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)
GetMappingBaseChatRequestApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum
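A quick usage sketch of the two helpers above (assumes an importing caller and the fmt package; the input string is arbitrary):

    if format, ok := generativeaiinference.GetMappingBaseChatRequestApiFormatEnum("cohere"); ok {
        fmt.Println(format) // "COHERE"; the comparison is case-insensitive
    } else {
        // Reject the value and report the accepted spellings.
        fmt.Println("expected one of:", generativeaiinference.GetBaseChatRequestApiFormatEnumStringValues())
    }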
type BaseChatResponse ¶ added in v65.63.2
type BaseChatResponse interface { }
BaseChatResponse Base class for chat inference responses
type BaseChatResponseApiFormatEnum ¶ added in v65.63.2
type BaseChatResponseApiFormatEnum string
BaseChatResponseApiFormatEnum Enum with underlying type: string
const (
    BaseChatResponseApiFormatCohere  BaseChatResponseApiFormatEnum = "COHERE"
    BaseChatResponseApiFormatGeneric BaseChatResponseApiFormatEnum = "GENERIC"
)
Set of constants representing the allowable values for BaseChatResponseApiFormatEnum
func GetBaseChatResponseApiFormatEnumValues ¶ added in v65.63.2
func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum
GetBaseChatResponseApiFormatEnumValues Enumerates the set of values for BaseChatResponseApiFormatEnum
func GetMappingBaseChatResponseApiFormatEnum ¶ added in v65.63.2
func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)
GetMappingBaseChatResponseApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type ChatChoice ¶ added in v65.63.2
type ChatChoice struct {

    // The index of the chat.
    Index *int `mandatory:"true" json:"index"`

    Message *Message `mandatory:"true" json:"message"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}
ChatChoice Represents a single instance of the chat response.
func (ChatChoice) String ¶ added in v65.63.2
func (m ChatChoice) String() string
func (ChatChoice) ValidateEnumValue ¶ added in v65.63.2
func (m ChatChoice) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type ChatContent ¶ added in v65.63.2
type ChatContent interface { }
ChatContent The base class for the chat content.
type ChatContentTypeEnum ¶ added in v65.63.2
type ChatContentTypeEnum string
ChatContentTypeEnum Enum with underlying type: string
const (
ChatContentTypeText ChatContentTypeEnum = "TEXT"
)
Set of constants representing the allowable values for ChatContentTypeEnum
func GetChatContentTypeEnumValues ¶ added in v65.63.2
func GetChatContentTypeEnumValues() []ChatContentTypeEnum
GetChatContentTypeEnumValues Enumerates the set of values for ChatContentTypeEnum
func GetMappingChatContentTypeEnum ¶ added in v65.63.2
func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)
GetMappingChatContentTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type ChatDetails ¶ added in v65.63.2
type ChatDetails struct {

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    ChatRequest BaseChatRequest `mandatory:"false" json:"chatRequest"`
}
ChatDetails Details of the conversation for the model to respond.
func (ChatDetails) String ¶ added in v65.63.2
func (m ChatDetails) String() string
func (*ChatDetails) UnmarshalJSON ¶ added in v65.63.2
func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (ChatDetails) ValidateEnumValue ¶ added in v65.63.2
func (m ChatDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type ChatRequest ¶ added in v65.63.2
type ChatRequest struct {

    // Details of the conversation for the model to respond.
    ChatDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}
ChatRequest wrapper for the Chat operation
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/Chat.go.html to see an example of how to use ChatRequest.
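A minimal end-to-end sketch of building and sending a ChatRequest, assuming the default OCI config file at ~/.oci/config; the compartment OCID and model name are placeholders, and the ModelId field comes from the OnDemandServingMode type's own documentation rather than this page:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/oracle/oci-go-sdk/v65/common"
        "github.com/oracle/oci-go-sdk/v65/generativeaiinference"
    )

    func main() {
        // Build the client from the default configuration provider.
        client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
        if err != nil {
            log.Fatal(err)
        }

        resp, err := client.Chat(context.Background(), generativeaiinference.ChatRequest{
            ChatDetails: generativeaiinference.ChatDetails{
                CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder OCID
                ServingMode: generativeaiinference.OnDemandServingMode{
                    ModelId: common.String("cohere.command-r-16k"), // placeholder model name
                },
                ChatRequest: generativeaiinference.CohereChatRequest{
                    Message:     common.String("Tell me one fact about Oracle Cloud."),
                    MaxTokens:   common.Int(200),
                    Temperature: common.Float64(0.5),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.ChatResult)
    }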
func (ChatRequest) BinaryRequestBody ¶ added in v65.63.2
func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (ChatRequest) HTTPRequest ¶ added in v65.63.2
func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (ChatRequest) RetryPolicy ¶ added in v65.63.2
func (request ChatRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (ChatRequest) String ¶ added in v65.63.2
func (request ChatRequest) String() string
func (ChatRequest) ValidateEnumValue ¶ added in v65.63.2
func (request ChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type ChatResponse ¶ added in v65.63.2
type ChatResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The ChatResult instance
    ChatResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
ChatResponse wrapper for the Chat operation
func (ChatResponse) HTTPResponse ¶ added in v65.63.2
func (response ChatResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (ChatResponse) String ¶ added in v65.63.2
func (response ChatResponse) String() string
type ChatResult ¶ added in v65.63.2
type ChatResult struct {

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    ChatResponse BaseChatResponse `mandatory:"true" json:"chatResponse"`
}
ChatResult The response to the chat conversation.
func (ChatResult) String ¶ added in v65.63.2
func (m ChatResult) String() string
func (*ChatResult) UnmarshalJSON ¶ added in v65.63.2
func (m *ChatResult) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (ChatResult) ValidateEnumValue ¶ added in v65.63.2
func (m ChatResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type Choice ¶
type Choice struct {

    // The index of the generated text.
    Index *int `mandatory:"true" json:"index"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}
Choice Represents a single instance of generated text.
func (Choice) ValidateEnumValue ¶
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type Citation ¶ added in v65.63.2
type Citation struct {

    // The index of text that the citation starts at, counting from zero.
    Start *int `mandatory:"true" json:"start"`

    // The index of text that the citation ends after, counting from zero.
    End *int `mandatory:"true" json:"end"`

    // The text of the citation.
    Text *string `mandatory:"true" json:"text"`

    // Identifiers of documents cited by this section of the generated reply.
    DocumentIds []string `mandatory:"true" json:"documentIds"`
}
Citation A section of the generated reply which cites external knowledge.
func (Citation) ValidateEnumValue ¶ added in v65.63.2
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereChatRequest ¶ added in v65.63.2
type CohereChatRequest struct {

    // Text input for the model to respond to.
    Message *string `mandatory:"true" json:"message"`

    // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.
    ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"`

    // A list of relevant documents that the model can cite to generate a more accurate reply.
    // Some suggested keys are "text", "author", and "date". For better generation quality, it is
    // recommended to keep the total word count of the strings in the dictionary to under 300
    // words.
    Documents []interface{} `mandatory:"false" json:"documents"`

    // When true, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's message will be generated.
    IsSearchQueriesOnly *bool `mandatory:"false" json:"isSearchQueriesOnly"`

    // When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style. Default preambles vary for different models.
    PreambleOverride *string `mandatory:"false" json:"preambleOverride"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The maximum number of tokens to predict for each response. Includes input plus output tokens.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer, such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is 0, which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`
}
CohereChatRequest Details for the chat request for Cohere models.
func (CohereChatRequest) MarshalJSON ¶ added in v65.63.2
func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (CohereChatRequest) String ¶ added in v65.63.2
func (m CohereChatRequest) String() string
func (CohereChatRequest) ValidateEnumValue ¶ added in v65.63.2
func (m CohereChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereChatResponse ¶ added in v65.63.2
type CohereChatResponse struct {

    // Contents of the reply generated by the model.
    Text *string `mandatory:"true" json:"text"`

    // Inline citations for the generated reply.
    Citations []Citation `mandatory:"false" json:"citations"`

    // Denotes that a search for documents is required.
    IsSearchRequired *bool `mandatory:"false" json:"isSearchRequired"`

    // Generated search queries.
    SearchQueries []SearchQuery `mandatory:"false" json:"searchQueries"`

    // Documents seen by the model when generating the reply. Each document is a JSON String
    // representing the field and values of the document.
    Documents []interface{} `mandatory:"false" json:"documents"`

    // Why the generation was completed.
    FinishReason CohereChatResponseFinishReasonEnum `mandatory:"true" json:"finishReason"`
}
CohereChatResponse The response to the chat conversation.
func (CohereChatResponse) MarshalJSON ¶ added in v65.63.2
func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (CohereChatResponse) String ¶ added in v65.63.2
func (m CohereChatResponse) String() string
func (CohereChatResponse) ValidateEnumValue ¶ added in v65.63.2
func (m CohereChatResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereChatResponseFinishReasonEnum ¶ added in v65.63.2
type CohereChatResponseFinishReasonEnum string
CohereChatResponseFinishReasonEnum Enum with underlying type: string
const (
    CohereChatResponseFinishReasonComplete   CohereChatResponseFinishReasonEnum = "COMPLETE"
    CohereChatResponseFinishReasonErrorToxic CohereChatResponseFinishReasonEnum = "ERROR_TOXIC"
    CohereChatResponseFinishReasonErrorLimit CohereChatResponseFinishReasonEnum = "ERROR_LIMIT"
    CohereChatResponseFinishReasonError      CohereChatResponseFinishReasonEnum = "ERROR"
    CohereChatResponseFinishReasonUserCancel CohereChatResponseFinishReasonEnum = "USER_CANCEL"
    CohereChatResponseFinishReasonMaxTokens  CohereChatResponseFinishReasonEnum = "MAX_TOKENS"
)
Set of constants representing the allowable values for CohereChatResponseFinishReasonEnum
func GetCohereChatResponseFinishReasonEnumValues ¶ added in v65.63.2
func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum
GetCohereChatResponseFinishReasonEnumValues Enumerates the set of values for CohereChatResponseFinishReasonEnum
func GetMappingCohereChatResponseFinishReasonEnum ¶ added in v65.63.2
func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)
GetMappingCohereChatResponseFinishReasonEnum performs a case-insensitive comparison on the enum value and returns the desired enum
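A hedged fragment showing one way to act on the finish reason after a chat call (assumes a CohereChatResponse value named cohereResponse and the log package):

    switch cohereResponse.FinishReason {
    case generativeaiinference.CohereChatResponseFinishReasonComplete:
        // The model reached a natural stop point; nothing to do.
    case generativeaiinference.CohereChatResponseFinishReasonMaxTokens:
        // The reply was cut off; consider retrying with a larger MaxTokens.
    default:
        log.Printf("generation ended early: %s", cohereResponse.FinishReason)
    }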
type CohereLlmInferenceRequest ¶
type CohereLlmInferenceRequest struct {

    // Represents the prompt to be completed. The trailing white spaces are trimmed before completion.
    Prompt *string `mandatory:"true" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. This option only applies to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // The maximum number of tokens to predict for each response. Includes input plus output tokens.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer, such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is 0, which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // The generated text is cut at the end of the earliest occurrence of this stop sequence. The generated text will include this stop sequence.
    StopSequences []string `mandatory:"false" json:"stopSequences"`

    // Specifies how and if the token likelihoods are returned with the response.
    ReturnLikelihoods CohereLlmInferenceRequestReturnLikelihoodsEnum `mandatory:"false" json:"returnLikelihoods,omitempty"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate CohereLlmInferenceRequestTruncateEnum `mandatory:"false" json:"truncate,omitempty"`
}
CohereLlmInferenceRequest Details for the text generation request for Cohere models.
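A hedged fragment showing how this request type plugs into the GenerateText operation (assumes an existing client and ctx; the OCID and model name are placeholders, and the ModelId field is taken from the OnDemandServingMode type's documentation):

    details := generativeaiinference.GenerateTextDetails{
        CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder OCID
        ServingMode: generativeaiinference.OnDemandServingMode{
            ModelId: common.String("cohere.command"), // placeholder model name
        },
        InferenceRequest: generativeaiinference.CohereLlmInferenceRequest{
            Prompt:            common.String("Write a haiku about the ocean."),
            MaxTokens:         common.Int(60),
            Temperature:       common.Float64(0.7),
            NumGenerations:    common.Int(1),
            ReturnLikelihoods: generativeaiinference.CohereLlmInferenceRequestReturnLikelihoodsGeneration,
        },
    }
    resp, err := client.GenerateText(ctx, generativeaiinference.GenerateTextRequest{GenerateTextDetails: details})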
func (CohereLlmInferenceRequest) MarshalJSON ¶
func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (CohereLlmInferenceRequest) String ¶
func (m CohereLlmInferenceRequest) String() string
func (CohereLlmInferenceRequest) ValidateEnumValue ¶
func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereLlmInferenceRequestReturnLikelihoodsEnum ¶
type CohereLlmInferenceRequestReturnLikelihoodsEnum string
CohereLlmInferenceRequestReturnLikelihoodsEnum Enum with underlying type: string
const (
    CohereLlmInferenceRequestReturnLikelihoodsNone       CohereLlmInferenceRequestReturnLikelihoodsEnum = "NONE"
    CohereLlmInferenceRequestReturnLikelihoodsAll        CohereLlmInferenceRequestReturnLikelihoodsEnum = "ALL"
    CohereLlmInferenceRequestReturnLikelihoodsGeneration CohereLlmInferenceRequestReturnLikelihoodsEnum = "GENERATION"
)
Set of constants representing the allowable values for CohereLlmInferenceRequestReturnLikelihoodsEnum
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues ¶
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum
GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues Enumerates the set of values for CohereLlmInferenceRequestReturnLikelihoodsEnum
func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum ¶
func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)
GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type CohereLlmInferenceRequestTruncateEnum ¶
type CohereLlmInferenceRequestTruncateEnum string
CohereLlmInferenceRequestTruncateEnum Enum with underlying type: string
const (
    CohereLlmInferenceRequestTruncateNone  CohereLlmInferenceRequestTruncateEnum = "NONE"
    CohereLlmInferenceRequestTruncateStart CohereLlmInferenceRequestTruncateEnum = "START"
    CohereLlmInferenceRequestTruncateEnd   CohereLlmInferenceRequestTruncateEnum = "END"
)
Set of constants representing the allowable values for CohereLlmInferenceRequestTruncateEnum
func GetCohereLlmInferenceRequestTruncateEnumValues ¶
func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum
GetCohereLlmInferenceRequestTruncateEnumValues Enumerates the set of values for CohereLlmInferenceRequestTruncateEnum
func GetMappingCohereLlmInferenceRequestTruncateEnum ¶
func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)
GetMappingCohereLlmInferenceRequestTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type CohereLlmInferenceResponse ¶
type CohereLlmInferenceResponse struct {

    // Each prompt in the input array has an array of GeneratedText, controlled by the numGenerations parameter in the request.
    GeneratedTexts []GeneratedText `mandatory:"true" json:"generatedTexts"`

    // The date and time that the model was created in an RFC3339 formatted datetime string.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // Represents the original prompt. Applies only to non-stream responses.
    Prompt *string `mandatory:"false" json:"prompt"`
}
CohereLlmInferenceResponse The generated text result to return.
func (CohereLlmInferenceResponse) MarshalJSON ¶
func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (CohereLlmInferenceResponse) String ¶
func (m CohereLlmInferenceResponse) String() string
func (CohereLlmInferenceResponse) ValidateEnumValue ¶
func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereMessage ¶ added in v65.63.2
type CohereMessage struct {

    // One of CHATBOT|USER to identify who the message is coming from.
    Role CohereMessageRoleEnum `mandatory:"true" json:"role"`

    // Contents of the chat message.
    Message *string `mandatory:"true" json:"message"`
}
CohereMessage A message that represents a single dialogue in a chat
func (CohereMessage) String ¶ added in v65.63.2
func (m CohereMessage) String() string
func (CohereMessage) ValidateEnumValue ¶ added in v65.63.2
func (m CohereMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type CohereMessageRoleEnum ¶ added in v65.63.2
type CohereMessageRoleEnum string
CohereMessageRoleEnum Enum with underlying type: string
const (
    CohereMessageRoleChatbot CohereMessageRoleEnum = "CHATBOT"
    CohereMessageRoleUser    CohereMessageRoleEnum = "USER"
)
Set of constants representing the allowable values for CohereMessageRoleEnum
func GetCohereMessageRoleEnumValues ¶ added in v65.63.2
func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum
GetCohereMessageRoleEnumValues Enumerates the set of values for CohereMessageRoleEnum
func GetMappingCohereMessageRoleEnum ¶ added in v65.63.2
func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)
GetMappingCohereMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type DedicatedServingMode ¶
type DedicatedServingMode struct {

    // The OCID of the endpoint to use.
    EndpointId *string `mandatory:"true" json:"endpointId"`
}
DedicatedServingMode The model's serving mode is dedicated serving and has an endpoint on a dedicated AI cluster.
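A short sketch of choosing between the two serving modes (the endpoint OCID and model name are placeholders; the ModelId field is taken from the OnDemandServingMode type's documentation, not this page):

    var mode generativeaiinference.ServingMode

    // Dedicated: requests go to an endpoint hosted on a dedicated AI cluster.
    mode = generativeaiinference.DedicatedServingMode{
        EndpointId: common.String("ocid1.generativeaiendpoint.oc1..example"), // placeholder OCID
    }

    // On-demand: requests go to a shared, pretrained base model.
    mode = generativeaiinference.OnDemandServingMode{
        ModelId: common.String("cohere.command"), // placeholder model name
    }
    _ = mode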
func (DedicatedServingMode) MarshalJSON ¶
func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (DedicatedServingMode) String ¶
func (m DedicatedServingMode) String() string
func (DedicatedServingMode) ValidateEnumValue ¶
func (m DedicatedServingMode) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type EmbedTextDetails ¶
type EmbedTextDetails struct {

    // Provide a list of strings with a maximum number of 96 entries. Each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens.
    Inputs []string `mandatory:"true" json:"inputs"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response. Results are index-based.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate EmbedTextDetailsTruncateEnum `mandatory:"false" json:"truncate,omitempty"`

    // Specifies the input type.
    InputType EmbedTextDetailsInputTypeEnum `mandatory:"false" json:"inputType,omitempty"`
}
EmbedTextDetails Details for the request to embed texts.
func (EmbedTextDetails) String ¶
func (m EmbedTextDetails) String() string
func (*EmbedTextDetails) UnmarshalJSON ¶
func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (EmbedTextDetails) ValidateEnumValue ¶
func (m EmbedTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type EmbedTextDetailsInputTypeEnum ¶
type EmbedTextDetailsInputTypeEnum string
EmbedTextDetailsInputTypeEnum Enum with underlying type: string
const (
    EmbedTextDetailsInputTypeSearchDocument EmbedTextDetailsInputTypeEnum = "SEARCH_DOCUMENT"
    EmbedTextDetailsInputTypeSearchQuery    EmbedTextDetailsInputTypeEnum = "SEARCH_QUERY"
    EmbedTextDetailsInputTypeClassification EmbedTextDetailsInputTypeEnum = "CLASSIFICATION"
    EmbedTextDetailsInputTypeClustering     EmbedTextDetailsInputTypeEnum = "CLUSTERING"
)
Set of constants representing the allowable values for EmbedTextDetailsInputTypeEnum
func GetEmbedTextDetailsInputTypeEnumValues ¶
func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum
GetEmbedTextDetailsInputTypeEnumValues Enumerates the set of values for EmbedTextDetailsInputTypeEnum
func GetMappingEmbedTextDetailsInputTypeEnum ¶
func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)
GetMappingEmbedTextDetailsInputTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type EmbedTextDetailsTruncateEnum ¶
type EmbedTextDetailsTruncateEnum string
EmbedTextDetailsTruncateEnum Enum with underlying type: string
const (
    EmbedTextDetailsTruncateNone  EmbedTextDetailsTruncateEnum = "NONE"
    EmbedTextDetailsTruncateStart EmbedTextDetailsTruncateEnum = "START"
    EmbedTextDetailsTruncateEnd   EmbedTextDetailsTruncateEnum = "END"
)
Set of constants representing the allowable values for EmbedTextDetailsTruncateEnum
func GetEmbedTextDetailsTruncateEnumValues ¶
func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum
GetEmbedTextDetailsTruncateEnumValues Enumerates the set of values for EmbedTextDetailsTruncateEnum
func GetMappingEmbedTextDetailsTruncateEnum ¶
func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)
GetMappingEmbedTextDetailsTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum
type EmbedTextRequest ¶
type EmbedTextRequest struct {

    // Details for generating the embed response.
    EmbedTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}
EmbedTextRequest wrapper for the EmbedText operation
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedTextRequest.
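A hedged fragment for the EmbedText flow (assumes an existing client and ctx; the compartment OCID and model name are placeholders, and the ModelId field comes from the OnDemandServingMode type's documentation):

    resp, err := client.EmbedText(ctx, generativeaiinference.EmbedTextRequest{
        EmbedTextDetails: generativeaiinference.EmbedTextDetails{
            Inputs:        []string{"a first phrase", "a second phrase"},
            CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder OCID
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("cohere.embed-english-v3.0"), // placeholder model name
            },
            Truncate:  generativeaiinference.EmbedTextDetailsTruncateEnd,
            InputType: generativeaiinference.EmbedTextDetailsInputTypeSearchDocument,
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    // resp.Embeddings holds one vector per input, in input order.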
func (EmbedTextRequest) BinaryRequestBody ¶
func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (EmbedTextRequest) HTTPRequest ¶
func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (EmbedTextRequest) RetryPolicy ¶
func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (EmbedTextRequest) String ¶
func (request EmbedTextRequest) String() string
func (EmbedTextRequest) ValidateEnumValue ¶
func (request EmbedTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type EmbedTextResponse ¶
type EmbedTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The EmbedTextResult instance
    EmbedTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
EmbedTextResponse wrapper for the EmbedText operation
func (EmbedTextResponse) HTTPResponse ¶
func (response EmbedTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (EmbedTextResponse) String ¶
func (response EmbedTextResponse) String() string
type EmbedTextResult ¶
type EmbedTextResult struct {

    // A unique identifier for the generated result.
    Id *string `mandatory:"true" json:"id"`

    // The embeddings corresponding to inputs.
    Embeddings [][]float32 `mandatory:"true" json:"embeddings"`

    // The original inputs. Only present if "isEcho" is set to true.
    Inputs []string `mandatory:"false" json:"inputs"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}
EmbedTextResult The generated embedded result to return.
func (EmbedTextResult) String ¶
func (m EmbedTextResult) String() string
func (EmbedTextResult) ValidateEnumValue ¶
func (m EmbedTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GenerateTextDetails ¶
type GenerateTextDetails struct {

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    InferenceRequest LlmInferenceRequest `mandatory:"true" json:"inferenceRequest"`
}
GenerateTextDetails Details for the request to generate text.
func (GenerateTextDetails) String ¶
func (m GenerateTextDetails) String() string
func (*GenerateTextDetails) UnmarshalJSON ¶
func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (GenerateTextDetails) ValidateEnumValue ¶
func (m GenerateTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GenerateTextRequest ¶
type GenerateTextRequest struct {

    // Details for generating the text response.
    GenerateTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}
GenerateTextRequest wrapper for the GenerateText operation
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateTextRequest.
func (GenerateTextRequest) BinaryRequestBody ¶
func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (GenerateTextRequest) HTTPRequest ¶
func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (GenerateTextRequest) RetryPolicy ¶
func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (GenerateTextRequest) String ¶
func (request GenerateTextRequest) String() string
func (GenerateTextRequest) ValidateEnumValue ¶
func (request GenerateTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GenerateTextResponse ¶
type GenerateTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The GenerateTextResult instance
    GenerateTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
GenerateTextResponse wrapper for the GenerateText operation
func (GenerateTextResponse) HTTPResponse ¶
func (response GenerateTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (GenerateTextResponse) String ¶
func (response GenerateTextResponse) String() string
type GenerateTextResult ¶
type GenerateTextResult struct {

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    InferenceResponse LlmInferenceResponse `mandatory:"true" json:"inferenceResponse"`
}
GenerateTextResult The generated text result to return.
func (GenerateTextResult) String ¶
func (m GenerateTextResult) String() string
func (*GenerateTextResult) UnmarshalJSON ¶
func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (GenerateTextResult) ValidateEnumValue ¶
func (m GenerateTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GeneratedText ¶
type GeneratedText struct {

    // A unique identifier for this text generation.
    Id *string `mandatory:"true" json:"id"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The overall likelihood of the generated text.
    // When a large language model generates a new token for the output text, a likelihood is assigned to all tokens, where tokens with higher likelihoods are more likely to follow the current token. For example, it's more likely that the word favorite is followed by the word food or book rather than the word zebra. A lower likelihood means that it's less likely that the token follows the current token.
    Likelihood *float64 `mandatory:"true" json:"likelihood"`

    // The reason why the model stopped generating tokens.
    // A model stops generating tokens if the model hits a natural stop point or reaches a provided stop sequence.
    FinishReason *string `mandatory:"false" json:"finishReason"`

    // A collection of generated tokens and their corresponding likelihoods.
    TokenLikelihoods []TokenLikelihood `mandatory:"false" json:"tokenLikelihoods"`
}
GeneratedText The text generated during each run.
func (GeneratedText) String ¶
func (m GeneratedText) String() string
func (GeneratedText) ValidateEnumValue ¶
func (m GeneratedText) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GenerativeAiInferenceClient ¶
type GenerativeAiInferenceClient struct {
    common.BaseClient
    // contains filtered or unexported fields
}
GenerativeAiInferenceClient a client for GenerativeAiInference
func NewGenerativeAiInferenceClientWithConfigurationProvider ¶
func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)
NewGenerativeAiInferenceClientWithConfigurationProvider Creates a new default GenerativeAiInference client with the given configuration provider. The configuration provider will be used for the default signer as well as for reading the region.
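A minimal sketch of client construction (assumes the OCI config file at ~/.oci/config; the region string is a placeholder):

    provider := common.DefaultConfigProvider()
    client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(provider)
    if err != nil {
        log.Fatal(err)
    }
    client.SetRegion("us-chicago-1") // optional; overrides the region read from the config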
func NewGenerativeAiInferenceClientWithOboToken ¶
func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)
NewGenerativeAiInferenceClientWithOboToken Creates a new default GenerativeAiInference client with the given configuration provider. The OBO token will be added to default headers and signed; the configuration provider will be used for the signer as well as for reading the region.
func (GenerativeAiInferenceClient) Chat ¶ added in v65.63.2
func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)
Chat Creates a response for the given conversation.
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/Chat.go.html to see an example of how to use the Chat API. A default retry strategy applies to the Chat() operation.
func (*GenerativeAiInferenceClient) ConfigurationProvider ¶
func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider
ConfigurationProvider returns the ConfigurationProvider used by this client, or nil if none is set
func (GenerativeAiInferenceClient) EmbedText ¶
func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)
EmbedText Produces embeddings for the inputs. An embedding is a numeric representation of a piece of text. This text can be a phrase, a sentence, or one or more paragraphs. The Generative AI embedding model transforms each phrase, sentence, or paragraph that you input into an array of 1024 numbers. You can use these embeddings to find similarity in your input text, such as finding phrases that are similar in context or category. Embeddings are mostly used for semantic searches, where the search function focuses on the meaning of the text that it's searching through rather than finding results based on keywords.
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/EmbedText.go.html to see an example of how to use the EmbedText API. A default retry strategy applies to the EmbedText() operation.
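Since each embedding is a fixed-length numeric vector, similarity between two inputs is typically scored with cosine similarity. A small self-contained helper, not part of the SDK (assumes the math package is imported):

    // cosineSimilarity returns the cosine of the angle between two
    // equal-length vectors; values near 1 mean the texts are similar.
    func cosineSimilarity(a, b []float32) float64 {
        var dot, na, nb float64
        for i := range a {
            dot += float64(a[i]) * float64(b[i])
            na += float64(a[i]) * float64(a[i])
            nb += float64(b[i]) * float64(b[i])
        }
        if na == 0 || nb == 0 {
            return 0
        }
        return dot / (math.Sqrt(na) * math.Sqrt(nb))
    }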
func (GenerativeAiInferenceClient) GenerateText ¶
func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)
GenerateText Generates a text response based on the user prompt.
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/GenerateText.go.html to see an example of how to use the GenerateText API. A default retry strategy applies to the GenerateText() operation.
func (*GenerativeAiInferenceClient) SetRegion ¶
func (client *GenerativeAiInferenceClient) SetRegion(region string)
SetRegion overrides the region of this client.
func (GenerativeAiInferenceClient) SummarizeText ¶
func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)
SummarizeText Summarizes the input text.
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/SummarizeText.go.html to see an example of how to use the SummarizeText API. A default retry strategy applies to the SummarizeText() operation.
type GenericChatRequest ¶ added in v65.63.2
type GenericChatRequest struct {

    // The series of messages associated with this chat completion request. It should include previous messages in the conversation. Each message has a role and content.
    Messages []Message `mandatory:"false" json:"messages"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is -1, which means to consider all tokens. Setting to 0 disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer, such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus max_tokens cannot exceed the model's context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // Modify the likelihood of specified tokens appearing in the completion.
    LogitBias *interface{} `mandatory:"false" json:"logitBias"`
}
GenericChatRequest Details for the chat request.
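A minimal sketch of building this request. The "USER" role string and the sampling values are illustrative assumptions, not defaults taken from this package; only fields documented above are set.

    import (
        "github.com/oracle/oci-go-sdk/v65/common"
        "github.com/oracle/oci-go-sdk/v65/generativeaiinference"
    )

    // buildGenericChatRequest sketches a single-turn request with conservative
    // sampling settings. The "USER" role value is an assumption.
    func buildGenericChatRequest() generativeaiinference.GenericChatRequest {
        return generativeaiinference.GenericChatRequest{
            Messages: []generativeaiinference.Message{
                {
                    Role: common.String("USER"),
                    Content: []generativeaiinference.ChatContent{
                        generativeaiinference.TextContent{Text: common.String("Summarize RFC 2119 in one sentence.")},
                    },
                },
            },
            MaxTokens:   common.Int(256),
            Temperature: common.Float64(0.2), // low temperature suits factual tasks
            TopP:        common.Float64(0.9),
        }
    }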
func (GenericChatRequest) MarshalJSON ¶ added in v65.63.2
func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (GenericChatRequest) String ¶ added in v65.63.2
func (m GenericChatRequest) String() string
func (GenericChatRequest) ValidateEnumValue ¶ added in v65.63.2
func (m GenericChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type GenericChatResponse ¶ added in v65.63.2
type GenericChatResponse struct {

    // The Unix timestamp (in seconds) of when the generation was created.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // A list of generated texts. Can be more than one if n is greater than 1.
    Choices []ChatChoice `mandatory:"true" json:"choices"`
}
GenericChatResponse The response to the chat conversation.
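A hedged sketch of consuming the response; it relies only on the documented TimeCreated and Choices fields plus the String() method that models in this package conventionally provide (assumes an fmt import).

    // printChoices dumps each generated choice through its String() form,
    // making no assumptions about ChatChoice fields beyond what is documented.
    func printChoices(resp generativeaiinference.GenericChatResponse) {
        fmt.Println("created:", resp.TimeCreated)
        for i, choice := range resp.Choices {
            fmt.Printf("choice %d: %s\n", i, choice.String())
        }
    }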
func (GenericChatResponse) MarshalJSON ¶ added in v65.63.2
func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (GenericChatResponse) String ¶ added in v65.63.2
func (m GenericChatResponse) String() string
func (GenericChatResponse) ValidateEnumValue ¶ added in v65.63.2
func (m GenericChatResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type LlamaLlmInferenceRequest ¶
type LlamaLlmInferenceRequest struct {

    // Represents the prompt to be completed. The trailing white spaces are trimmed before completion.
    Prompt *string `mandatory:"false" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is -1, which means to consider all tokens. Setting to 0 disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus max_tokens cannot exceed the model's context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`
}
LlamaLlmInferenceRequest Details for the text generation request for Llama models.
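A sketch of a Llama completion request; the prompt and sampling values are illustrative. Assigning it to the LlmInferenceRequest interface shows how it plugs into the polymorphic request type.

    // An illustrative Llama inference request; values are not defaults.
    var req generativeaiinference.LlmInferenceRequest = generativeaiinference.LlamaLlmInferenceRequest{
        Prompt:      common.String("Once upon a time"),
        MaxTokens:   common.Int(128),
        Temperature: common.Float64(0.7),
        TopK:        common.Int(40), // consider only the 40 most likely tokens
    }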
func (LlamaLlmInferenceRequest) MarshalJSON ¶
func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (LlamaLlmInferenceRequest) String ¶
func (m LlamaLlmInferenceRequest) String() string
func (LlamaLlmInferenceRequest) ValidateEnumValue ¶
func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type LlamaLlmInferenceResponse ¶
type LlamaLlmInferenceResponse struct {

    // The Unix timestamp (in seconds) of when the generation was created.
    Created *common.SDKTime `mandatory:"true" json:"created"`

    // A list of generated texts. Can be more than one if n is greater than 1.
    Choices []Choice `mandatory:"true" json:"choices"`
}
LlamaLlmInferenceResponse The generated text result to return.
func (LlamaLlmInferenceResponse) MarshalJSON ¶
func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (LlamaLlmInferenceResponse) String ¶
func (m LlamaLlmInferenceResponse) String() string
func (LlamaLlmInferenceResponse) ValidateEnumValue ¶
func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type LlmInferenceRequest ¶
type LlmInferenceRequest interface { }
LlmInferenceRequest The base class for the inference requests.
type LlmInferenceRequestRuntimeTypeEnum ¶
type LlmInferenceRequestRuntimeTypeEnum string
LlmInferenceRequestRuntimeTypeEnum Enum with underlying type: string
const (
    LlmInferenceRequestRuntimeTypeCohere LlmInferenceRequestRuntimeTypeEnum = "COHERE"
    LlmInferenceRequestRuntimeTypeLlama  LlmInferenceRequestRuntimeTypeEnum = "LLAMA"
)
Set of constants representing the allowable values for LlmInferenceRequestRuntimeTypeEnum
func GetLlmInferenceRequestRuntimeTypeEnumValues ¶
func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum
GetLlmInferenceRequestRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceRequestRuntimeTypeEnum
func GetMappingLlmInferenceRequestRuntimeTypeEnum ¶
func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)
GetMappingLlmInferenceRequestRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
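For example, because the comparison is case-insensitive, a lowercase value still resolves (assumes an fmt import):

    if rt, ok := generativeaiinference.GetMappingLlmInferenceRequestRuntimeTypeEnum("cohere"); ok {
        fmt.Println(rt) // COHERE
    }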
type LlmInferenceResponse ¶
type LlmInferenceResponse interface { }
LlmInferenceResponse The base class for inference responses.
type LlmInferenceResponseRuntimeTypeEnum ¶
type LlmInferenceResponseRuntimeTypeEnum string
LlmInferenceResponseRuntimeTypeEnum Enum with underlying type: string
const (
    LlmInferenceResponseRuntimeTypeCohere LlmInferenceResponseRuntimeTypeEnum = "COHERE"
    LlmInferenceResponseRuntimeTypeLlama  LlmInferenceResponseRuntimeTypeEnum = "LLAMA"
    LlmInferenceResponseRuntimeTypeOpenai LlmInferenceResponseRuntimeTypeEnum = "OPENAI"
    LlmInferenceResponseRuntimeTypeDalle3 LlmInferenceResponseRuntimeTypeEnum = "DALLE3"
)
Set of constants representing the allowable values for LlmInferenceResponseRuntimeTypeEnum
func GetLlmInferenceResponseRuntimeTypeEnumValues ¶
func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum
GetLlmInferenceResponseRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceResponseRuntimeTypeEnum
func GetMappingLlmInferenceResponseRuntimeTypeEnum ¶
func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)
GetMappingLlmInferenceResponseRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
type Logprobs ¶
type Logprobs struct {

    // The text offset.
    TextOffset []int `mandatory:"false" json:"textOffset"`

    // The logarithmic probabilities of the output token.
    TokenLogprobs []float64 `mandatory:"false" json:"tokenLogprobs"`

    // The list of output tokens.
    Tokens []string `mandatory:"false" json:"tokens"`

    // The logarithmic probabilities of each of the top k tokens.
    TopLogprobs []map[string]string `mandatory:"false" json:"topLogprobs"`
}
Logprobs Returned if the logarithmic probabilities are set.
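A sketch of reading the parallel slices, assuming a populated Logprobs value in a hypothetical variable named lp (and an fmt import):

    // Tokens and TokenLogprobs are parallel; guard the index to be safe.
    for i, tok := range lp.Tokens {
        if i < len(lp.TokenLogprobs) {
            fmt.Printf("%q -> %.4f\n", tok, lp.TokenLogprobs[i])
        }
    }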
func (Logprobs) ValidateEnumValue ¶
func (m Logprobs) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type Message ¶ added in v65.63.2
type Message struct {

    // Indicates who is giving the current message.
    Role *string `mandatory:"true" json:"role"`

    // Contents of the chat message.
    Content []ChatContent `mandatory:"true" json:"content"`
}
Message A message that represents a single dialogue in a chat
func (*Message) UnmarshalJSON ¶ added in v65.63.2
func (m *Message) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from JSON
func (Message) ValidateEnumValue ¶ added in v65.63.2
func (m Message) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type OnDemandServingMode ¶
type OnDemandServingMode struct {

    // The unique ID of the model to use. You can use the list models API to list available models.
    ModelId *string `mandatory:"true" json:"modelId"`
}
OnDemandServingMode The model's serving mode is on-demand serving on a shared infrastructure.
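A sketch of selecting a model for on-demand serving; the model OCID below is a placeholder, not a real identifier.

    serving := generativeaiinference.OnDemandServingMode{
        // Placeholder OCID; list available models to find a real one.
        ModelId: common.String("ocid1.generativeaimodel.oc1..exampleuniqueID"),
    }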
func (OnDemandServingMode) MarshalJSON ¶
func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (OnDemandServingMode) String ¶
func (m OnDemandServingMode) String() string
func (OnDemandServingMode) ValidateEnumValue ¶
func (m OnDemandServingMode) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type SearchQuery ¶ added in v65.63.2
type SearchQuery struct {

    // The text of the search query.
    Text *string `mandatory:"true" json:"text"`
}
SearchQuery The generated search query.
func (SearchQuery) String ¶ added in v65.63.2
func (m SearchQuery) String() string
func (SearchQuery) ValidateEnumValue ¶ added in v65.63.2
func (m SearchQuery) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type ServingMode ¶
type ServingMode interface { }
ServingMode The model's serving mode, which could be on-demand serving or dedicated serving.
type ServingModeServingTypeEnum ¶
type ServingModeServingTypeEnum string
ServingModeServingTypeEnum Enum with underlying type: string
const (
    ServingModeServingTypeOnDemand  ServingModeServingTypeEnum = "ON_DEMAND"
    ServingModeServingTypeDedicated ServingModeServingTypeEnum = "DEDICATED"
)
Set of constants representing the allowable values for ServingModeServingTypeEnum
func GetMappingServingModeServingTypeEnum ¶
func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)
GetMappingServingModeServingTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
func GetServingModeServingTypeEnumValues ¶
func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum
GetServingModeServingTypeEnumValues Enumerates the set of values for ServingModeServingTypeEnum
type SummarizeTextDetails ¶
type SummarizeTextDetails struct {

    // The input string to be summarized.
    Input *string `mandatory:"true" json:"input"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // A number that sets the randomness of the generated output. Lower temperatures mean less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0, and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". For example, "focusing on the next steps" or "written by Yoda".
    AdditionalCommand *string `mandatory:"false" json:"additionalCommand"`

    // Indicates the approximate length of the summary. If "AUTO" is selected, the best option is picked based on the input text.
    Length SummarizeTextDetailsLengthEnum `mandatory:"false" json:"length,omitempty"`

    // Indicates the style in which the summary is delivered: in a free-form paragraph or in bullet points. If "AUTO" is selected, the best option is picked based on the input text.
    Format SummarizeTextDetailsFormatEnum `mandatory:"false" json:"format,omitempty"`

    // Controls how close to the original text the summary is. High-extractiveness summaries lean toward reusing sentences verbatim, while low-extractiveness summaries tend to paraphrase more.
    Extractiveness SummarizeTextDetailsExtractivenessEnum `mandatory:"false" json:"extractiveness,omitempty"`
}
SummarizeTextDetails Details for the request to summarize text.
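A sketch combining the documented fields. The compartment OCID and input text are placeholders, and serving refers to the OnDemandServingMode sketch above.

    details := generativeaiinference.SummarizeTextDetails{
        Input:             common.String("<long text to summarize>"), // placeholder input
        ServingMode:       serving,                                   // e.g. the OnDemandServingMode above
        CompartmentId:     common.String("ocid1.compartment.oc1..exampleuniqueID"), // placeholder OCID
        Length:            generativeaiinference.SummarizeTextDetailsLengthShort,
        Format:            generativeaiinference.SummarizeTextDetailsFormatBullets,
        Extractiveness:    generativeaiinference.SummarizeTextDetailsExtractivenessAuto,
        AdditionalCommand: common.String("focusing on the next steps"),
    }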
func (SummarizeTextDetails) String ¶
func (m SummarizeTextDetails) String() string
func (*SummarizeTextDetails) UnmarshalJSON ¶
func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from JSON
func (SummarizeTextDetails) ValidateEnumValue ¶
func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type SummarizeTextDetailsExtractivenessEnum ¶
type SummarizeTextDetailsExtractivenessEnum string
SummarizeTextDetailsExtractivenessEnum Enum with underlying type: string
const (
    SummarizeTextDetailsExtractivenessLow    SummarizeTextDetailsExtractivenessEnum = "LOW"
    SummarizeTextDetailsExtractivenessMedium SummarizeTextDetailsExtractivenessEnum = "MEDIUM"
    SummarizeTextDetailsExtractivenessHigh   SummarizeTextDetailsExtractivenessEnum = "HIGH"
    SummarizeTextDetailsExtractivenessAuto   SummarizeTextDetailsExtractivenessEnum = "AUTO"
)
Set of constants representing the allowable values for SummarizeTextDetailsExtractivenessEnum
func GetMappingSummarizeTextDetailsExtractivenessEnum ¶
func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)
GetMappingSummarizeTextDetailsExtractivenessEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
func GetSummarizeTextDetailsExtractivenessEnumValues ¶
func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum
GetSummarizeTextDetailsExtractivenessEnumValues Enumerates the set of values for SummarizeTextDetailsExtractivenessEnum
type SummarizeTextDetailsFormatEnum ¶
type SummarizeTextDetailsFormatEnum string
SummarizeTextDetailsFormatEnum Enum with underlying type: string
const (
    SummarizeTextDetailsFormatParagraph SummarizeTextDetailsFormatEnum = "PARAGRAPH"
    SummarizeTextDetailsFormatBullets   SummarizeTextDetailsFormatEnum = "BULLETS"
    SummarizeTextDetailsFormatAuto      SummarizeTextDetailsFormatEnum = "AUTO"
)
Set of constants representing the allowable values for SummarizeTextDetailsFormatEnum
func GetMappingSummarizeTextDetailsFormatEnum ¶
func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)
GetMappingSummarizeTextDetailsFormatEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
func GetSummarizeTextDetailsFormatEnumValues ¶
func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum
GetSummarizeTextDetailsFormatEnumValues Enumerates the set of values for SummarizeTextDetailsFormatEnum
type SummarizeTextDetailsLengthEnum ¶
type SummarizeTextDetailsLengthEnum string
SummarizeTextDetailsLengthEnum Enum with underlying type: string
const (
    SummarizeTextDetailsLengthShort  SummarizeTextDetailsLengthEnum = "SHORT"
    SummarizeTextDetailsLengthMedium SummarizeTextDetailsLengthEnum = "MEDIUM"
    SummarizeTextDetailsLengthLong   SummarizeTextDetailsLengthEnum = "LONG"
    SummarizeTextDetailsLengthAuto   SummarizeTextDetailsLengthEnum = "AUTO"
)
Set of constants representing the allowable values for SummarizeTextDetailsLengthEnum
func GetMappingSummarizeTextDetailsLengthEnum ¶
func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)
GetMappingSummarizeTextDetailsLengthEnum performs a case-insensitive comparison on the enum value and returns the matching enum.
func GetSummarizeTextDetailsLengthEnumValues ¶
func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum
GetSummarizeTextDetailsLengthEnumValues Enumerates the set of values for SummarizeTextDetailsLengthEnum
type SummarizeTextRequest ¶
type SummarizeTextRequest struct {

    // Details for summarizing the text.
    SummarizeTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}
SummarizeTextRequest wrapper for the SummarizeText operation
See also ¶
Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeTextRequest.
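A hedged end-to-end sketch, assuming the client's SummarizeText method mirrors the Chat signature shown for this client (the linked example suggests it does) and that a default configuration provider is available; assumes context and log imports, and the details value built above.

    client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
    if err != nil {
        log.Fatal(err)
    }
    resp, err := client.SummarizeText(context.Background(), generativeaiinference.SummarizeTextRequest{
        SummarizeTextDetails: details, // as built in the SummarizeTextDetails sketch
    })
    if err != nil {
        log.Fatal(err)
    }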
func (SummarizeTextRequest) BinaryRequestBody ¶
func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (SummarizeTextRequest) HTTPRequest ¶
func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (SummarizeTextRequest) RetryPolicy ¶
func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (SummarizeTextRequest) String ¶
func (request SummarizeTextRequest) String() string
func (SummarizeTextRequest) ValidateEnumValue ¶
func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type SummarizeTextResponse ¶
type SummarizeTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The SummarizeTextResult instance
    SummarizeTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
SummarizeTextResponse wrapper for the SummarizeText operation
func (SummarizeTextResponse) HTTPResponse ¶
func (response SummarizeTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (SummarizeTextResponse) String ¶
func (response SummarizeTextResponse) String() string
type SummarizeTextResult ¶
type SummarizeTextResult struct {

    // A unique identifier for this SummarizeTextResult.
    Id *string `mandatory:"true" json:"id"`

    // Summary result corresponding to the input.
    Summary *string `mandatory:"true" json:"summary"`

    // The original input. Only included if "isEcho" is set to true.
    Input *string `mandatory:"false" json:"input"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}
SummarizeTextResult The summarize text result returned to the caller.
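Because SummarizeTextResult is embedded in SummarizeTextResponse, its fields can be read directly off the resp value from the previous sketch (assumes an fmt import):

    fmt.Println("summary:", *resp.Summary)
    if resp.Input != nil { // present only when isEcho was true
        fmt.Println("echoed input:", *resp.Input)
    }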
func (SummarizeTextResult) String ¶
func (m SummarizeTextResult) String() string
func (SummarizeTextResult) ValidateEnumValue ¶
func (m SummarizeTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type TextContent ¶ added in v65.63.2
type TextContent struct {

    // The text content.
    Text *string `mandatory:"false" json:"text"`
}
TextContent Represents a single instance of text chat content.
func (TextContent) MarshalJSON ¶ added in v65.63.2
func (m TextContent) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to the JSON representation
func (TextContent) String ¶ added in v65.63.2
func (m TextContent) String() string
func (TextContent) ValidateEnumValue ¶ added in v65.63.2
func (m TextContent) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
type TokenLikelihood ¶
type TokenLikelihood struct {

    // A word, part of a word, or a punctuation mark.
    // For example, apple is a token and friendship is made up of two tokens, friend and ship. When you run a model, you can set the maximum number of output tokens. Estimate three tokens per word.
    Token *string `mandatory:"false" json:"token"`

    // The likelihood of this token during generation.
    Likelihood *float64 `mandatory:"false" json:"likelihood"`
}
TokenLikelihood An object that contains the returned token and its corresponding likelihood.
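A sketch iterating a hypothetical []TokenLikelihood slice named likelihoods, such as one obtained from a generation result (assumes an fmt import):

    for _, tl := range likelihoods {
        if tl.Token != nil && tl.Likelihood != nil { // both fields are optional pointers
            fmt.Printf("%q: %.4f\n", *tl.Token, *tl.Likelihood)
        }
    }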
func (TokenLikelihood) String ¶
func (m TokenLikelihood) String() string
func (TokenLikelihood) ValidateEnumValue ¶
func (m TokenLikelihood) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
Source Files ¶
- base_chat_request.go
- base_chat_response.go
- chat_choice.go
- chat_content.go
- chat_details.go
- chat_request_response.go
- chat_result.go
- choice.go
- citation.go
- cohere_chat_request.go
- cohere_chat_response.go
- cohere_llm_inference_request.go
- cohere_llm_inference_response.go
- cohere_message.go
- dedicated_serving_mode.go
- embed_text_details.go
- embed_text_request_response.go
- embed_text_result.go
- generate_text_details.go
- generate_text_request_response.go
- generate_text_result.go
- generated_text.go
- generativeaiinference_client.go
- generic_chat_request.go
- generic_chat_response.go
- llama_llm_inference_request.go
- llama_llm_inference_response.go
- llm_inference_request.go
- llm_inference_response.go
- logprobs.go
- message.go
- on_demand_serving_mode.go
- search_query.go
- serving_mode.go
- summarize_text_details.go
- summarize_text_request_response.go
- summarize_text_result.go
- text_content.go
- token_likelihood.go