Documentation
¶
Index ¶
- func PTR[T any](input T) *T
- type Conversation
- type ConversationalParameters
- type ConversationalRequest
- type ConversationalResponse
- type ConverstationalInputs
- type ErrorResponse
- type FeatureExtractionRequest
- type FeatureExtractionResponse
- type FeatureExtractionWithAutomaticReductionResponse
- type FillMaskRequest
- type FillMaskResponse
- type HTTPClient
- type InferenceClient
- func (ic *InferenceClient) Conversational(ctx context.Context, req *ConversationalRequest) (*ConversationalResponse, error)
- func (ic *InferenceClient) FeatureExtraction(ctx context.Context, req *FeatureExtractionRequest) (FeatureExtractionResponse, error)
- func (ic *InferenceClient) FeatureExtractionWithAutomaticReduction(ctx context.Context, req *FeatureExtractionRequest) (FeatureExtractionWithAutomaticReductionResponse, error)
- func (ic *InferenceClient) FillMask(ctx context.Context, req *FillMaskRequest) (FillMaskResponse, error)
- func (ic *InferenceClient) QuestionAnswering(ctx context.Context, req *QuestionAnsweringRequest) (*QuestionAnsweringResponse, error)
- func (ic *InferenceClient) SentenceSimilarity(ctx context.Context, req *SentenceSimilarityRequest) (SentenceSimilarityResponse, error)
- func (ic *InferenceClient) SetModel(model string)
- func (ic *InferenceClient) Summarization(ctx context.Context, req *SummarizationRequest) (SummarizationResponse, error)
- func (ic *InferenceClient) TableQuestionAnswering(ctx context.Context, req *TableQuestionAnsweringRequest) (*TableQuestionAnsweringResponse, error)
- func (ic *InferenceClient) Text2TextGeneration(ctx context.Context, req *Text2TextGenerationRequest) (Text2TextGenerationResponse, error)
- func (ic *InferenceClient) TextClassification(ctx context.Context, req *TextClassificationRequest) (TextClassificationResponse, error)
- func (ic *InferenceClient) TextGeneration(ctx context.Context, req *TextGenerationRequest) (TextGenerationResponse, error)
- func (ic *InferenceClient) TokenClassification(ctx context.Context, req *TokenClassificationRequest) (TokenClassificationResponse, error)
- func (ic *InferenceClient) Translation(ctx context.Context, req *TranslationRequest) (TranslationResponse, error)
- func (ic *InferenceClient) ZeroShotClassification(ctx context.Context, req *ZeroShotClassificationRequest) (ZeroShotClassificationResponse, error)
- type InferenceClientOptions
- type Options
- type QuestionAnsweringInputs
- type QuestionAnsweringRequest
- type QuestionAnsweringResponse
- type SentenceSimilarityInputs
- type SentenceSimilarityRequest
- type SentenceSimilarityResponse
- type SummarizationParameters
- type SummarizationRequest
- type SummarizationResponse
- type TableQuestionAnsweringInputs
- type TableQuestionAnsweringRequest
- type TableQuestionAnsweringResponse
- type Text2TextGenerationParameters
- type Text2TextGenerationRequest
- type Text2TextGenerationResponse
- type TextClassificationRequest
- type TextClassificationResponse
- type TextGenerationParameters
- type TextGenerationRequest
- type TextGenerationResponse
- type TokenClassificationRequest
- type TokenClassificationResponse
- type TokenClassificationarameters
- type TranslationRequest
- type TranslationResponse
- type ZeroShotClassificationParameters
- type ZeroShotClassificationRequest
- type ZeroShotClassificationResponse
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
Types ¶
type Conversation ¶ added in v0.0.7
type Conversation struct { // The last outputs from the model in the conversation, after the model has run. GeneratedResponses []string `json:"generated_responses,omitempty"` // The last inputs from the user in the conversation, after the model has run. PastUserInputs []string `json:"past_user_inputs,omitempty"` }
Used with ConversationalResponse
type ConversationalParameters ¶ added in v0.0.7
type ConversationalParameters struct { // (Default: None). Integer to define the minimum length in tokens of the output summary. MinLength *int `json:"min_length,omitempty"` // (Default: None). Integer to define the maximum length in tokens of the output summary. MaxLength *int `json:"max_length,omitempty"` // (Default: None). Integer to define the top tokens considered within the sample operation to create // new text. TopK *int `json:"top_k,omitempty"` // (Default: None). Float to define the tokens that are within the sample` operation of text generation. // Add tokens in the sample for more probable to least probable until the sum of the probabilities is // greater than top_p. TopP *float64 `json:"top_p,omitempty"` // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, // 0 mens top_k=1, 100.0 is getting closer to uniform probability. Temperature *float64 `json:"temperature,omitempty"` // (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized // to not be picked in successive generation passes. RepetitionPenalty *float64 `json:"repetitionpenalty,omitempty"` // (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. // Network can cause some overhead so it will be a soft limit. MaxTime *float64 `json:"maxtime,omitempty"` }
Used with ConversationalRequest
type ConversationalRequest ¶ added in v0.0.7
type ConversationalRequest struct { // (Required) Inputs ConverstationalInputs `json:"inputs,omitempty"` Parameters ConversationalParameters `json:"parameters,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
Request structure for the conversational endpoint
type ConversationalResponse ¶ added in v0.0.7
type ConversationalResponse struct { // The answer of the model GeneratedText string `json:"generated_text,omitempty"` // A facility dictionary to send back for the next input (with the new user input addition). Conversation Conversation `json:"conversation,omitempty"` }
Response structure for the conversational endpoint
type ConverstationalInputs ¶ added in v0.0.7
type ConverstationalInputs struct { // (Required) The last input from the user in the conversation. Text string `json:"text"` // A list of strings corresponding to the earlier replies from the model. GeneratedResponses []string `json:"generated_responses,omitempty"` // A list of strings corresponding to the earlier replies from the user. // Should be of the same length of GeneratedResponses. PastUserInputs []string `json:"past_user_inputs,omitempty"` }
Used with ConversationalRequest
type ErrorResponse ¶ added in v0.0.3
type ErrorResponse struct {
Error string `json:"error"`
}
type FeatureExtractionRequest ¶ added in v0.0.7
type FeatureExtractionRequest struct { // String to get the features from Inputs []string `json:"inputs"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
Request structure for the feature extraction endpoint
type FeatureExtractionResponse ¶ added in v0.0.7
type FeatureExtractionResponse [][][][]float32
Response structure for the feature extraction endpoint
type FeatureExtractionWithAutomaticReductionResponse ¶ added in v0.0.10
type FeatureExtractionWithAutomaticReductionResponse [][]float32
Response structure for the feature extraction endpoint when automatic reduction is applied, yielding a two-dimensional result instead of the four-dimensional FeatureExtractionResponse
type FillMaskRequest ¶ added in v0.0.5
type FillMaskRequest struct { // (Required) a string to be filled from, must contain the [MASK] token (check model card for exact name of the mask) Inputs []string `json:"inputs"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
Request structure for the Fill Mask endpoint
type FillMaskResponse ¶ added in v0.0.5
type FillMaskResponse []struct { // The actual sequence of tokens that ran against the model (may contain special tokens) Sequence string `json:"sequence,omitempty"` // The probability for this token. Score float64 `json:"score,omitempty"` // The id of the token TokenID int `json:"token,omitempty"` // The string representation of the token TokenStr string `json:"token_str,omitempty"` }
Response structure for the Fill Mask endpoint
type HTTPClient ¶
HTTPClient is an interface representing an HTTP client.
type InferenceClient ¶
type InferenceClient struct {
// contains filtered or unexported fields
}
InferenceClient is a client for performing inference using Hugging Face models.
func NewInferenceClient ¶
func NewInferenceClient(token string, optFns ...func(o *InferenceClientOptions)) *InferenceClient
NewInferenceClient creates a new InferenceClient instance with the specified token.
func (*InferenceClient) Conversational ¶ added in v0.0.7
func (ic *InferenceClient) Conversational(ctx context.Context, req *ConversationalRequest) (*ConversationalResponse, error)
Conversational performs conversational AI using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided conversational inputs. The response contains the generated conversational response or an error if the request fails.
func (*InferenceClient) FeatureExtraction ¶ added in v0.0.7
func (ic *InferenceClient) FeatureExtraction(ctx context.Context, req *FeatureExtractionRequest) (FeatureExtractionResponse, error)
FeatureExtraction performs feature extraction using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided input data. The response contains the extracted features or an error if the request fails.
func (*InferenceClient) FeatureExtractionWithAutomaticReduction ¶ added in v0.0.10
func (ic *InferenceClient) FeatureExtractionWithAutomaticReduction(ctx context.Context, req *FeatureExtractionRequest) (FeatureExtractionWithAutomaticReductionResponse, error)
FeatureExtractionWithAutomaticReduction performs feature extraction using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided input data. The response contains the extracted features automatically reduced to a two-dimensional representation, or an error if the request fails.
func (*InferenceClient) FillMask ¶ added in v0.0.5
func (ic *InferenceClient) FillMask(ctx context.Context, req *FillMaskRequest) (FillMaskResponse, error)
FillMask performs masked language modeling using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the generated text with the masked tokens filled or an error if the request fails.
func (*InferenceClient) QuestionAnswering ¶ added in v0.0.3
func (ic *InferenceClient) QuestionAnswering(ctx context.Context, req *QuestionAnsweringRequest) (*QuestionAnsweringResponse, error)
QuestionAnswering performs question answering using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided question and context inputs. The response contains the answer or an error if the request fails.
func (*InferenceClient) SentenceSimilarity ¶ added in v0.0.14
func (ic *InferenceClient) SentenceSimilarity(ctx context.Context, req *SentenceSimilarityRequest) (SentenceSimilarityResponse, error)
SentenceSimilarity sends a sentence similarity computation request to the InferenceClient and returns the sentence similarity response.
func (*InferenceClient) SetModel ¶ added in v0.0.8
func (ic *InferenceClient) SetModel(model string)
func (*InferenceClient) Summarization ¶
func (ic *InferenceClient) Summarization(ctx context.Context, req *SummarizationRequest) (SummarizationResponse, error)
Summarization performs text summarization using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the generated summary or an error if the request fails.
func (*InferenceClient) TableQuestionAnswering ¶ added in v0.0.6
func (ic *InferenceClient) TableQuestionAnswering(ctx context.Context, req *TableQuestionAnsweringRequest) (*TableQuestionAnsweringResponse, error)
TableQuestionAnswering performs table-based question answering using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the answer or an error if the request fails.
func (*InferenceClient) Text2TextGeneration ¶
func (ic *InferenceClient) Text2TextGeneration(ctx context.Context, req *Text2TextGenerationRequest) (Text2TextGenerationResponse, error)
Text2TextGeneration performs text-to-text generation using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the generated text or an error if the request fails.
func (*InferenceClient) TextClassification ¶ added in v0.0.9
func (ic *InferenceClient) TextClassification(ctx context.Context, req *TextClassificationRequest) (TextClassificationResponse, error)
TextClassification performs text classification using the provided request.
func (*InferenceClient) TextGeneration ¶
func (ic *InferenceClient) TextGeneration(ctx context.Context, req *TextGenerationRequest) (TextGenerationResponse, error)
TextGeneration performs text generation using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the generated text or an error if the request fails.
func (*InferenceClient) TokenClassification ¶ added in v0.0.12
func (ic *InferenceClient) TokenClassification(ctx context.Context, req *TokenClassificationRequest) (TokenClassificationResponse, error)
func (*InferenceClient) Translation ¶ added in v0.0.14
func (ic *InferenceClient) Translation(ctx context.Context, req *TranslationRequest) (TranslationResponse, error)
Translation sends a translation request to the InferenceClient and returns the translation response.
func (*InferenceClient) ZeroShotClassification ¶
func (ic *InferenceClient) ZeroShotClassification(ctx context.Context, req *ZeroShotClassificationRequest) (ZeroShotClassificationResponse, error)
ZeroShotClassification performs zero-shot classification using the specified model. It sends a POST request to the Hugging Face inference endpoint with the provided inputs. The response contains the classification results or an error if the request fails.
type InferenceClientOptions ¶
type InferenceClientOptions struct { Model string Endpoint string InferenceEndpoint string HTTPClient HTTPClient }
InferenceClientOptions represents options for the InferenceClient.
type Options ¶
type Options struct { // (Default: true). There is a cache layer on the inference API to speedup // requests we have already seen. Most models can use those results as is // as models are deterministic (meaning the results will be the same anyway). // However if you use a non deterministic model, you can set this parameter // to prevent the caching mechanism from being used resulting in a real new query. UseCache *bool `json:"use_cache,omitempty"` // (Default: false) If the model is not ready, wait for it instead of receiving 503. // It limits the number of requests required to get your inference done. It is advised // to only set this flag to true after receiving a 503 error as it will limit hanging // in your application to known places. WaitForModel *bool `json:"wait_for_model,omitempty"` }
type QuestionAnsweringInputs ¶ added in v0.0.3
type QuestionAnsweringRequest ¶ added in v0.0.3
type QuestionAnsweringRequest struct { // (Required) Inputs QuestionAnsweringInputs `json:"inputs,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
Request structure for question answering model
type QuestionAnsweringResponse ¶ added in v0.0.3
type QuestionAnsweringResponse struct { // A string that’s the answer within the Context text. Answer string `json:"answer,omitempty"` // A float that represents how likely that the answer is correct. Score float64 `json:"score,omitempty"` // The string index of the start of the answer within Context. Start int `json:"start,omitempty"` // The string index of the stop of the answer within Context. End int `json:"end,omitempty"` }
Response structure for question answering model
type SentenceSimilarityInputs ¶ added in v0.0.14
type SentenceSimilarityInputs struct { SourceSentence string `json:"source_sentence"` Sentences []string `json:"sentences"` }
SentenceSimilarityInputs represents the inputs for sentence similarity computation.
type SentenceSimilarityRequest ¶ added in v0.0.14
type SentenceSimilarityRequest struct { Inputs SentenceSimilarityInputs `json:"inputs"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
SentenceSimilarityRequest represents a request for sentence similarity computation.
type SentenceSimilarityResponse ¶ added in v0.0.14
type SentenceSimilarityResponse []float32
SentenceSimilarityResponse represents the response for a sentence similarity computation request.
type SummarizationParameters ¶
type SummarizationParameters struct { // (Default: None). Integer to define the minimum length in tokens of the output summary. MinLength *int `json:"min_length,omitempty"` // (Default: None). Integer to define the maximum length in tokens of the output summary. MaxLength *int `json:"max_length,omitempty"` // (Default: None). Integer to define the top tokens considered within the sample operation to create // new text. TopK *int `json:"top_k,omitempty"` // (Default: None). Float to define the tokens that are within the sample` operation of text generation. // Add tokens in the sample for more probable to least probable until the sum of the probabilities is // greater than top_p. TopP *float64 `json:"top_p,omitempty"` // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, // 0 mens top_k=1, 100.0 is getting closer to uniform probability. Temperature *float64 `json:"temperature,omitempty"` // (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized // to not be picked in successive generation passes. RepetitionPenalty *float64 `json:"repetitionpenalty,omitempty"` // (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. // Network can cause some overhead so it will be a soft limit. MaxTime *float64 `json:"maxtime,omitempty"` }
type SummarizationRequest ¶
type SummarizationRequest struct { // String to be summarized Inputs []string `json:"inputs"` Parameters SummarizationParameters `json:"parameters,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
type SummarizationResponse ¶
type SummarizationResponse []struct { // The summarized input string SummaryText string `json:"summary_text,omitempty"` }
type TableQuestionAnsweringInputs ¶ added in v0.0.6
type TableQuestionAnsweringInputs struct { // (Required) The query in plain text that you want to ask the table Query string `json:"query"` // (Required) A table of data represented as a dict of list where entries // are headers and the lists are all the values, all lists must // have the same size. Table map[string][]string `json:"table"` }
type TableQuestionAnsweringRequest ¶ added in v0.0.6
type TableQuestionAnsweringRequest struct { Inputs TableQuestionAnsweringInputs `json:"inputs"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
Request structure for table question answering model
type TableQuestionAnsweringResponse ¶ added in v0.0.6
type TableQuestionAnsweringResponse struct { // The plaintext answer Answer string `json:"answer,omitempty"` // A list of coordinates of the cells references in the answer Coordinates [][]int `json:"coordinates,omitempty"` // A list of coordinates of the cells contents Cells []string `json:"cells,omitempty"` // The aggregator used to get the answer Aggregator string `json:"aggregator,omitempty"` }
Response structure for table question answering model
type Text2TextGenerationParameters ¶
type Text2TextGenerationParameters struct { // (Default: None). Integer to define the top tokens considered within the sample operation to create new text. TopK *int `json:"top_k,omitempty"` // (Default: None). Float to define the tokens that are within the sample` operation of text generation. Add // tokens in the sample for more probable to least probable until the sum of the probabilities is greater // than top_p. TopP *float64 `json:"top_p,omitempty"` // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, // 0 means top_k=1, 100.0 is getting closer to uniform probability. Temperature *float64 `json:"temperature,omitempty"` // (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized // to not be picked in successive generation passes. RepetitionPenalty *float64 `json:"repetition_penalty,omitempty"` // (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input // length it is a estimate of the size of generated text you want. Each new tokens slows down the request, // so look for balance between response times and length of text generated. MaxNewTokens *int `json:"max_new_tokens,omitempty"` // (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. // Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens // for best results. MaxTime *float64 `json:"max_time,omitempty"` // (Default: True). Bool. If set to False, the return results will not contain the original query making it // easier for prompting. ReturnFullText *bool `json:"return_full_text,omitempty"` // (Default: 1). Integer. The number of proposition you want to be returned. NumReturnSequences *int `json:"num_return_sequences,omitempty"` }
type Text2TextGenerationRequest ¶
type Text2TextGenerationRequest struct { // String to generated from Inputs string `json:"inputs"` Parameters Text2TextGenerationParameters `json:"parameters,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
type Text2TextGenerationResponse ¶
type Text2TextGenerationResponse []struct { GeneratedText string `json:"generated_text,omitempty"` }
type TextClassificationRequest ¶ added in v0.0.9
type TextClassificationRequest struct { // Inputs is the string to be generated from. Inputs string `json:"inputs"` // Options represents optional settings for the classification. Options Options `json:"options,omitempty"` // Model is the name of the model to use for classification. Model string `json:"-"` }
TextClassificationRequest represents a request for text classification.
type TextClassificationResponse ¶ added in v0.0.9
type TextClassificationResponse [][]struct { // Label is the label for the class (model-specific). Label string `json:"label,omitempty"` // Score is a float that represents how likely it is that the text belongs to this class. Score float32 `json:"score,omitempty"` }
TextClassificationResponse represents a response for text classification.
type TextGenerationParameters ¶
type TextGenerationParameters struct { // (Default: None). Integer to define the top tokens considered within the sample operation to create new text. TopK *int `json:"top_k,omitempty"` // (Default: None). Float to define the tokens that are within the sample` operation of text generation. Add // tokens in the sample for more probable to least probable until the sum of the probabilities is greater // than top_p. TopP *float64 `json:"top_p,omitempty"` // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, // 0 means top_k=1, 100.0 is getting closer to uniform probability. Temperature *float64 `json:"temperature,omitempty"` // (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized // to not be picked in successive generation passes. RepetitionPenalty *float64 `json:"repetition_penalty,omitempty"` // (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input // length it is a estimate of the size of generated text you want. Each new tokens slows down the request, // so look for balance between response times and length of text generated. MaxNewTokens *int `json:"max_new_tokens,omitempty"` // (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. // Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens // for best results. MaxTime *float64 `json:"max_time,omitempty"` // (Default: True). Bool. If set to False, the return results will not contain the original query making it // easier for prompting. ReturnFullText *bool `json:"return_full_text,omitempty"` // (Default: 1). Integer. The number of proposition you want to be returned. NumReturnSequences *int `json:"num_return_sequences,omitempty"` }
type TextGenerationRequest ¶
type TextGenerationRequest struct { // String to generated from Inputs string `json:"inputs"` Parameters TextGenerationParameters `json:"parameters,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
type TextGenerationResponse ¶
type TextGenerationResponse []struct { GeneratedText string `json:"generated_text,omitempty"` }
A list of generated texts. The length of this list is the value of NumReturnSequences in the request.
type TokenClassificationRequest ¶ added in v0.0.12
type TokenClassificationRequest struct { // Inputs is a string to be classified. Inputs string `json:"inputs"` // Parameters contains token classification parameters. Parameters TokenClassificationarameters `json:"parameters"` // Options contains token classification options. Options Options `json:"options"` Model string `json:"-"` }
TokenClassificationRequest represents the input parameters for token classification.
type TokenClassificationResponse ¶ added in v0.0.12
type TokenClassificationResponse []struct { // EntityGroup is the type for the entity being recognized (model specific). EntityGroup string `json:"entity_group"` // Score indicates how likely the entity was recognized. Score float64 `json:"score"` // Word is the string that was captured. Word string `json:"word"` // Start is the offset stringwise where the answer is located. Useful to disambiguate if the word occurs multiple times. Start int `json:"start"` // End is the offset stringwise where the answer is located. Useful to disambiguate if the word occurs multiple times. End int `json:"end"` }
TokenClassificationResponse represents the output of the token classification.
type TokenClassificationarameters ¶ added in v0.0.12
type TokenClassificationarameters struct { // AggregationStrategy specifies the aggregation strategy. // - none: Every token gets classified without further aggregation. // - simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar). // - first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity. // - average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied. // - max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score. AggregationStrategy string `json:"aggregation_strategy,omitempty"` }
TokenClassificationarameters represents the parameters for token classification.
type TranslationRequest ¶ added in v0.0.14
type TranslationRequest struct { Inputs []string `json:"inputs"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
TranslationRequest represents a request for translation.
type TranslationResponse ¶ added in v0.0.14
type TranslationResponse []struct { TranslationText string `json:"translation_text"` }
TranslationResponse represents the response for a translation request.
type ZeroShotClassificationParameters ¶ added in v0.0.3
type ZeroShotClassificationParameters struct { // (Required) A list of strings that are potential classes for inputs. Max 10 candidate_labels, // for more, simply run multiple requests, results are going to be misleading if using // too many candidate_labels anyway. If you want to keep the exact same, you can // simply run multi_label=True and do the scaling on your end. CandidateLabels []string `json:"candidate_labels"` // (Default: false) Boolean that is set to True if classes can overlap MultiLabel *bool `json:"multi_label,omitempty"` }
type ZeroShotClassificationRequest ¶ added in v0.0.3
type ZeroShotClassificationRequest struct { // (Required) Input or Inputs are required request fields Inputs []string `json:"inputs"` // (Required) Parameters ZeroShotClassificationParameters `json:"parameters,omitempty"` Options Options `json:"options,omitempty"` Model string `json:"-"` }
type ZeroShotClassificationResponse ¶ added in v0.0.3
type ZeroShotClassificationResponse []struct { // The string sent as an input Sequence string `json:"sequence,omitempty"` // The list of labels sent in the request, sorted in descending order // by probability that the input corresponds to the to the label. Labels []string `json:"labels,omitempty"` // a list of floats that correspond the the probability of label, in the same order as labels. Scores []float64 `json:"scores,omitempty"` }
Source Files
¶
- conversational.go
- error.go
- feature_extraction.go
- fill_mask.go
- huggingface.go
- options.go
- question_answering.go
- sentence_similarity.go
- summarization.go
- table_question_answering.go
- text2text_generation.go
- text_classification.go
- text_generation.go
- token_classification.go
- translation.go
- zero_shot_classification.go