Documentation ¶
Index ¶
- Constants
- Variables
- func MakeHFAPIRequest(jsonBody []byte, model string) ([]byte, error)
- func MakeHFAPIRequestWithMedia(model, mediaFile string) ([]byte, error)
- func SendTextToImageRequest(model string, request *TextToImageRequest) (image.Image, error)
- func SetAPIKey(key string)
- type AggregationStrategy
- type AudioClassificationResponse
- type Conversation
- type ConversationalParameters
- func (c *ConversationalParameters) SetMaxLength(maxLength int) *ConversationalParameters
- func (c *ConversationalParameters) SetMaxTime(maxTime float64) *ConversationalParameters
- func (c *ConversationalParameters) SetMinLength(minLength int) *ConversationalParameters
- func (c *ConversationalParameters) SetRepetitionPenalty(penalty float64) *ConversationalParameters
- func (c *ConversationalParameters) SetTempurature(temperature float64) *ConversationalParameters
- func (c *ConversationalParameters) SetTopK(topK int) *ConversationalParameters
- func (c *ConversationalParameters) SetTopP(topP float64) *ConversationalParameters
- type ConversationalRequest
- type ConversationalResponse
- type ConverstationalInputs
- type FillMaskRequest
- type FillMaskResponse
- type FillMaskResponseEntry
- type ObjectBox
- type ObjectDetectionResponse
- type Options
- type QuestionAnsweringInputs
- type QuestionAnsweringRequest
- type QuestionAnsweringResponse
- type SentenceSimilarityInputs
- type SentenceSimilarityRequest
- type SentenceSimilarityResponse
- type SpeechRecognitionResponse
- type SummarizationParameters
- func (sp *SummarizationParameters) SetMaxLength(maxLength int) *SummarizationParameters
- func (sp *SummarizationParameters) SetMaxTime(maxTime float64) *SummarizationParameters
- func (sp *SummarizationParameters) SetMinLength(minLength int) *SummarizationParameters
- func (sp *SummarizationParameters) SetRepetitionPenalty(penalty float64) *SummarizationParameters
- func (sp *SummarizationParameters) SetTempurature(temperature float64) *SummarizationParameters
- func (sp *SummarizationParameters) SetTopK(topK int) *SummarizationParameters
- func (sp *SummarizationParameters) SetTopP(topP float64) *SummarizationParameters
- type SummarizationRequest
- type SummarizationResponse
- type TableQuestionAnsweringInputs
- type TableQuestionAnsweringRequest
- type TableQuestionAnsweringResponse
- type TextClassificationRequest
- type TextClassificationResponse
- type TextClassificationResponseLabel
- type TextGenerationParameters
- func (params *TextGenerationParameters) SetMaxNewTokens(maxNewTokens int) *TextGenerationParameters
- func (params *TextGenerationParameters) SetMaxTime(maxTime float64) *TextGenerationParameters
- func (params *TextGenerationParameters) SetNumReturnSequences(numReturnSequences int) *TextGenerationParameters
- func (params *TextGenerationParameters) SetRepetitionPenaly(penalty float64) *TextGenerationParameters
- func (params *TextGenerationParameters) SetReturnFullText(returnFullText bool) *TextGenerationParameters
- func (params *TextGenerationParameters) SetTempurature(temp float64) *TextGenerationParameters
- func (params *TextGenerationParameters) SetTopK(topK int) *TextGenerationParameters
- func (params *TextGenerationParameters) SetTopP(topP float64) *TextGenerationParameters
- type TextGenerationRequest
- type TextGenerationResponse
- type TextToImageRequest
- type TokenClassificationParameters
- type TokenClassificationRequest
- type TokenClassificationResponse
- type TokenClassificationResponseEntity
- type TranslationRequest
- type TranslationResponse
- type ZeroShotParameters
- type ZeroShotRequest
- type ZeroShotResponse
Constants ¶
const (
    AuthHeaderKey    = "Authorization"
    AuthHeaderPrefix = "Bearer "
)
const (
    MaxCandidateLabels       = 10
    RecommendedZeroShotModel = "facebook/bart-large-mnli"
)
const APIBaseURL = "https://api-inference.huggingface.co/models/"
const RecommendedAudioClassificationModel = "superb/hubert-large-superb-er"
const RecommendedConversationalModel = "microsoft/DialoGPT-large"
const RecommendedFillMaskModel = "bert-base-uncased"
const RecommendedObjectDetectionModel = "facebook/detr-resnet-50"
const RecommendedQuestionAnsweringModel = "deepset/roberta-base-squad2"
const (
RecommendedRussianToEnglishModel = "Helsinki-NLP/opus-mt-ru-en"
)
const (
RecommendedSentenceSimilarityModel = "sentence-transformers/all-MiniLM-L6-v2"
)
const RecommendedSpeechRecongnitionModelEnglish = "facebook/wav2vec2-large-960h-lv60-self"
const RecommendedTableQuestionAnsweringModel = "google/tapas-base-finetuned-wtq"
const (
RecommendedTextClassificationModel = "distilbert-base-uncased-finetuned-sst-2-english"
)
const RecommendedTextGenerationModel = "gpt2"
const RecommendedTextToImageModel = "stabilityai/stable-diffusion-2-1"
const RecommendedTokenClassificationModel = "dbmdz/bert-large-cased-finetuned-conll03-english"
const RecommmendedSummarizationModel = "facebook/bart-large-cnn"
Variables ¶
var APIKey = func() string { return "" }
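A minimal setup sketch follows; the import path, the alias hf, and the environment variable name are placeholders rather than part of this package.

package main

import (
    "os"

    hf "example.com/hfapi" // placeholder: substitute this package's actual module path
)

func main() {
    // Read a Hugging Face token from the environment (the variable name is arbitrary)
    // and register it. Subsequent requests can then read it via APIKey() and send it
    // as a Bearer token in the Authorization header.
    hf.SetAPIKey(os.Getenv("HUGGING_FACE_TOKEN"))
}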
Functions ¶
func MakeHFAPIRequest ¶
func MakeHFAPIRequest(jsonBody []byte, model string) ([]byte, error)
MakeHFAPIRequest builds and sends an HTTP POST request to the given model using the provided JSON body. If the request is successful, it returns the response JSON and a nil error. If the request fails, it returns an empty slice and an error describing the failure.
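A sketch of a raw request; the marshaled payload is illustrative, and hf is a placeholder import alias for this package.

// assumes: import "encoding/json" and import hf "<this package's module path>"
func rawRequestExample() ([]byte, error) {
    body, err := json.Marshal(map[string]interface{}{"inputs": "The answer to the universe is"})
    if err != nil {
        return nil, err
    }
    // Returns the raw response JSON from the model endpoint.
    return hf.MakeHFAPIRequest(body, hf.RecommendedTextGenerationModel)
}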
func SendTextToImageRequest ¶
func SendTextToImageRequest(model string, request *TextToImageRequest) (image.Image, error)
Types ¶
type AggregationStrategy ¶
type AggregationStrategy string
const (
    // Every token gets classified without further aggregation.
    AggregationStrategyNone AggregationStrategy = "none"
    // Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar).
    AggregationStrategySimple AggregationStrategy = "simple"
    // Same as the simple strategy except entities cannot end up with different tags.
    // Entities will use the tag of the first token when there is ambiguity.
    AggregationStrategyFirst AggregationStrategy = "first"
    // Same as the simple strategy except entities cannot end up with different tags.
    // Scores are averaged across tokens and then the maximum label is applied.
    AggregationStrategyAverage AggregationStrategy = "average"
    // Same as the simple strategy except entities cannot end up with different tags.
    // The entity will be the token with the maximum score.
    AggregationStrategyMax AggregationStrategy = "max"
)
type AudioClassificationResponse ¶
type AudioClassificationResponse struct {
    Score float64 `json:"score,omitempty"`
    Label string `json:"label,omitempty"`
}
Response structure for audio classification endpoint
func SendAudioClassificationRequest ¶
func SendAudioClassificationRequest(model, audioFile string) ([]*AudioClassificationResponse, error)
type Conversation ¶
type Conversation struct {
    // The last outputs from the model in the conversation, after the model has run.
    GeneratedResponses []string `json:"generated_responses,omitempty"`
    // The last inputs from the user in the conversation, after the model has run.
    PastUserInputs []string `json:"past_user_inputs,omitempty"`
}
Used with ConversationalResponse
type ConversationalParameters ¶
type ConversationalParameters struct {
    // (Default: None). Integer to define the minimum length in tokens of the output.
    MinLength *int `json:"min_length,omitempty"`
    // (Default: None). Integer to define the maximum length in tokens of the output.
    MaxLength *int `json:"max_length,omitempty"`
    // (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
    TopK *int `json:"top_k,omitempty"`
    // (Default: None). Float to define the tokens that are within the sample operation of text generation.
    // Add tokens in the sample from most probable to least probable until the sum of the probabilities
    // is greater than top_p.
    TopP *float64 `json:"top_p,omitempty"`
    // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling,
    // 0 means top_k=1, and 100.0 approaches uniform probability.
    Temperature *float64 `json:"temperature,omitempty"`
    // (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized
    // so it is less likely to be picked in successive generation passes.
    RepetitionPenalty *float64 `json:"repetitionpenalty,omitempty"`
    // (Default: None). Float (0.0-120.0). The maximum amount of time in seconds that the query should take.
    // Network overhead makes this a soft limit.
    MaxTime *float64 `json:"maxtime,omitempty"`
}
Used with ConversationalRequest
func NewConversationalParameters ¶
func NewConversationalParameters() *ConversationalParameters
func (*ConversationalParameters) SetMaxLength ¶
func (c *ConversationalParameters) SetMaxLength(maxLength int) *ConversationalParameters
func (*ConversationalParameters) SetMaxTime ¶
func (c *ConversationalParameters) SetMaxTime(maxTime float64) *ConversationalParameters
func (*ConversationalParameters) SetMinLength ¶
func (c *ConversationalParameters) SetMinLength(minLength int) *ConversationalParameters
func (*ConversationalParameters) SetRepetitionPenalty ¶
func (c *ConversationalParameters) SetRepetitionPenalty(penalty float64) *ConversationalParameters
func (*ConversationalParameters) SetTempurature ¶
func (c *ConversationalParameters) SetTempurature(temperature float64) *ConversationalParameters
func (*ConversationalParameters) SetTopK ¶
func (c *ConversationalParameters) SetTopK(topK int) *ConversationalParameters
func (*ConversationalParameters) SetTopP ¶
func (c *ConversationalParameters) SetTopP(topP float64) *ConversationalParameters
type ConversationalRequest ¶
type ConversationalRequest struct {
    // (Required)
    Inputs ConverstationalInputs `json:"inputs,omitempty"`
    Parameters ConversationalParameters `json:"parameters,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the conversational endpoint
type ConversationalResponse ¶
type ConversationalResponse struct {
    // The answer of the model
    GeneratedText string `json:"generated_text,omitempty"`
    // A facility dictionary to send back for the next input (with the new user input addition).
    Conversation Conversation `json:"conversation,omitempty"`
}
Response structure for the conversational endpoint
func SendConversationalRequest ¶
func SendConversationalRequest(model string, request *ConversationalRequest) (*ConversationalResponse, error)
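A sketch of a multi-turn request; the conversation history and the import alias hf are illustrative assumptions. The history slices would normally be carried forward from the Conversation field of the previous response.

// assumes: import hf "<this package's module path>"
func conversationExample() (*hf.ConversationalResponse, error) {
    req := &hf.ConversationalRequest{
        Inputs: hf.ConverstationalInputs{
            Text:               "Can you recommend a book?",
            PastUserInputs:     []string{"Hi, how are you?"},
            GeneratedResponses: []string{"I'm doing well, thanks!"},
        },
        Parameters: *hf.NewConversationalParameters().SetMaxLength(100).SetTempurature(0.9),
    }
    return hf.SendConversationalRequest(hf.RecommendedConversationalModel, req)
}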
type ConverstationalInputs ¶
type ConverstationalInputs struct {
    // (Required) The last input from the user in the conversation.
    Text string `json:"text,omitempty"`
    // A list of strings corresponding to the earlier replies from the model.
    GeneratedResponses []string `json:"generated_responses,omitempty"`
    // A list of strings corresponding to the earlier replies from the user.
    // Should be the same length as GeneratedResponses.
    PastUserInputs []string `json:"past_user_inputs,omitempty"`
}
Used with ConversationalRequest
type FillMaskRequest ¶
type FillMaskRequest struct {
    // (Required) Strings to be filled in; each must contain the [MASK] token
    // (check the model card for the exact name of the mask).
    Inputs []string `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the Fill Mask endpoint
type FillMaskResponse ¶
type FillMaskResponse struct {
Masks []*FillMaskResponseEntry
}
Response structure for the Fill Mask endpoint
func SendFillMaskRequest ¶
func SendFillMaskRequest(model string, request *FillMaskRequest) ([]*FillMaskResponse, error)
type FillMaskResponseEntry ¶
type FillMaskResponseEntry struct {
    // The actual sequence of tokens that ran against the model (may contain special tokens).
    Sequence string `json:"sequence,omitempty"`
    // The probability for this token.
    Score float64 `json:"score,omitempty"`
    // The id of the token.
    TokenID int `json:"token,omitempty"`
    // The string representation of the token.
    TokenStr string `json:"token_str,omitempty"`
}
Used in the FillMaskResponse struct
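A sketch of a fill-mask call that prints the candidate tokens for each masked input; the input sentence and import alias hf are illustrative.

// assumes: import "fmt" and import hf "<this package's module path>"
func fillMaskExample() error {
    resps, err := hf.SendFillMaskRequest(hf.RecommendedFillMaskModel, &hf.FillMaskRequest{
        Inputs: []string{"The capital of France is [MASK]."},
    })
    if err != nil {
        return err
    }
    for _, r := range resps {
        for _, m := range r.Masks {
            fmt.Printf("%s (score %.3f)\n", m.TokenStr, m.Score)
        }
    }
    return nil
}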
type ObjectDetectionResponse ¶
type ObjectDetectionResponse struct {
    // The label for the class (model specific) of a detected object.
    Label string `json:"label,omitempty"`
    // A float that represents how likely it is that the detected object belongs to the given class.
    Score float64 `json:"score,omitempty"`
    // Bounding box of the detected object.
    Box ObjectBox
}
func SendObjectDetectionRequest ¶
func SendObjectDetectionRequest(model, imageFile string) ([]*ObjectDetectionResponse, error)
type Options ¶
type Options struct {
    // (Default: false). Boolean to use GPU instead of CPU for inference.
    // Requires at least the Startup plan.
    UseGPU *bool `json:"use_gpu,omitempty"`
    // (Default: true). There is a cache layer on the Inference API to speed up
    // requests that have already been seen. Most models can use those results as-is,
    // since models are deterministic (meaning the results will be the same anyway).
    // However, if you use a non-deterministic model, you can set this parameter
    // to prevent the caching mechanism from being used, resulting in a real new query.
    UseCache *bool `json:"use_cache,omitempty"`
    // (Default: false). If the model is not ready, wait for it instead of receiving a 503.
    // This limits the number of requests required to get your inference done. It is advised
    // to only set this flag to true after receiving a 503 error, as it will limit hanging
    // in your application to known places.
    WaitForModel *bool `json:"wait_for_model,omitempty"`
}
func NewOptions ¶
func NewOptions() *Options
func (*Options) SetUseCache ¶
func (*Options) SetWaitForModel ¶
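The SetUseCache and SetWaitForModel setters exist but their signatures are not listed above, so this sketch sets the pointer fields directly; hf is a placeholder import alias for this package.

// assumes: import hf "<this package's module path>"
func requestOptions() hf.Options {
    useCache := false
    wait := true
    // The returned value can be assigned to the Options field of any request type.
    return hf.Options{UseCache: &useCache, WaitForModel: &wait}
}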
type QuestionAnsweringInputs ¶
type QuestionAnsweringRequest ¶
type QuestionAnsweringRequest struct {
    // (Required)
    Inputs QuestionAnsweringInputs `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for question answering model
type QuestionAnsweringResponse ¶
type QuestionAnsweringResponse struct {
    // A string that is the answer within the Context text.
    Answer string `json:"answer,omitempty"`
    // A float that represents how likely it is that the answer is correct.
    Score float64 `json:"score,omitempty"`
    // The string index of the start of the answer within Context.
    Start int `json:"start,omitempty"`
    // The string index of the end of the answer within Context.
    End int `json:"end,omitempty"`
}
Response structure for question answering model
func SendQuestionAnsweringRequest ¶
func SendQuestionAnsweringRequest(model string, request *QuestionAnsweringRequest) (*QuestionAnsweringResponse, error)
type SentenceSimilarityInputs ¶
type SentenceSimilarityInputs struct {
    // (Required) The string that you wish to compare the other strings with.
    // This can be a phrase, sentence, or longer passage, depending on the model being used.
    SourceSentence string `json:"source_sentence,omitempty"`
    // A list of strings which will be compared against the source_sentence.
    Sentences []string `json:"sentences,omitempty"`
}
type SentenceSimilarityRequest ¶
type SentenceSimilarityRequest struct {
    // (Required) Inputs for the request.
    Inputs SentenceSimilarityInputs `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the Sentence Similarity endpoint.
type SentenceSimilarityResponse ¶
type SentenceSimilarityResponse []float64
Response structure from the Sentence Similarity endpoint. The return value is a list of similarity scores, given as floats. Each list entry corresponds to the Inputs.Sentences list entry of the same index.
func SendSentenceSimilarityRequest ¶
func SendSentenceSimilarityRequest(model string, request *SentenceSimilarityRequest) (*SentenceSimilarityResponse, error)
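A sketch comparing one source sentence against two candidates; the sentences and import alias hf are illustrative.

// assumes: import "fmt" and import hf "<this package's module path>"
func similarityExample() error {
    resp, err := hf.SendSentenceSimilarityRequest(hf.RecommendedSentenceSimilarityModel, &hf.SentenceSimilarityRequest{
        Inputs: hf.SentenceSimilarityInputs{
            SourceSentence: "That is a happy person",
            Sentences:      []string{"That is a happy dog", "Today is a sunny day"},
        },
    })
    if err != nil {
        return err
    }
    fmt.Println(*resp) // one similarity score per entry in Sentences, in the same order
    return nil
}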
type SpeechRecognitionResponse ¶
type SpeechRecognitionResponse struct {
    // The string that was recognized within the audio file.
    Text string `json:"text,omitempty"`
}
func SendSpeechRecognitionRequest ¶
func SendSpeechRecognitionRequest(model, audioFile string) (*SpeechRecognitionResponse, error)
SendSpeechRecognitionRequest takes a model string and a path to an audio file. It reads the file and sends a request to the speech recognition endpoint.
type SummarizationParameters ¶
type SummarizationParameters struct {
    // (Default: None). Integer to define the minimum length in tokens of the output summary.
    MinLength *int `json:"min_length,omitempty"`
    // (Default: None). Integer to define the maximum length in tokens of the output summary.
    MaxLength *int `json:"max_length,omitempty"`
    // (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
    TopK *int `json:"top_k,omitempty"`
    // (Default: None). Float to define the tokens that are within the sample operation of text generation.
    // Add tokens in the sample from most probable to least probable until the sum of the probabilities
    // is greater than top_p.
    TopP *float64 `json:"top_p,omitempty"`
    // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling,
    // 0 means top_k=1, and 100.0 approaches uniform probability.
    Temperature *float64 `json:"temperature,omitempty"`
    // (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized
    // so it is less likely to be picked in successive generation passes.
    RepetitionPenalty *float64 `json:"repetitionpenalty,omitempty"`
    // (Default: None). Float (0.0-120.0). The maximum amount of time in seconds that the query should take.
    // Network overhead makes this a soft limit.
    MaxTime *float64 `json:"maxtime,omitempty"`
}
Used with SummarizationRequest
func NewSummarizationParameters ¶
func NewSummarizationParameters() *SummarizationParameters
func (*SummarizationParameters) SetMaxLength ¶
func (sp *SummarizationParameters) SetMaxLength(maxLength int) *SummarizationParameters
func (*SummarizationParameters) SetMaxTime ¶
func (sp *SummarizationParameters) SetMaxTime(maxTime float64) *SummarizationParameters
func (*SummarizationParameters) SetMinLength ¶
func (sp *SummarizationParameters) SetMinLength(minLength int) *SummarizationParameters
func (*SummarizationParameters) SetRepetitionPenalty ¶
func (sp *SummarizationParameters) SetRepetitionPenalty(penalty float64) *SummarizationParameters
func (*SummarizationParameters) SetTempurature ¶
func (sp *SummarizationParameters) SetTempurature(temperature float64) *SummarizationParameters
func (*SummarizationParameters) SetTopK ¶
func (sp *SummarizationParameters) SetTopK(topK int) *SummarizationParameters
func (*SummarizationParameters) SetTopP ¶
func (sp *SummarizationParameters) SetTopP(topP float64) *SummarizationParameters
type SummarizationRequest ¶
type SummarizationRequest struct {
    // Strings to be summarized
    Inputs []string `json:"inputs,omitempty"`
    Parameters SummarizationParameters `json:"parameters,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the summarization endpoint
type SummarizationResponse ¶
type SummarizationResponse struct {
    // The summarized input string
    SummaryText string `json:"summary_text,omitempty"`
}
Response structure for the summarization endpoint
func SendSummarizationRequest ¶
func SendSummarizationRequest(model string, request *SummarizationRequest) ([]*SummarizationResponse, error)
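A sketch of a summarization call with explicit length bounds; the input text and import alias hf are illustrative.

// assumes: import hf "<this package's module path>"
func summarizeExample() ([]*hf.SummarizationResponse, error) {
    req := &hf.SummarizationRequest{
        Inputs:     []string{"A long passage of text to condense..."},
        Parameters: *hf.NewSummarizationParameters().SetMinLength(10).SetMaxLength(60),
    }
    return hf.SendSummarizationRequest(hf.RecommmendedSummarizationModel, req)
}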
type TableQuestionAnsweringInputs ¶
type TableQuestionAnsweringInputs struct {
    // (Required) The query in plain text that you want to ask the table.
    Query string `json:"query,omitempty"`
    // (Required) A table of data represented as a dict of lists, where the entries
    // are headers and the lists are all the values. All lists must have the same size.
    Table map[string][]string `json:"table,omitempty"`
}
type TableQuestionAnsweringRequest ¶
type TableQuestionAnsweringRequest struct {
    Inputs TableQuestionAnsweringInputs `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for table question answering model
type TableQuestionAnsweringResponse ¶
type TableQuestionAnsweringResponse struct {
    // The plaintext answer
    Answer string `json:"answer,omitempty"`
    // A list of coordinates of the cells referenced in the answer
    Coordinates [][]int `json:"coordinates,omitempty"`
    // A list of the contents of the cells referenced in the answer
    Cells []string `json:"cells,omitempty"`
    // The aggregator used to get the answer
    Aggregator string `json:"aggregator,omitempty"`
}
Response structure for table question answering model
func SendTableQuestionAnsweringRequest ¶
func SendTableQuestionAnsweringRequest(model string, request *TableQuestionAnsweringRequest) (*TableQuestionAnsweringResponse, error)
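A sketch of a table question; the table data and import alias hf are illustrative, and every column holds the same number of values as required.

// assumes: import hf "<this package's module path>"
func tableQAExample() (*hf.TableQuestionAnsweringResponse, error) {
    req := &hf.TableQuestionAnsweringRequest{
        Inputs: hf.TableQuestionAnsweringInputs{
            Query: "How many stars does the Transformers repository have?",
            Table: map[string][]string{
                "Repository": {"Transformers", "Datasets", "Tokenizers"},
                "Stars":      {"36542", "4512", "3934"},
            },
        },
    }
    return hf.SendTableQuestionAnsweringRequest(hf.RecommendedTableQuestionAnsweringModel, req)
}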
type TextClassificationRequest ¶
type TextClassificationRequest struct {
    // (Required) strings to be classified
    Inputs []string `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the Text classification endpoint
type TextClassificationResponse ¶
type TextClassificationResponse struct {
    // HFAPI returns a list of labels and their associated scores for each input.
    Labels []*TextClassificationResponseLabel
}
Response structure for the Text classification endpoint
func SendTextClassificationRequest ¶
func SendTextClassificationRequest(model string, request *TextClassificationRequest) ([]*TextClassificationResponse, error)
type TextClassificationResponseLabel ¶
type TextClassificationResponseLabel struct {
    // The label for the class (model specific)
    Name string `json:"label,omitempty"`
    // A float that represents how likely it is that the text belongs in this class.
    Score float64 `json:"score,omitempty"`
}
Used in TextClassificationResponse
type TextGenerationParameters ¶
type TextGenerationParameters struct {
    // (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
    TopK *int `json:"top_k,omitempty"`
    // (Default: None). Float to define the tokens that are within the sample operation of text generation.
    // Add tokens in the sample from most probable to least probable until the sum of the probabilities
    // is greater than top_p.
    TopP *float64 `json:"top_p,omitempty"`
    // (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling,
    // 0 means top_k=1, and 100.0 approaches uniform probability.
    Temperature *float64 `json:"temperature,omitempty"`
    // (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized
    // so it is less likely to be picked in successive generation passes.
    RepetitionPenalty *float64 `json:"repetition_penalty,omitempty"`
    // (Default: None). Int (0-250). The number of new tokens to be generated. This does not include the input
    // length; it is an estimate of the size of generated text you want. Each new token slows down the request,
    // so look for a balance between response time and length of generated text.
    MaxNewTokens *int `json:"max_new_tokens,omitempty"`
    // (Default: None). Float (0.0-120.0). The maximum amount of time in seconds that the query should take.
    // Network overhead makes this a soft limit. Use this in combination with max_new_tokens for best results.
    MaxTime *float64 `json:"max_time,omitempty"`
    // (Default: True). Bool. If set to False, the returned results will not contain the original query,
    // making it easier for prompting.
    ReturnFullText *bool `json:"return_full_text,omitempty"`
    // (Default: 1). Integer. The number of generated sequences you want to be returned.
    NumReturnSequences *int `json:"num_return_sequences,omitempty"`
}
func NewTextGenerationParameters ¶
func NewTextGenerationParameters() *TextGenerationParameters
func (*TextGenerationParameters) SetMaxNewTokens ¶
func (params *TextGenerationParameters) SetMaxNewTokens(maxNewTokens int) *TextGenerationParameters
func (*TextGenerationParameters) SetMaxTime ¶
func (params *TextGenerationParameters) SetMaxTime(maxTime float64) *TextGenerationParameters
func (*TextGenerationParameters) SetNumReturnSequences ¶
func (params *TextGenerationParameters) SetNumReturnSequences(numReturnSequences int) *TextGenerationParameters
func (*TextGenerationParameters) SetRepetitionPenaly ¶
func (params *TextGenerationParameters) SetRepetitionPenaly(penalty float64) *TextGenerationParameters
func (*TextGenerationParameters) SetReturnFullText ¶
func (params *TextGenerationParameters) SetReturnFullText(returnFullText bool) *TextGenerationParameters
func (*TextGenerationParameters) SetTempurature ¶
func (params *TextGenerationParameters) SetTempurature(temp float64) *TextGenerationParameters
func (*TextGenerationParameters) SetTopK ¶
func (params *TextGenerationParameters) SetTopK(topK int) *TextGenerationParameters
func (*TextGenerationParameters) SetTopP ¶
func (params *TextGenerationParameters) SetTopP(topP float64) *TextGenerationParameters
type TextGenerationRequest ¶
type TextGenerationRequest struct {
    // (Required) strings to be generated from
    Inputs []string `json:"inputs,omitempty"`
    Parameters TextGenerationParameters `json:"parameters,omitempty"`
    Options Options `json:"options,omitempty"`
}
type TextGenerationResponse ¶
type TextGenerationResponse struct {
    // A list of generated texts. The length of this list is the value of
    // NumReturnSequences in the request.
    GeneratedTexts []string
}
func SendTextGenerationRequest ¶
func SendTextGenerationRequest(model string, request *TextGenerationRequest) ([]*TextGenerationResponse, error)
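A sketch of a generation call using the fluent parameter setters; the prompt and import alias hf are illustrative.

// assumes: import hf "<this package's module path>"
func textGenerationExample() ([]*hf.TextGenerationResponse, error) {
    req := &hf.TextGenerationRequest{
        Inputs: []string{"Once upon a time"},
        Parameters: *hf.NewTextGenerationParameters().
            SetMaxNewTokens(50).
            SetNumReturnSequences(2).
            SetReturnFullText(false),
    }
    return hf.SendTextGenerationRequest(hf.RecommendedTextGenerationModel, req)
}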
type TextToImageRequest ¶
type TextToImageRequest struct {
    // (Required) a string to be generated from
    Inputs string `json:"inputs,omitempty"`
}
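A sketch that generates an image and writes it out as a PNG; the prompt, output file name, and import alias hf are illustrative.

// assumes: import "image/png", "os", and import hf "<this package's module path>"
func textToImageExample() error {
    img, err := hf.SendTextToImageRequest(hf.RecommendedTextToImageModel, &hf.TextToImageRequest{
        Inputs: "a watercolor painting of a lighthouse at dawn",
    })
    if err != nil {
        return err
    }
    f, err := os.Create("lighthouse.png")
    if err != nil {
        return err
    }
    defer f.Close()
    return png.Encode(f, img)
}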
type TokenClassificationParameters ¶
type TokenClassificationParameters struct {
    // (Default: simple)
    AggregationStrategy *AggregationStrategy `json:"aggregation_strategy,omitempty"`
}
func NewTokenClassificationParameters ¶
func NewTokenClassificationParameters() *TokenClassificationParameters
func (*TokenClassificationParameters) SetAggregationStrategy ¶
func (params *TokenClassificationParameters) SetAggregationStrategy(aggregationStrategy AggregationStrategy) *TokenClassificationParameters
type TokenClassificationRequest ¶
type TokenClassificationRequest struct {
    // (Required) strings to be classified
    Inputs []string `json:"inputs,omitempty"`
    Parameters TokenClassificationParameters `json:"parameters,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the token classification endpoint
type TokenClassificationResponse ¶
type TokenClassificationResponse struct {
Entities []*TokenClassificationResponseEntity
}
Response structure for the token classification endpoint
func SendTokenClassificationRequest ¶
func SendTokenClassificationRequest(model string, request *TokenClassificationRequest) ([]*TokenClassificationResponse, error)
type TokenClassificationResponseEntity ¶
type TokenClassificationResponseEntity struct {
    // The type of the entity being recognized (model specific).
    Label string `json:"entity_group,omitempty"`
    // How likely it is that the entity was correctly recognized.
    Score float64 `json:"score,omitempty"`
    // The string that was captured.
    Entity string `json:"word,omitempty"`
    // The string offset of the start of the entity. Useful to disambiguate if Entity occurs multiple times.
    Start int `json:"start,omitempty"`
    // The string offset of the end of the entity. Useful to disambiguate if Entity occurs multiple times.
    End int `json:"end,omitempty"`
}
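A sketch of named-entity extraction using the simple aggregation strategy; the input sentence and import alias hf are illustrative.

// assumes: import "fmt" and import hf "<this package's module path>"
func tokenClassificationExample() error {
    req := &hf.TokenClassificationRequest{
        Inputs:     []string{"My name is Sarah and I live in London"},
        Parameters: *hf.NewTokenClassificationParameters().SetAggregationStrategy(hf.AggregationStrategySimple),
    }
    resps, err := hf.SendTokenClassificationRequest(hf.RecommendedTokenClassificationModel, req)
    if err != nil {
        return err
    }
    for _, r := range resps {
        for _, e := range r.Entities {
            fmt.Printf("%s -> %s (%.2f)\n", e.Entity, e.Label, e.Score)
        }
    }
    return nil
}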
type TranslationRequest ¶
type TranslationRequest struct {
    // (Required) strings to be translated, in the original language
    Inputs []string `json:"inputs,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the Translation endpoint
type TranslationResponse ¶
type TranslationResponse struct {
    // The translated Input string
    TranslationText string `json:"translation_text,omitempty"`
}
Response structure from the Translation endpoint
func SendTranslationRequest ¶
func SendTranslationRequest(model string, request *TranslationRequest) ([]*TranslationResponse, error)
type ZeroShotParameters ¶
type ZeroShotParameters struct {
    // (Required) A list of strings that are potential classes for the inputs. A maximum of 10 candidate_labels
    // is allowed; for more, simply run multiple requests. Results are going to be misleading with too many
    // candidate_labels anyway. If you want to keep the exact same results, you can set multi_label=True
    // and do the scaling on your end.
    CandidateLabels []string `json:"candidate_labels,omitempty"`
    // (Default: false) Boolean that is set to true if classes can overlap.
    MultiLabel *bool `json:"multi_label,omitempty"`
}
Used with ZeroShotRequest
func (*ZeroShotParameters) SetMultiLabel ¶
func (zsp *ZeroShotParameters) SetMultiLabel(multiLabel bool) *ZeroShotParameters
type ZeroShotRequest ¶
type ZeroShotRequest struct {
    // (Required) Input or Inputs are required request fields
    Inputs []string `json:"inputs,omitempty"`
    // (Required)
    Parameters ZeroShotParameters `json:"parameters,omitempty"`
    Options Options `json:"options,omitempty"`
}
Request structure for the Zero-shot classification endpoint.
One of the following fields is required:
- Input
- Inputs
type ZeroShotResponse ¶
type ZeroShotResponse struct {
    // The string sent as an input
    Sequence string `json:"sequence,omitempty"`
    // The list of labels sent in the request, sorted in descending order
    // by probability that the input corresponds to the label.
    Labels []string `json:"labels,omitempty"`
    // A list of floats that correspond to the probability of each label, in the same order as Labels.
    Scores []float64 `json:"scores,omitempty"`
}
Response structure from the Zero-shot classification endpoint.
func SendZeroShotRequest ¶
func SendZeroShotRequest(model string, request *ZeroShotRequest) ([]*ZeroShotResponse, error)
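A sketch of a zero-shot classification call; the candidate labels, input text, and import alias hf are illustrative.

// assumes: import hf "<this package's module path>"
func zeroShotExample() ([]*hf.ZeroShotResponse, error) {
    params := &hf.ZeroShotParameters{
        // At most MaxCandidateLabels (10) labels per request.
        CandidateLabels: []string{"refund", "shipping", "complaint"},
    }
    req := &hf.ZeroShotRequest{
        Inputs:     []string{"My package never arrived and I want my money back."},
        Parameters: *params.SetMultiLabel(false),
    }
    return hf.SendZeroShotRequest(hf.RecommendedZeroShotModel, req)
}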
Source Files ¶
- audio_classification.go
- conversational.go
- fill_mask.go
- globals.go
- object_detection.go
- options.go
- question_answering.go
- sentence_similarity.go
- speech_recognition.go
- summarization.go
- table_question_answering.go
- text_classification.go
- text_generation.go
- text_to_image.go
- token_classification.go
- translation.go
- zeroshot_classification.go