Documentation ¶
Overview ¶
Package evaluation provides utilities for assessing the performance of generative models.
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type COTQAEvalChain ¶
type COTQAEvalChain struct {
*ContextQAEvalChain
}
COTQAEvalChain is an LLM chain specifically for evaluating QA using chain-of-thought reasoning.
func NewCOTQAEvalChain ¶
func NewCOTQAEvalChain(llm schema.LLM, optFns ...func(o *COTQAEvalChainOptions)) (*COTQAEvalChain, error)
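A minimal construction sketch, assuming this module's evaluation and schema packages are imported and an llm value satisfying schema.LLM has already been obtained from your model provider:

func newCOTEvaluator(llm schema.LLM) (*evaluation.COTQAEvalChain, error) {
	// The variadic option functions are omitted here, so the chain
	// is created with its default COTQAEvalChainOptions.
	return evaluation.NewCOTQAEvalChain(llm)
}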
type COTQAEvalChainOptions ¶
type ContextQAEvalChain ¶
type ContextQAEvalChain struct {
// contains filtered or unexported fields
}
ContextQAEvalChain is an LLM chain specifically for evaluating QA without ground truth, based on context.
func NewContextQAEvalChain ¶
func NewContextQAEvalChain(llm schema.LLM, optFns ...func(o *ContextQAEvalChainOptions)) (*ContextQAEvalChain, error)
func (*ContextQAEvalChain) Evaluate ¶
func (eval *ContextQAEvalChain) Evaluate(ctx context.Context, examples, predictions []map[string]string) ([]schema.ChainValues, error)
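A usage sketch for Evaluate: each example pairs a question with supporting context, and each prediction holds the model output to be graded. The key names "query", "context", and "result" are assumptions about the chain's default input keys; confirm or override them via ContextQAEvalChainOptions.

func evaluateWithContext(ctx context.Context, llm schema.LLM) ([]schema.ChainValues, error) {
	eval, err := evaluation.NewContextQAEvalChain(llm)
	if err != nil {
		return nil, err
	}
	// Key names below are assumed defaults, not confirmed by this package.
	examples := []map[string]string{
		{"query": "What color is the sky?", "context": "On a clear day the sky is blue."},
	}
	predictions := []map[string]string{
		{"result": "The sky is blue."},
	}
	return eval.Evaluate(ctx, examples, predictions)
}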
type QAEvalChain ¶
type QAEvalChain struct {
// contains filtered or unexported fields
}
QAEvalChain is an LLM chain specifically for evaluating question answering.
func NewQAEvalChain ¶
func NewQAEvalChain(llm schema.LLM, optFns ...func(o *QAEvalChainOptions)) (*QAEvalChain, error)
func (*QAEvalChain) AnswerKey ¶
func (eval *QAEvalChain) AnswerKey() string
func (*QAEvalChain) Evaluate ¶
func (eval *QAEvalChain) Evaluate(ctx context.Context, examples, predictions []map[string]string) ([]schema.ChainValues, error)
func (*QAEvalChain) PredictionKey ¶
func (eval *QAEvalChain) PredictionKey() string
func (*QAEvalChain) QuestionKey ¶
func (eval *QAEvalChain) QuestionKey() string
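Putting the accessors together: QuestionKey, AnswerKey, and PredictionKey let the example and prediction maps be built without hard-coding key names. A sketch, assuming the chain grades each prediction against the example at the same index:

func gradeQA(ctx context.Context, llm schema.LLM) ([]schema.ChainValues, error) {
	eval, err := evaluation.NewQAEvalChain(llm)
	if err != nil {
		return nil, err
	}
	// Build inputs using the chain's configured keys rather than literals.
	examples := []map[string]string{{
		eval.QuestionKey(): "What is the capital of France?",
		eval.AnswerKey():   "Paris",
	}}
	predictions := []map[string]string{{
		eval.PredictionKey(): "The capital of France is Paris.",
	}}
	return eval.Evaluate(ctx, examples, predictions)
}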