Documentation ¶
Index ¶
- Constants
- Variables
- func AddDoubleEscapesForJSON(s string) string
- func ApplyEditToolToLineBuffer(toolCall *util.ToolCall, lineBuffer *LineBuffer) error
- func ChatCompletionRequestMessagesString(msgs []openai.ChatCompletionMessage) string
- func ColorSchemeToStyles(colorScheme *ColorScheme) *styles
- func CompletionRoutine(request *util.CompletionRequest, client LLM, writer io.Writer, ...)
- func GetSystemInfo() string
- func HasRunningChildren() bool
- func HistoryBlocksToString(blocks []util.HistoryBlock) string
- func HistoryTypeToString(historyType int) string
- func IsCompletionModel(modelName string) bool
- func IsLegacyModel(model string) bool
- func JSONString(input any) string
- func LogChatCompletionRequest(req openai.ChatCompletionRequest)
- func LogCompletionRequest(req openai.CompletionRequest)
- func LogCompletionResponse(resp util.CompletionResponse, id string)
- func NewByteMsg(data []byte) *byteMsg
- func NewDiskPromptLibrary(path string, verbose bool, writer io.Writer) (*prompt.DiskPromptLibrary, error)
- func NumTokensForModel(model string) int
- func NumTokensPerMessageForModel(model string) int
- func ParsePS1(data string, regex *regexp.Regexp, currIcon string) (int, int, string)
- func PrettyJSON(input string) string
- func PrintLoggingBox(box LoggingBox)
- func RequestCancelableAutosuggest(ctx context.Context, delay time.Duration, currCommand string, rawPrompt string, ...)
- func RunShell(ctx context.Context, config *ButterfishConfig) error
- func ShellHistoryBlockToGPTChat(block *util.HistoryBlock) *openai.ChatCompletionMessage
- func ShellHistoryBlocksToGPTChat(systemMsg string, blocks []util.HistoryBlock) []openai.ChatCompletionMessage
- func ShellHistoryTypeToRole(t int) string
- type AutosuggestResult
- type ButterfishConfig
- type ButterfishCtx
- func (this *ButterfishCtx) CalculateEmbeddings(ctx context.Context, content []string) ([][]float32, error)
- func (this *ButterfishCtx) Command(cmd string) error
- func (this *ButterfishCtx) EditLineBuffer(lineBuffer *LineBuffer, prompt string, options *CliCommandConfig) error
- func (this *ButterfishCtx) ErrorPrintf(format string, a ...any)
- func (this *ButterfishCtx) ExecCommand(parsed *kong.Context, options *CliCommandConfig) error
- func (this *ButterfishCtx) ParseCommand(cmd string) (*kong.Context, *CliCommandConfig, error)
- func (this *ButterfishCtx) Printf(format string, a ...any)
- func (this *ButterfishCtx) Prompt(cmd *promptCommand) (*util.CompletionResponse, error)
- func (this *ButterfishCtx) SetPS1(childIn io.Writer)
- func (this *ButterfishCtx) ShellMultiplexer(childIn io.Writer, childOut io.Reader, parentIn io.Reader, parentOut io.Writer)
- func (this *ButterfishCtx) StylePrintf(style lipgloss.Style, format string, a ...any)
- func (this *ButterfishCtx) StyleSprintf(style lipgloss.Style, format string, a ...any) string
- func (this *ButterfishCtx) SummarizeChunks(chunks [][]byte) error
- func (this *ButterfishCtx) SummarizePath(path string, chunkSize, maxChunks int) error
- func (this *ButterfishCtx) SummarizePaths(paths []string, chunkSize, maxChunks int) error
- type CliCommandConfig
- type ColorScheme
- type CommandParams
- type EditToolParameters
- type FinishParams
- type GPT
- func (this *GPT) Completion(request *util.CompletionRequest) (*util.CompletionResponse, error)
- func (this *GPT) CompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
- func (this *GPT) Embeddings(ctx context.Context, input []string, verbose bool) ([][]float32, error)
- func (this *GPT) FullChatCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
- func (this *GPT) FullChatCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
- func (this *GPT) InstructCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
- func (this *GPT) InstructCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
- func (this *GPT) SimpleChatCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
- func (this *GPT) SimpleChatCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
- type HistoryBuffer
- type LLM
- type LineBuffer
- type LoggingBox
- type PromptLibrary
- type ShellBuffer
- func (this *ShellBuffer) Clear() []byte
- func (this *ShellBuffer) ClearLast(colorStr string) []byte
- func (this *ShellBuffer) Cursor() int
- func (this *ShellBuffer) EatAutosuggestRune()
- func (this *ShellBuffer) SetColor(color string)
- func (this *ShellBuffer) SetPromptLength(promptLength int)
- func (this *ShellBuffer) SetTerminalWidth(width int)
- func (this *ShellBuffer) Size() int
- func (this *ShellBuffer) String() string
- func (this *ShellBuffer) Write(data string) []byte
- func (this *ShellBuffer) WriteAutosuggest(autosuggestText string, jumpForward int, colorStr string) []byte
- type ShellColorScheme
- type ShellHistory
- func (this *ShellHistory) AddFunctionCall(name, params string)
- func (this *ShellHistory) Append(historyType int, data string)
- func (this *ShellHistory) AppendFunctionOutput(name, data string)
- func (this *ShellHistory) GetLastNBytes(numBytes int, truncateLength int) []util.HistoryBlock
- func (this *ShellHistory) IterateBlocks(cb func(block *HistoryBuffer) bool)
- func (this *ShellHistory) LogRecentHistory()
- type ShellState
- func (this *ShellState) AssembleChat(prompt, sysMsg, functions string, reserveForAnswer int) (string, []util.HistoryBlock, error)
- func (this *ShellState) ClearAutosuggest(colorStr string)
- func (this *ShellState) Errorf(format string, args ...any)
- func (this *ShellState) FilterChildOut(data string) bool
- func (this *ShellState) GetCursorPosition() (int, int)
- func (this *ShellState) GoalModeChat()
- func (this *ShellState) GoalModeFunction(output *util.CompletionResponse)
- func (this *ShellState) GoalModeFunctionResponse(output string)
- func (this *ShellState) GoalModeStart()
- func (this *ShellState) HandleLocalPrompt() bool
- func (this *ShellState) Mux()
- func (this *ShellState) ParentInput(ctx context.Context, data []byte) []byte
- func (this *ShellState) ParentInputLoop(data []byte)
- func (this *ShellState) ParsePS1(data string) (int, int, string)
- func (this *ShellState) PrintError(err error)
- func (this *ShellState) PrintHelp()
- func (this *ShellState) PrintHistory()
- func (this *ShellState) PrintStatus()
- func (this *ShellState) RealizeAutosuggest(buffer *ShellBuffer, sendToChild bool, colorStr string)
- func (this *ShellState) RefreshAutosuggest(newData []byte, buffer *ShellBuffer, colorStr string)
- func (this *ShellState) RequestAutosuggest(delay time.Duration, command string)
- func (this *ShellState) SendPrompt()
- func (this *ShellState) SendPromptResponse(data string)
- func (this *ShellState) ShowAutosuggest(buffer *ShellBuffer, result *AutosuggestResult, cursorCol int, termWidth int)
- type Tokenization
- type UserInputParams
Constants ¶
const BOX_WIDTH = 80
const BestCompletionModel = "gpt-3.5-turbo"
const CLEAR_COLOR = "\x1b[0m"
const EMOJI_DEFAULT = "🐠"
const EMOJI_GOAL = "🟦"
const EMOJI_GOAL_UNSAFE = "⚡"
const ERR_429 = "429:insufficient_quota"
const ERR_429_HELP = "" /* 431-byte string literal not displayed */
const ESC_CLEAR = "\x1b[0K"
const ESC_CUP = "\x1b[6n" // Request the cursor position
const ESC_LEFT = "\x1b[%dD"
const ESC_RIGHT = "\x1b[%dC"
const ESC_UP = "\x1b[%dA"
const GPTEmbeddingsMaxTokens = 8192
const GPTEmbeddingsModel = openai.AdaEmbeddingV2
const H_LINE = "─"
const NE_CORNER = "╮"
const NW_CORNER = "╭"
Box drawing characters with curved corners
const PROMPT_PREFIX = "\033Q"
Special characters that we wrap the shell's command prompt in (PS1) so that we can detect where it starts and ends.
const PROMPT_PREFIX_ESCAPED = "\\033Q"
const PROMPT_SUFFIX = "\033R"
const PROMPT_SUFFIX_ESCAPED = "\\033R"
const SE_CORNER = "╯"
const SW_CORNER = "╰"
const V_LINE = "│"
Variables ¶
var BOX_COLORS = []string{
"\033[38;2;119;221;221m",
"\033[38;2;253;122;72m",
"\033[38;2;253;253;150m",
"\033[38;2;119;221;119m",
"\033[38;2;174;198;207m",
"\033[38;2;119;158;203m",
"\033[38;2;177;156;217m",
"\033[38;2;255;177;209m",
}
var DarkShellColorScheme = &ShellColorScheme{
Prompt: "\x1b[38;5;154m",
PromptGoal: "\x1b[38;5;200m",
PromptGoalUnsafe: "\x1b[38;5;9m",
Command: "\x1b[0m",
Autosuggest: "\x1b[38;5;241m",
Answer: "\x1b[38;5;178m",
GoalMode: "\x1b[38;5;51m",
Error: "\x1b[38;5;196m",
}
var EditSysMsg = `` /* 413-byte string literal not displayed */
var EditTools = []util.ToolDefinition{
	{
		Type: "function",
		Function: util.FunctionDefinition{
			Name:        "edit",
			Description: "Edit a range of lines in a file. The range start is inclusive, the end is exclusive, so values of 5 and 5 would mean that new text is inserted on line 5. Values of 5 and 6 mean that line 5 would be replaced.",
			Parameters: jsonschema.Definition{
				Type: jsonschema.Object,
				Properties: map[string]jsonschema.Definition{
					"range_start": {
						Type:        jsonschema.Number,
						Description: "The start of the line range, inclusive",
					},
					"range_end": {
						Type:        jsonschema.Number,
						Description: "The end of the line range, exclusive",
					},
					"code_edit": {
						Type:        jsonschema.String,
						Description: "The code to replace the range with",
					},
				},
				Required: []string{"range_start", "range_end", "code_edit"},
			},
		},
	},
}
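For example, a tool-call arguments payload the model might return for this "edit" tool; per the inclusive/exclusive rule above, range 5..6 replaces line 5:

args := `{"range_start": 5, "range_end": 6, "code_edit": "fmt.Println(\"fixed\")"}`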
var GruvboxDark = ColorScheme{
Foreground: "#ebdbb2",
Background: "#282828",
Error: "#fb4934",
Color1: "#b8bb26",
Color2: "#fabd2f",
Color3: "#83a598",
Color4: "#d3869b",
Color5: "#8ec07c",
Color6: "#fe8019",
Grey: "#928374",
}
Gruvbox Colorscheme from https://github.com/morhetz/gruvbox
var GruvboxLight = ColorScheme{
Foreground: "#7C6F64",
Background: "#FBF1C7",
Error: "#CC241D",
Color1: "#98971A",
Color2: "#D79921",
Color3: "#458588",
Color4: "#B16286",
Color5: "#689D6A",
Color6: "#D65D0E",
Grey: "#928374",
}
var LegacyModelTypes = []string{
	openai.GPT3TextAda001,
	openai.GPT3TextBabbage001,
	openai.GPT3TextCurie001,
	openai.GPT3TextDavinci001,
	openai.GPT3TextDavinci002,
	openai.GPT3TextDavinci003,
}
var LightShellColorScheme = &ShellColorScheme{
Prompt: "\x1b[38;5;28m",
PromptGoal: "\x1b[38;5;200m",
PromptGoalUnsafe: "\x1b[38;5;9m",
Command: "\x1b[0m",
Autosuggest: "\x1b[38;5;241m",
Answer: "\x1b[38;5;178m",
GoalMode: "\x1b[38;5;18m",
Error: "\x1b[38;5;196m",
}
var MODEL_TO_NUM_TOKENS = map[string]int{
"gpt-4": 8192,
"gpt-4-1106": 128000,
"gpt-4-vision": 128000,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-1106": 16384,
"gpt-3.5-turbo-16k": 16384,
"gpt-3.5-turbo-16k-0613": 16384,
"gpt-3.5-turbo-instruct": 4096,
"gpt-3.5-turbo-instruct-0913": 4096,
"text-davinci-003": 2047,
"text-davinci-002": 2047,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"text-curie-001": 2049,
"text-babbage-001": 2049,
"text-ada-001": 2049,
"davinci": 2049,
"curie": 2049,
"babbage": 2049,
"ada": 2049,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
See https://platform.openai.com/docs/models/overview
var MODEL_TO_TOKENS_PER_MESSAGE = map[string]int{
"gpt-4": 3,
"gpt-4-0314": 3,
"gpt-4-0613": 3,
"gpt-4-32k": 3,
"gpt-4-32k-0314": 3,
"gpt-4-32k-0613": 3,
"gpt-3.5-turbo": 4,
"gpt-3.5-turbo-0301": 4,
"gpt-3.5-turbo-1613": 4,
"gpt-3.5-turbo-1106": 4,
"gpt-3.5-turbo-16k": 4,
"gpt-3.5-turbo-16k-0613": 4,
}
These token numbers come from https://github.com/pkoukk/tiktoken-go#counting-tokens-for-chat-api-calls
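A hedged sketch of how these per-message counts combine with MODEL_TO_NUM_TOKENS when budgeting a chat request; reserveForAnswer and numMessages are illustrative:

model := "gpt-3.5-turbo"
window := NumTokensForModel(model)               // 4096, per MODEL_TO_NUM_TOKENS
perMessage := NumTokensPerMessageForModel(model) // 4, per this table
reserveForAnswer := 1024
numMessages := 8
contentBudget := window - reserveForAnswer - perMessage*numMessages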
var ZSH_CLEAR_REGEX = regexp.MustCompile("^\x1b\\[1m\x1b\\[3m%\x1b\\[23m\x1b\\[1m\x1b\\[0m\x20+\x0d\x20\x0d")
zsh appears to use this sequence to clear formatting and the rest of the line before printing a prompt
Functions ¶
func AddDoubleEscapesForJSON ¶ added in v0.1.6
func AddDoubleEscapesForJSON(s string) string
We ask GPT to generate bash commands, which may contain escapes like \' that are valid bash but not valid JSON. This function identifies those escapes and doubles them so that the resulting JSON is valid.
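A minimal usage sketch; the input is a made-up model response:

raw := `{"cmd": "echo don\'t panic"}` // \' is valid bash but invalid JSON
fixed := AddDoubleEscapesForJSON(raw)
var parsed map[string]string
err := json.Unmarshal([]byte(fixed), &parsed) // now succeeds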
func ApplyEditToolToLineBuffer ¶ added in v0.2.5
func ApplyEditToolToLineBuffer(toolCall *util.ToolCall, lineBuffer *LineBuffer) error
func ChatCompletionRequestMessagesString ¶ added in v0.0.20
func ChatCompletionRequestMessagesString(msgs []openai.ChatCompletionMessage) string
func ColorSchemeToStyles ¶
func ColorSchemeToStyles(colorScheme *ColorScheme) *styles
func CompletionRoutine ¶ added in v0.1.0
func CompletionRoutine(request *util.CompletionRequest, client LLM, writer io.Writer, outputChan chan *util.CompletionResponse, normalColor, errorColor string)
func GetSystemInfo ¶ added in v0.1.8
func GetSystemInfo() string
func HasRunningChildren ¶ added in v0.0.20
func HasRunningChildren() bool
func HistoryBlocksToString ¶ added in v0.0.20
func HistoryBlocksToString(blocks []util.HistoryBlock) string
func HistoryTypeToString ¶ added in v0.1.0
func HistoryTypeToString(historyType int) string
Turn a history type enum into a string.
func IsCompletionModel ¶ added in v0.1.10
func IsCompletionModel(modelName string) bool
If the model is a legacy model or its name ends with -instruct, then it should use the completion API; otherwise it should use the chat API.
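Applying that rule to a few names from this package's model tables:

IsCompletionModel("text-davinci-003")       // true: a legacy model
IsCompletionModel("gpt-3.5-turbo-instruct") // true: ends with -instruct
IsCompletionModel("gpt-4")                  // false: routed to the chat API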
func IsLegacyModel ¶ added in v0.1.0
func IsLegacyModel(model string) bool
func JSONString ¶ added in v0.2.5
func JSONString(input any) string
func LogChatCompletionRequest ¶ added in v0.1.5
func LogChatCompletionRequest(req openai.ChatCompletionRequest)
func LogCompletionRequest ¶ added in v0.1.5
func LogCompletionRequest(req openai.CompletionRequest)
func LogCompletionResponse ¶ added in v0.1.5
func LogCompletionResponse(resp util.CompletionResponse, id string)
func NewByteMsg ¶
func NewByteMsg(data []byte) *byteMsg
func NewDiskPromptLibrary ¶
func NewDiskPromptLibrary(path string, verbose bool, writer io.Writer) (*prompt.DiskPromptLibrary, error)
Let's initialize our prompts. If we have a prompt library file, we'll load it. Either way, we'll then add the default prompts to the library, replacing loaded prompts only if OkToReplace is set on them. Then we save the library at the same path.
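A hedged usage sketch; the path matches the documented default, and passing os.Stdout as the log writer is an assumption:

library, err := NewDiskPromptLibrary("~/.config/butterfish/prompts.yaml", false, os.Stdout)
if err != nil {
	log.Fatal(err)
}
_ = library // should satisfy the PromptLibrary interface used by ButterfishConfig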
func NumTokensForModel ¶ added in v0.1.0
func NumTokensForModel(model string) int
func NumTokensPerMessageForModel ¶ added in v0.1.0
func NumTokensPerMessageForModel(model string) int
func ParsePS1 ¶ added in v0.1.0
func ParsePS1(data string, regex *regexp.Regexp, currIcon string) (int, int, string)
Given a string of terminal output, identify terminal prompts based on the custom PS1 escape sequences we set. Returns:
- The last exit code/status seen in the string (i.e. it will be non-zero if the previous command failed).
- The number of prompts identified in the string.
- The string with the special prompt escape sequences removed.
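A hypothetical direct-call sketch; the regex butterfish compiles internally is not shown in this documentation, so the pattern below (capturing the exit status between the PROMPT_PREFIX and PROMPT_SUFFIX markers) is an assumption:

ps1Regex := regexp.MustCompile(PROMPT_PREFIX + `(\d+)` + PROMPT_SUFFIX)
exitCode, numPrompts, cleaned := ParsePS1("\033Q0\033R$ ls", ps1Regex, EMOJI_DEFAULT)
// If the pattern matches: exitCode == 0, numPrompts == 1, and cleaned is
// "$ ls" with the marker sequences stripped.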
func PrettyJSON ¶ added in v0.2.5
func PrettyJSON(input string) string
If the input can be parsed as JSON, return a nicely formatted and indented version of it; otherwise return the original string.
func PrintLoggingBox ¶ added in v0.1.5
func PrintLoggingBox(box LoggingBox)
Given a LoggingBox, write nested boxes drawn with line characters at a width of 80. Boxes can be nested, and each box's title is placed in its top line.
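For example, a nested box; the field values are illustrative:

box := LoggingBox{
	Title:   "Completion Request",
	Content: "prompt text...",
	Color:   0, // index into BOX_COLORS
	Children: []LoggingBox{
		{Title: "History", Content: "recent shell output...", Color: 1},
	},
}
PrintLoggingBox(box)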
func RequestCancelableAutosuggest ¶ added in v0.0.25
func RequestCancelableAutosuggest(
	ctx context.Context,
	delay time.Duration,
	currCommand string,
	rawPrompt string,
	llmClient LLM,
	model string,
	verbose bool,
	history *ShellHistory,
	maxHistoryBlockTokens int,
	autosuggestChan chan<- *AutosuggestResult)
This is a standalone function rather than a method so that the concurrent steps taken when it runs as a goroutine are isolated; that's why it has so many arguments.
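A hedged sketch of launching it; delay and token values are illustrative, and llmClient is assumed to be any LLM implementation in scope:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
results := make(chan *AutosuggestResult)
history := NewShellHistory()
go RequestCancelableAutosuggest(ctx, 400*time.Millisecond,
	"git ", // what the user has typed so far
	"",     // no explicit prompt
	llmClient, "gpt-3.5-turbo", false,
	history, 512, results)
// On the next keystroke, call cancel() and issue a fresh request.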
func ShellHistoryBlockToGPTChat ¶ added in v0.0.20
func ShellHistoryBlockToGPTChat(block *util.HistoryBlock) *openai.ChatCompletionMessage
func ShellHistoryBlocksToGPTChat ¶ added in v0.1.6
func ShellHistoryBlocksToGPTChat(systemMsg string, blocks []util.HistoryBlock) []openai.ChatCompletionMessage
func ShellHistoryTypeToRole ¶ added in v0.1.6
func ShellHistoryTypeToRole(t int) string
Types ¶
type AutosuggestResult ¶ added in v0.0.20
type ButterfishConfig ¶
type ButterfishConfig struct {
	// Verbose mode, prints out more information like raw OpenAI communication.
	// 0 = no verbose output
	// 1 = verbose output
	// 2 = very verbose output
	Verbose int

	// build variables
	BuildInfo string

	// OpenAI private token, should start with "sk-".
	// Found at https://platform.openai.com/account/api-keys
	OpenAIToken string
	BaseURL     string

	// LLM API communication client that implements the LLM interface
	LLMClient LLM

	// Color scheme to use for the shell, see GruvboxDark below
	ColorScheme *ColorScheme

	// A list of context-specific styles drawn from the colorscheme
	// These are what should actually be used during rendering
	Styles *styles

	// Path of yaml file from which to load LLM prompts
	// Defaults to ~/.config/butterfish/prompts.yaml
	PromptLibraryPath string

	// The instantiated prompt library used when interpolating prompts before
	// calling the LLM
	PromptLibrary PromptLibrary

	// Shell mode configuration
	ShellMode               bool
	ShellPluginMode         bool
	ShellColorDark          bool
	ShellBinary             string // path to the shell binary to use, e.g. /bin/zsh
	ShellPromptModel        string // used when the user enters an explicit prompt
	ShellLeavePromptAlone   bool   // don't try to edit the shell prompt
	ShellAutosuggestEnabled bool   // whether to use autosuggest
	ShellAutosuggestModel   string // used when we're autocompleting a command
	// how long to wait between when the user stops typing and we ask for an
	// autosuggest
	ShellAutosuggestTimeout time.Duration
	// timeout specifically for a fresh prompt suggestion
	ShellNewlineAutosuggestTimeout time.Duration
	// Maximum tokens that a single history line-item can consume
	ShellMaxHistoryBlockTokens int

	// Model, temp, and max tokens to use when executing the `gencmd` command
	GencmdModel       string
	GencmdTemperature float32
	GencmdMaxTokens   int

	// Model, temp, and max tokens to use when executing the `exec` command
	ExeccheckModel       string
	ExeccheckTemperature float32
	ExeccheckMaxTokens   int

	// Model, temp, and max tokens to use when executing the `summarize` command
	SummarizeModel       string
	SummarizeTemperature float32
	SummarizeMaxTokens   int
}
func MakeButterfishConfig ¶
func MakeButterfishConfig() *ButterfishConfig
func (*ButterfishConfig) ParseShell ¶ added in v0.1.0
func (this *ButterfishConfig) ParseShell() string
type ButterfishCtx ¶
type ButterfishCtx struct {
	// global context, should be passed through to other calls
	Ctx context.Context
	// cancel function for the global context
	Cancel context.CancelFunc
	// output writer
	Out io.Writer

	// configuration
	Config *ButterfishConfig
	// true if we're running in console mode
	InConsoleMode bool
	// library of prompts
	PromptLibrary PromptLibrary
	// GPT client
	LLMClient LLM
	// landing space for generated commands
	CommandRegister string
	// embedding index for searching local files
	VectorIndex embedding.FileEmbeddingIndex
}
func NewButterfish ¶
func NewButterfish(ctx context.Context, config *ButterfishConfig) (*ButterfishCtx, error)
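A hedged construction sketch using the documented config fields; reading the token from OPENAI_API_KEY is an assumption:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

config := MakeButterfishConfig()
config.OpenAIToken = os.Getenv("OPENAI_API_KEY")
config.ShellMode = true

bf, err := NewButterfish(ctx, config)
if err != nil {
	log.Fatal(err)
}
_ = bf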
func (*ButterfishCtx) CalculateEmbeddings ¶
func (this *ButterfishCtx) CalculateEmbeddings(ctx context.Context, content []string) ([][]float32, error)
func (*ButterfishCtx) Command ¶
func (this *ButterfishCtx) Command(cmd string) error
Parse and execute a command in a butterfish context
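For instance, dispatching a CLI-style command string (the command text is illustrative):

err := butterfishCtx.Command("summarize main.go")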
func (*ButterfishCtx) EditLineBuffer ¶ added in v0.2.5
func (this *ButterfishCtx) EditLineBuffer(lineBuffer *LineBuffer, prompt string, options *CliCommandConfig) error
func (*ButterfishCtx) ErrorPrintf ¶
func (this *ButterfishCtx) ErrorPrintf(format string, a ...any)
func (*ButterfishCtx) ExecCommand ¶
func (this *ButterfishCtx) ExecCommand(parsed *kong.Context, options *CliCommandConfig) error
A function to handle a cmd string when received from consoleCommand channel
func (*ButterfishCtx) ParseCommand ¶
func (this *ButterfishCtx) ParseCommand(cmd string) (*kong.Context, *CliCommandConfig, error)
func (*ButterfishCtx) Printf ¶
func (this *ButterfishCtx) Printf(format string, a ...any)
func (*ButterfishCtx) Prompt ¶
func (this *ButterfishCtx) Prompt(cmd *promptCommand) (*util.CompletionResponse, error)
func (*ButterfishCtx) SetPS1 ¶ added in v0.1.0
func (this *ButterfishCtx) SetPS1(childIn io.Writer)
This sets the PS1 shell variable, which is the prompt the shell displays before each command. We need to be able to parse the child shell's prompt to determine where it starts and ends and what the last exit code was, and to allow customization that shows the user they're inside the butterfish shell. The PS1 is roughly the following:
PS1 := promptPrefix $PS1 ShellCommandPrompt $? promptSuffix
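A rough Go rendering of that composition, using this package's escaped marker constants; taking EMOJI_DEFAULT as the ShellCommandPrompt is an assumption:

ps1 := PROMPT_PREFIX_ESCAPED + "$PS1" + EMOJI_DEFAULT + " $?" + PROMPT_SUFFIX_ESCAPED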
func (*ButterfishCtx) ShellMultiplexer ¶ added in v0.0.20
func (this *ButterfishCtx) ShellMultiplexer(childIn io.Writer, childOut io.Reader, parentIn io.Reader, parentOut io.Writer)
func (*ButterfishCtx) StylePrintf ¶
func (this *ButterfishCtx) StylePrintf(style lipgloss.Style, format string, a ...any)
A local printf that writes to the ButterfishCtx output using a lipgloss style
func (*ButterfishCtx) StyleSprintf ¶ added in v0.0.18
func (this *ButterfishCtx) StyleSprintf(style lipgloss.Style, format string, a ...any) string
func (*ButterfishCtx) SummarizeChunks ¶
func (this *ButterfishCtx) SummarizeChunks(chunks [][]byte) error
func (*ButterfishCtx) SummarizePath ¶
func (this *ButterfishCtx) SummarizePath(path string, chunkSize, maxChunks int) error
From OpenAI documentation: Tokens can be words or just chunks of characters. For example, the word “hamburger” gets broken up into the tokens “ham”, “bur” and “ger”, while a short and common word like “pear” is a single token. Many tokens start with a whitespace, for example “ hello” and “ bye”. The number of tokens processed in a given API request depends on the length of both your inputs and outputs. As a rough rule of thumb, 1 token is approximately 4 characters or 0.75 words for English text.
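A short usage sketch; the chunk values mirror the summarize CLI defaults documented in CliCommandConfig (3600-byte chunks, at most 8 chunks), and the rough rule above puts each chunk near 3600/4 = 900 tokens:

err := butterfishCtx.SummarizePath("README.md", 3600, 8)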
func (*ButterfishCtx) SummarizePaths ¶
func (this *ButterfishCtx) SummarizePaths(paths []string, chunkSize, maxChunks int) error
Iterate through a list of file paths and summarize each
type CliCommandConfig ¶
type CliCommandConfig struct {
	Prompt struct {
		Prompt        []string `arg:"" help:"LLM model prompt, e.g. 'what is the unix shell?'" optional:""`
		SystemMessage string   `short:"s" default:"" help:"System message to send to model as instructions, e.g. 'respond succinctly'."`
		Model         string   `short:"m" default:"gpt-3.5-turbo-1106" help:"LLM to use for the prompt."`
		NumTokens     int      `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
		Temperature   float32  `` /* 146-byte string literal not displayed */
		Functions     string   `short:"f" default:"" help:"Path to json file with functions to use for prompt."`
		NoColor       bool     `default:"false" help:"Disable color output."`
		NoBackticks   bool     `default:"false" help:"Strip out backticks around codeblocks."`
	} `` /* 381-byte string literal not displayed */

	Promptedit struct {
		File        string  `short:"f" default:"~/.config/butterfish/prompt.txt" help:"Cached prompt file to use." optional:""`
		Editor      string  `short:"e" default:"" help:"Editor to use for the prompt."`
		Model       string  `short:"m" default:"gpt-3.5-turbo-1106" help:"GPT model to use for the prompt."`
		NumTokens   int     `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
		Temperature float32 `` /* 146-byte string literal not displayed */
	} `` /* 175-byte string literal not displayed */

	Edit struct {
		Filepath    string  `arg:"" help:"Path to file, will be edited in-place."`
		Prompt      string  `arg:"" help:"LLM model prompt, e.g. 'Plan an edit'"`
		Model       string  `short:"m" default:"gpt-3.5-turbo-1106" help:"LLM to use for the prompt."`
		NumTokens   int     `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
		Temperature float32 `` /* 146-byte string literal not displayed */
		InPlace     bool    `short:"i" default:"false" help:"Edit the file in-place, otherwise we write to stdout."`
		NoColor     bool    `default:"false" help:"Disable color output."`
		NoBackticks bool    `default:"false" help:"Strip out backticks around codeblocks."`
	} `cmd:"" help:"Edit a file by using a line range editing tool."`

	Summarize struct {
		Files     []string `arg:"" help:"File paths to summarize." optional:""`
		ChunkSize int      `short:"c" default:"3600" help:"Number of bytes to summarize at a time if the file must be split up."`
		MaxChunks int      `short:"C" default:"8" help:"Maximum number of chunks to summarize from a specific file."`
	} `` /* 329-byte string literal not displayed */

	Gencmd struct {
		Prompt []string `arg:"" help:"Prompt describing the desired shell command."`
		Force  bool     `short:"f" default:"false" help:"Execute the command without prompting."`
	} `` /* 192-byte string literal not displayed */

	Exec struct {
		Command []string `arg:"" help:"Command to execute." optional:""`
	} `` /* 160-byte string literal not displayed */

	Index struct {
		Paths     []string `arg:"" help:"Paths to index." optional:""`
		Force     bool     `short:"f" default:"false" help:"Force re-indexing of files rather than skipping cached embeddings."`
		ChunkSize int      `short:"c" default:"512" help:"Number of bytes to embed at a time when the file is split up."`
		MaxChunks int      `short:"C" default:"256" help:"Maximum number of chunks to embed from a specific file."`
	} `` /* 380-byte string literal not displayed */

	Clearindex struct {
		Paths []string `arg:"" help:"Paths to clear from the index." optional:""`
	} `` /* 219-byte string literal not displayed */

	Loadindex struct {
		Paths []string `arg:"" help:"Paths to load into the index." optional:""`
	} `` /* 225-byte string literal not displayed */

	Showindex struct {
		Paths []string `arg:"" help:"Paths to show from the index." optional:""`
	} `` /* 128-byte string literal not displayed */

	Indexsearch struct {
		Query   string `arg:"" help:"Query to search for."`
		Results int    `short:"r" default:"5" help:"Number of results to return."`
	} `` /* 247-byte string literal not displayed */

	Indexquestion struct {
		Question    string  `arg:"" help:"Question to ask."`
		Model       string  `short:"m" default:"gpt-3.5-turbo-1106" help:"GPT model to use for the prompt."`
		NumTokens   int     `short:"n" default:"1024" help:"Maximum number of tokens to generate."`
		Temperature float32 `short:"T" default:"0.7" help:"Temperature to use for the prompt."`
	} `` /* 194-byte string literal not displayed */
}
Kong CLI parser option configuration
type ColorScheme ¶
type CommandParams ¶ added in v0.1.4
type CommandParams struct {
Cmd string `json:"cmd"`
}
type EditToolParameters ¶ added in v0.2.5
type FinishParams ¶ added in v0.1.4
type FinishParams struct {
Success bool `json:"success"`
}
type GPT ¶
type GPT struct {
// contains filtered or unexported fields
}
func (*GPT) Completion ¶
func (this *GPT) Completion(request *util.CompletionRequest) (*util.CompletionResponse, error)
We do completions through the chat API by default; this routes to the legacy completion API if the model is a legacy model.
func (*GPT) CompletionStream ¶
func (this *GPT) CompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
We do completions through the chat API by default; this routes to the legacy completion API if the model is a legacy model.
func (*GPT) Embeddings ¶
func (this *GPT) Embeddings(ctx context.Context, input []string, verbose bool) ([][]float32, error)
func (*GPT) FullChatCompletion ¶ added in v0.0.20
func (this *GPT) FullChatCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
func (*GPT) FullChatCompletionStream ¶ added in v0.0.20
func (this *GPT) FullChatCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
func (*GPT) InstructCompletion ¶ added in v0.1.10
func (this *GPT) InstructCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
Run a GPT completion request and return the response
func (*GPT) InstructCompletionStream ¶ added in v0.1.10
func (this *GPT) InstructCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
func (*GPT) SimpleChatCompletion ¶ added in v0.0.19
func (this *GPT) SimpleChatCompletion(request *util.CompletionRequest) (*util.CompletionResponse, error)
func (*GPT) SimpleChatCompletionStream ¶ added in v0.0.19
func (this *GPT) SimpleChatCompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
type HistoryBuffer ¶ added in v0.0.20
type HistoryBuffer struct {
	Type           int
	Content        *ShellBuffer
	FunctionName   string
	FunctionParams string

	// This is to cache tokenization plus truncation of the content
	// It maps from encoding name to the tokenization of the output
	Tokenizations map[string]Tokenization
}
HistoryBuffer keeps a content buffer, plus an enum of the type of content (user prompt, shell output, etc), plus a cache of tokenizations of the content. Tokenizations are cached for specific encodings, for example newer models use a different encoding than older models.
func (*HistoryBuffer) GetTokenization ¶ added in v0.1.0
func (*HistoryBuffer) SetTokenization ¶ added in v0.1.0
func (this *HistoryBuffer) SetTokenization(encoding string, inputLength int, numTokens int, data string)
type LLM ¶
type LLM interface {
	CompletionStream(request *util.CompletionRequest, writer io.Writer) (*util.CompletionResponse, error)
	Completion(request *util.CompletionRequest) (*util.CompletionResponse, error)
	Embeddings(ctx context.Context, input []string, verbose bool) ([][]float32, error)
}
A generic interface for a service that calls a large language model based on input prompts.
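For illustration, a minimal stub satisfying the interface, e.g. for tests; assuming a zero-value util.CompletionResponse is acceptable to callers:

type stubLLM struct{}

func (s *stubLLM) Completion(req *util.CompletionRequest) (*util.CompletionResponse, error) {
	return &util.CompletionResponse{}, nil
}

func (s *stubLLM) CompletionStream(req *util.CompletionRequest, w io.Writer) (*util.CompletionResponse, error) {
	return &util.CompletionResponse{}, nil
}

func (s *stubLLM) Embeddings(ctx context.Context, input []string, verbose bool) ([][]float32, error) {
	return make([][]float32, len(input)), nil
}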
type LineBuffer ¶ added in v0.2.5
type LineBuffer struct {
Lines []string
}
Manages a buffer of lines; we want to be able to replace a range of lines.
func NewLineBuffer ¶ added in v0.2.5
func NewLineBuffer(filepath string) (*LineBuffer, error)
func (*LineBuffer) PrefixLineNumbers ¶ added in v0.2.5
func (this *LineBuffer) PrefixLineNumbers() string
func (*LineBuffer) ReplaceRange ¶ added in v0.2.5
func (this *LineBuffer) ReplaceRange(start, end int, replacement string) error
Replace and insert lines in a buffer. Start is inclusive, end is exclusive, and lines are 1-indexed. Thus if start == end then we insert at the start of that line.
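A worked example of those semantics; whether replacement strings are split on newlines is not specified here, so single-line replacements are the safe illustration:

buf := &LineBuffer{Lines: []string{"alpha", "beta", "gamma"}}
_ = buf.ReplaceRange(2, 3, "BETA")  // replaces line 2 ("beta")
_ = buf.ReplaceRange(1, 1, "intro") // start == end: inserts before line 1
// Lines is now: "intro", "alpha", "BETA", "gamma"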
func (*LineBuffer) String ¶ added in v0.2.5
func (this *LineBuffer) String() string
type LoggingBox ¶ added in v0.1.5
type LoggingBox struct {
	Title    string
	Content  string
	Children []LoggingBox
	Color    int
}
type PromptLibrary ¶
type PromptLibrary interface {
	// Get a prompt by name. The arguments are passed in a pattern of key, value.
	// For example, if the prompt is "Hello, {name}", then you would call
	// GetPrompt("greeting", "name", "Peter") and "Hello, Peter" would be
	// returned. If a variable is not found, or an argument is passed that
	// doesn't have a corresponding variable, an error is returned.
	GetPrompt(name string, args ...string) (string, error)
	GetUninterpolatedPrompt(name string) (string, error)
	InterpolatePrompt(prompt string, args ...string) (string, error)
}
Interface for a library that accepts a prompt and interpolates variables within the prompt
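Following the contract in the interface comment ("greeting" and its template are hypothetical library entries):

prompt, err := library.GetPrompt("greeting", "name", "Peter")
// If the stored template is "Hello, {name}", prompt is "Hello, Peter".
// A missing variable, or an argument without a matching variable, yields err != nil.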
type ShellBuffer ¶ added in v0.0.20
type ShellBuffer struct {
// contains filtered or unexported fields
}
This holds a buffer that represents a tty shell buffer. Incoming data manipulates the buffer; for example, a left arrow moves the cursor left and a backspace erases the end of the buffer.
func NewShellBuffer ¶ added in v0.0.20
func NewShellBuffer() *ShellBuffer
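A hedged sketch; that Write interprets ANSI sequences such as the left arrow is an assumption drawn from the description above:

buf := NewShellBuffer()
buf.Write("echo hi")
buf.Write("\x1b[2D") // left arrow twice, per ESC_LEFT; cursor now before "hi"
fmt.Println(buf.String(), buf.Cursor())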
func (*ShellBuffer) Clear ¶ added in v0.0.20
func (this *ShellBuffer) Clear() []byte
func (*ShellBuffer) ClearLast ¶ added in v0.0.20
func (this *ShellBuffer) ClearLast(colorStr string) []byte
func (*ShellBuffer) Cursor ¶ added in v0.0.20
func (this *ShellBuffer) Cursor() int
func (*ShellBuffer) EatAutosuggestRune ¶ added in v0.0.20
func (this *ShellBuffer) EatAutosuggestRune()
func (*ShellBuffer) SetColor ¶ added in v0.0.20
func (this *ShellBuffer) SetColor(color string)
func (*ShellBuffer) SetPromptLength ¶ added in v0.0.20
func (this *ShellBuffer) SetPromptLength(promptLength int)
func (*ShellBuffer) SetTerminalWidth ¶ added in v0.0.20
func (this *ShellBuffer) SetTerminalWidth(width int)
func (*ShellBuffer) Size ¶ added in v0.0.20
func (this *ShellBuffer) Size() int
func (*ShellBuffer) String ¶ added in v0.0.20
func (this *ShellBuffer) String() string
func (*ShellBuffer) Write ¶ added in v0.0.20
func (this *ShellBuffer) Write(data string) []byte
func (*ShellBuffer) WriteAutosuggest ¶ added in v0.0.20
func (this *ShellBuffer) WriteAutosuggest(autosuggestText string, jumpForward int, colorStr string) []byte
>>> command
           ^ cursor
autosuggest: " foobar"
jumpForward: 0

>>> command
         ^ cursor
autosuggest: " foobar"
jumpForward: 2
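A hedged call matching the second case above, reusing the dark scheme's autosuggest color:

out := buf.WriteAutosuggest(" foobar", 2, DarkShellColorScheme.Autosuggest)
os.Stdout.Write(out) // renders the grey suggestion, jumping the cursor forward 2 cells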
type ShellColorScheme ¶ added in v0.1.0
type ShellHistory ¶ added in v0.0.20
type ShellHistory struct {
	Blocks []*HistoryBuffer
	// contains filtered or unexported fields
}
ShellHistory keeps a record of past shell history and LLM interaction in a slice of HistoryBuffer objects. You can add a new block, append to the last block, and get the last n bytes of the history as an array of HistoryBlocks.
func NewShellHistory ¶ added in v0.0.20
func NewShellHistory() *ShellHistory
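A small lifecycle sketch; the history-type value is illustrative because the enum constants are unexported:

hist := NewShellHistory()
hist.Append(1, "ls -la\n") // 1 stands in for an unexported history type constant
blocks := hist.GetLastNBytes(1024, 512) // last ~1KB, truncating each block at 512 bytes
_ = blocks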
func (*ShellHistory) AddFunctionCall ¶ added in v0.1.6
func (this *ShellHistory) AddFunctionCall(name, params string)
func (*ShellHistory) Append ¶ added in v0.0.20
func (this *ShellHistory) Append(historyType int, data string)
func (*ShellHistory) AppendFunctionOutput ¶ added in v0.1.6
func (this *ShellHistory) AppendFunctionOutput(name, data string)
func (*ShellHistory) GetLastNBytes ¶ added in v0.0.20
func (this *ShellHistory) GetLastNBytes(numBytes int, truncateLength int) []util.HistoryBlock
Go back in history for a certain number of bytes.
func (*ShellHistory) IterateBlocks ¶ added in v0.1.0
func (this *ShellHistory) IterateBlocks(cb func(block *HistoryBuffer) bool)
func (*ShellHistory) LogRecentHistory ¶ added in v0.0.20
func (this *ShellHistory) LogRecentHistory()
This is not thread-safe.
type ShellState ¶ added in v0.0.20
type ShellState struct {
	Butterfish *ButterfishCtx
	ParentOut  io.Writer
	ChildIn    io.Writer
	Sigwinch   chan os.Signal

	// set based on model
	PromptMaxTokens      int
	AutosuggestMaxTokens int

	// The current state of the shell
	State                int
	GoalMode             bool
	GoalModeBuffer       string
	GoalModeGoal         string
	GoalModeUnsafe       bool
	ActiveFunction       string
	PromptSuffixCounter  int
	ChildOutReader       chan *byteMsg
	ParentInReader       chan *byteMsg
	CursorPosChan        chan *cursorPosition
	PromptOutputChan     chan *util.CompletionResponse
	PrintErrorChan       chan error
	AutosuggestChan      chan *AutosuggestResult
	History              *ShellHistory
	PromptAnswerWriter   io.Writer
	StyleWriter          *util.StyleCodeblocksWriter
	Prompt               *ShellBuffer
	PromptResponseCancel context.CancelFunc
	Command              *ShellBuffer
	TerminalWidth        int
	Color                *ShellColorScheme
	LastTabPassthrough   time.Time

	// these are used to estimate number of tokens
	AutosuggestEncoder *tiktoken.Tiktoken
	PromptEncoder      *tiktoken.Tiktoken

	// autosuggest config
	AutosuggestEnabled bool
	LastAutosuggest    string
	AutosuggestCtx     context.Context
	AutosuggestCancel  context.CancelFunc
	AutosuggestBuffer  *ShellBuffer
	// contains filtered or unexported fields
}
func (*ShellState) AssembleChat ¶ added in v0.1.0
func (this *ShellState) AssembleChat(prompt, sysMsg, functions string, reserveForAnswer int) (string, []util.HistoryBlock, error)
Prepares to call assembleChat(), using ShellState fields to calculate token limits.
func (*ShellState) ClearAutosuggest ¶ added in v0.0.20
func (this *ShellState) ClearAutosuggest(colorStr string)
func (*ShellState) Errorf ¶ added in v0.1.0
func (this *ShellState) Errorf(format string, args ...any)
func (*ShellState) FilterChildOut ¶ added in v0.1.6
func (this *ShellState) FilterChildOut(data string) bool
func (*ShellState) GetCursorPosition ¶ added in v0.1.0
func (this *ShellState) GetCursorPosition() (int, int)
func (*ShellState) GoalModeChat ¶ added in v0.1.0
func (this *ShellState) GoalModeChat()
func (*ShellState) GoalModeFunction ¶ added in v0.2.2
func (this *ShellState) GoalModeFunction(output *util.CompletionResponse)
func (*ShellState) GoalModeFunctionResponse ¶ added in v0.1.8
func (this *ShellState) GoalModeFunctionResponse(output string)
func (*ShellState) GoalModeStart ¶ added in v0.1.0
func (this *ShellState) GoalModeStart()
func (*ShellState) HandleLocalPrompt ¶ added in v0.1.0
func (this *ShellState) HandleLocalPrompt() bool
func (*ShellState) Mux ¶ added in v0.0.20
func (this *ShellState) Mux()
TODO add a diagram of streams here
func (*ShellState) ParentInput ¶ added in v0.2.4
func (this *ShellState) ParentInput(ctx context.Context, data []byte) []byte
func (*ShellState) ParentInputLoop ¶ added in v0.2.4
func (this *ShellState) ParentInputLoop(data []byte)
func (*ShellState) ParsePS1 ¶ added in v0.1.0
func (this *ShellState) ParsePS1(data string) (int, int, string)
func (*ShellState) PrintError ¶ added in v0.1.0
func (this *ShellState) PrintError(err error)
func (*ShellState) PrintHelp ¶ added in v0.0.30
func (this *ShellState) PrintHelp()
func (*ShellState) PrintHistory ¶ added in v0.1.0
func (this *ShellState) PrintHistory()
func (*ShellState) PrintStatus ¶ added in v0.0.30
func (this *ShellState) PrintStatus()
func (*ShellState) RealizeAutosuggest ¶ added in v0.0.20
func (this *ShellState) RealizeAutosuggest(buffer *ShellBuffer, sendToChild bool, colorStr string)
When the user presses tab or a similar hotkey, we want to turn the autosuggest into a real command
func (*ShellState) RefreshAutosuggest ¶ added in v0.0.20
func (this *ShellState) RefreshAutosuggest(newData []byte, buffer *ShellBuffer, colorStr string)
Update the autosuggest when we receive new data. Clears the old autosuggest if necessary and requests a new one. If the new text matches the old autosuggest prefix then we leave it in place.
func (*ShellState) RequestAutosuggest ¶ added in v0.0.20
func (this *ShellState) RequestAutosuggest(delay time.Duration, command string)
Request a new autosuggest for the given command after the given delay.
func (*ShellState) SendPrompt ¶ added in v0.0.20
func (this *ShellState) SendPrompt()
func (*ShellState) SendPromptResponse ¶ added in v0.1.0
func (this *ShellState) SendPromptResponse(data string)
We want to queue up the prompt response, which does the processing (except for actually printing it), for example adding to history or executing the next step in goal mode. We have to do this in a goroutine because otherwise we would block the main thread.
func (*ShellState) ShowAutosuggest ¶ added in v0.0.20
func (this *ShellState) ShowAutosuggest(buffer *ShellBuffer, result *AutosuggestResult, cursorCol int, termWidth int)
We have a pending autosuggest and we've just received the cursor location from the terminal, so we can now render the autosuggest (in the greyed-out style).
type Tokenization ¶ added in v0.1.0
type UserInputParams ¶ added in v0.1.4
type UserInputParams struct {
Question string `json:"question"`
}