Documentation ¶
Index ¶
- Constants
- Variables
- func Copy(c *completionContext) *completionContext
- func NewPartialMatch(name, kind, detail string) autocomplete.Match
- func NewPromQLCompleter(index autocomplete.QueryIndex) autocomplete.PromQLCompleter
- type CompletionContext
- type ContextualToken
- type Earley
- type EarleyChart
- type EarleyItem
- type EarleyNode
- type Grammar
- type GrammarRule
- type ItemId
- type NonTerminalNode
- type StateSet
- type StateType
- type Symbol
- type TokenType
- type Tokens
- type Tokhan
- type TypedToken
Constants ¶
const Cursor = "\u25EC"
const (
PromQLTokenSeparators = " []{}()+-*/%^=!><~,"
)
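What the marker is for is implied rather than documented here; a minimal sketch, assuming the completer expects the Cursor rune spliced into the query text at the caret position before asking for completions (the query and caret offset below are hypothetical):

query := "sum(rate(http_requests_tot" // user is mid-word at the end
caret := len(query)                   // hypothetical caret offset
marked := query[:caret] + Cursor + query[caret:]
fmt.Println(marked) // sum(rate(http_requests_tot◬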
Variables ¶
var (
	// non-terminals
	Root               = NewNonTerminal("root", true)
	Expression         = NewNonTerminal("expression", false)
	AggrExpression     = NewNonTerminal("aggr-expression", false)
	SubqueryExpression = NewNonTerminal("subquery-expression", false)
	UnaryExpression    = NewNonTerminal("unary-expression", false)

	// binary expressions
	ScalarBinaryExpression = NewNonTerminal("scalar-binary-expression", false)
	VectorBinaryExpression = NewNonTerminal("vector-binary-expression", false)

	// function expressions
	VectorFuncExpression = NewNonTerminal("vector-function-expression", false)
	ScalarFuncExpression = NewNonTerminal("scalar-function-expression", false)

	// metrics expressions
	MatrixSelector = NewNonTerminal("matrix-selector", false)
	VectorSelector = NewNonTerminal("vector-selector", false)

	// expression types
	ScalarTypeExpression  = NewNonTerminal("scalar-type-expression", false)
	VectorTypeExpression  = NewNonTerminal("vector-type-expression", false)
	MatrixTypeExpression  = NewNonTerminal("matrix-type-expression", false)
	LabelsExpression      = NewNonTerminal("labels-expression", false)
	LabelsMatchExpression = NewNonTerminal("labels-match-expression", false)
	LabelValueExpression  = NewNonTerminal("label-value-expression", false)
	AggrCallExpression    = NewNonTerminal("aggr-call-expression", false)
	MetricLabelArgs       = NewNonTerminal("label-args", false)
	OffsetModifier        = NewNonTerminal("offset-modifier", false)
	FunctionCallBody      = NewNonTerminal("function-call-body", false)
	FunctionCallArgs      = NewNonTerminal("function-call-args", false)

	// binary-expression related non-terminals
	BinaryOperator      = NewNonTerminal("scalar-binary-operator", false)
	BinaryGroupModifier = NewNonTerminal("binary-group-modifier", false)

	// terminals
	Identifier               = NewTerminal(ID)                                  // this one is ambiguous
	MetricIdentifier         = NewTerminalWithSubType(ID, METRIC_ID)            // this one is ambiguous
	MetricLabelIdentifier    = NewTerminalWithSubType(ID, METRIC_LABEL_SUBTYPE) // this one is ambiguous
	ScalarFunctionIdentifier = NewTerminal(FUNCTION_SCALAR_ID)
	VectorFunctionIdentifier = NewTerminal(FUNCTION_VECTOR_ID)
	AggregatorOp             = NewTerminal(AGGR_OP)
	AggregateKeyword         = NewTerminal(AGGR_KW)
	BoolKeyword              = NewTerminalWithSubType(KEYWORD, BOOL_KW)
	OffsetKeyword            = NewTerminalWithSubType(KEYWORD, OFFSET_KW)
	GroupKeyword             = NewTerminal(GROUP_KW)
	GroupSide                = NewTerminal(GROUP_SIDE)
	Operator                 = NewTerminal(OPERATOR)
	Arithmetic               = NewTerminal(ARITHMETIC)
	UnaryOperator            = NewTerminalWithSubType(ARITHMETIC, UNARY_OP)
	SetOperator              = NewTerminal(SET)
	LabelMatchOperator       = NewTerminalWithSubType(OPERATOR, LABELMATCH)
	Comparision              = NewTerminalWithSubType(OPERATOR, COMPARISION)
	LBrace                   = NewTerminal(LEFT_BRACE)
	RBrace                   = NewTerminal(RIGHT_BRACE)
	LBracket                 = NewTerminal(LEFT_BRACKET)
	RBracket                 = NewTerminal(RIGHT_BRACKET)
	Comma                    = NewTerminal(COMMA)
	Colon                    = NewTerminal(COLON)
	LParen                   = NewTerminal(LEFT_PAREN)
	RParen                   = NewTerminal(RIGHT_PAREN)
	Str                      = NewTerminal(STRING)
	Num                      = NewTerminal(NUM)
	Duration                 = NewTerminal(DURATION)
	Eof                      = NewTerminal(EOF)

	PromQLParser = NewEarleyParser(*promQLGrammar)
)
Functions ¶
func Copy ¶
func Copy(c *completionContext) *completionContext
Copy deep-copies the given completionContext and returns a pointer to the copy.
func NewPartialMatch ¶
func NewPartialMatch(name, kind, detail string) autocomplete.Match
func NewPromQLCompleter ¶
func NewPromQLCompleter(index autocomplete.QueryIndex) autocomplete.PromQLCompleter
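A hedged construction sketch: the signature above is real, but how to obtain an autocomplete.QueryIndex is outside this package, so the myIndex type below is purely hypothetical.

// myIndex stands in for any autocomplete.QueryIndex implementation,
// e.g. one backed by your metric and label name data (hypothetical).
var idx autocomplete.QueryIndex = myIndex{}
completer := NewPromQLCompleter(idx)
_ = completer // drive completions through the autocomplete.PromQLCompleter API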
Types ¶
type CompletionContext ¶
type ContextualToken ¶
type ContextualToken struct {
	TokenType
	// contains filtered or unexported fields
}
type Earley ¶
type Earley struct {
// contains filtered or unexported fields
}
func NewEarleyParser ¶
func (*Earley) GetSuggestedTokenType ¶
func (p *Earley) GetSuggestedTokenType(tokens Tokens) (types []ContextualToken)
func (*Earley) Parse ¶
Parse parses the full input string. It first tokenizes the input string and then uses those tokens as atomic units in our grammar, which simplifies our parsing logic considerably.
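A minimal sketch using the package-level PromQLParser; Parse's parameters and results are filtered from these docs, so this assumes (from the description above) that it accepts the raw query string, and any return values are discarded.

// Assumption: Parse takes the full query text; results (not shown in
// these docs) are ignored here.
PromQLParser.Parse("sum(rate(http_requests_total[5m]))")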
func (*Earley) ParseTokens ¶
ParseTokens parses the full list of input tokens from the beginning.
func (*Earley) PartialParse ¶
PartialParse is the incremental part of the parser: it takes a list of words (i.e. lexed tokens) and parses starting at a specific word index.
type EarleyChart ¶
type EarleyChart interface {
	States() []*StateSet
	GetState(insertionOrderZeroIndexed int) *StateSet
	String() string
}
Earley items are simple data structures meant to contain the following bits:

(1) a rule in the grammar which has so far been valid;

(2) the position in the rule (the dot) that denotes how much of that rule has been consumed by our parser;

(3) a reference to the index in our symbol list that we've consumed (this probably isn't strictly necessary; one could imagine restructuring the Earley parser so that it takes an earleyStateSet representing the past state and constructs the next StateSet from an arbitrary symbol passed in).
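To make the dot position concrete, here is a small self-contained illustration of a dotted rule advancing as symbols are consumed. It mirrors the Rule/RulePos pair on EarleyItem but is not this package's implementation:

package main

import "fmt"

// dottedRule is an illustrative stand-in for an Earley item:
// a grammar rule plus how much of its right-hand side is consumed.
type dottedRule struct {
	left  string
	right []string
	dot   int // symbols consumed so far; len(right) means "completed"
}

// next reports the symbol the parser expects at the dot, if any.
func (r dottedRule) next() (string, bool) {
	if r.dot >= len(r.right) {
		return "", false
	}
	return r.right[r.dot], true
}

func main() {
	r := dottedRule{left: "expression", right: []string{"NUM", "OPERATOR", "NUM"}}
	for sym, ok := r.next(); ok; sym, ok = r.next() {
		fmt.Printf("%s: expecting %s at dot position %d\n", r.left, sym, r.dot)
		r.dot++ // pretend the scanner matched a token of this type
	}
}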
type EarleyItem ¶
type EarleyItem struct {
	Rule    *GrammarRule
	RulePos int // dot position
	// contains filtered or unexported fields
}
EarleyItem represents a single possible parse path. More abstractly, it represents a potential grammar rule which we can validly apply. It is the basic unit of a StateSet.
func (*EarleyItem) DoesTokenTypeMatch ¶
func (item *EarleyItem) DoesTokenTypeMatch(tkn Tokhan) bool
DoesTokenTypeMatch checks whether the next symbol is a terminal and whether it matches the given token.
func (*EarleyItem) GetRightSymbolByIndex ¶
func (item *EarleyItem) GetRightSymbolByIndex(i int) Symbol
func (*EarleyItem) GetRightSymbolByRulePos ¶
func (item *EarleyItem) GetRightSymbolByRulePos() Symbol
GetRightSymbolByRulePos returns the next symbol after the dot.
func (*EarleyItem) GetRightSymbolTypeByRulePos ¶
func (item *EarleyItem) GetRightSymbolTypeByRulePos() *TokenType
GetRightSymbolTypeByRulePos returns the TokenType of the next symbol after the dot, when that symbol is a terminal.
func (*EarleyItem) String ¶
func (item *EarleyItem) String() string
I like this bit from gearley, so I am leaving it the way it was
type EarleyNode ¶
type EarleyNode interface {
	String() string
	// contains filtered or unexported methods
}
func NewTerminal ¶
func NewTerminal(name TokenType) EarleyNode
func NewTerminalWithSubType ¶
func NewTerminalWithSubType(name TokenType, subtype TokenType) EarleyNode
type Grammar ¶
type Grammar struct {
// contains filtered or unexported fields
}
func NewGrammar ¶
func NewGrammar(rules ...*GrammarRule) *Grammar
type GrammarRule ¶
type GrammarRule struct {
// contains filtered or unexported fields
}
func NewRule ¶
func NewRule(t NonTerminalNode, symbols ...Symbol) *GrammarRule
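The constructors above compose the way the package's own grammar variables do. A toy sketch, assuming (as that var block suggests) that both terminal and non-terminal nodes satisfy Symbol, and that NewEarleyParser takes a Grammar value as in PromQLParser's initialization:

// Toy grammar: expression -> NUM OPERATOR NUM, with expression as root.
expr := NewNonTerminal("expression", true)
num := NewTerminal(NUM)
op := NewTerminal(OPERATOR)

grammar := NewGrammar(NewRule(expr, num, op, num))
parser := NewEarleyParser(*grammar)
_ = parser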
func (*GrammarRule) String ¶
func (r *GrammarRule) String() string
type NonTerminalNode ¶
type NonTerminalNode interface {
	EarleyNode
	GetName() string
	// contains filtered or unexported methods
}
func NewNonTerminal ¶
func NewNonTerminal(name string, root bool) NonTerminalNode
type StateSet ¶
type StateSet struct {
// contains filtered or unexported fields
}
An Earley StateSet represents all possible rules at a given index in an Earley chart.
func NewStateSet ¶
func NewStateSet() *StateSet
func (*StateSet) GetStates ¶
func (s *StateSet) GetStates() (states []EarleyItem)
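A trivial sketch: a freshly constructed set contains no items, since state sets are populated by the parser as it scans, predicts, and completes, not by hand.

s := NewStateSet()
fmt.Println(len(s.GetStates())) // 0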
type Symbol ¶
type Symbol interface {
	String() string
	// contains filtered or unexported methods
}
Earley parsers parse symbols in a bottom-up manner.
type TokenType ¶
type TokenType string
const (
	ID                   TokenType = "identifier"
	METRIC_ID            TokenType = "metric-identifier"
	METRIC_LABEL_SUBTYPE TokenType = "metric-label-identifier"
	FUNCTION_SCALAR_ID   TokenType = "function-scalar-identifier"
	FUNCTION_VECTOR_ID   TokenType = "function-vector-identifier"

	OPERATOR TokenType = "operator"

	// binary operators
	ARITHMETIC  TokenType = "arithmetic"
	COMPARISION TokenType = "comparision"
	SET         TokenType = "set"

	// label match operator
	LABELMATCH TokenType = "label-match"

	// unary operators
	UNARY_OP TokenType = "unary-op"

	AGGR_OP TokenType = "aggregator_operation"

	// keywords
	KEYWORD    TokenType = "keyword"
	AGGR_KW    TokenType = "aggregator_keyword"
	BOOL_KW    TokenType = "bool-keyword"
	OFFSET_KW  TokenType = "offset-keyword"
	GROUP_SIDE TokenType = "group-side"
	GROUP_KW   TokenType = "group-keyword"

	LEFT_BRACE    TokenType = "leftbrace"
	RIGHT_BRACE   TokenType = "rightbrace"
	LEFT_PAREN    TokenType = "leftparen"
	RIGHT_PAREN   TokenType = "rightparen"
	LEFT_BRACKET  TokenType = "leftbracket"
	RIGHT_BRACKET TokenType = "rightbracket"
	COMMA         TokenType = "comma"
	COLON         TokenType = "colon"
	STRING        TokenType = "string"
	NUM           TokenType = "number"
	DURATION      TokenType = "duration"
	EOF           TokenType = "EOF"
	UNKNOWN       TokenType = "unknown"
)
type Tokhan ¶
type Tokhan struct {
	StartPos int
	EndPos   int
	Type     TokenType
	ItemType parser.ItemType
	Val      string
	// contains filtered or unexported fields
}
Tokhan contains the essential bits of data we need for processing a single lexical unit.
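Because the listed fields are exported, a Tokhan can be built directly for tests or inspection. A sketch (the unexported fields are left at their zero values, which may or may not suffice for real parsing):

tok := Tokhan{
	StartPos: 0,
	EndPos:   4,
	Type:     ID,
	Val:      "rate",
}
fmt.Println(tok.Type, tok.Val) // identifier rate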
type TypedToken ¶
type TypedToken interface {
GetTokenType() TokenType
}