Documentation ¶
Overview ¶
Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. Use of this file is governed by the BSD 3-clause license that can be found in the LICENSE.txt file in the project root.
Index ¶
- Constants
- Variables
- func EscapeWhitespace(s string, escapeSpaces bool) string
- func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool
- func PredictionModeallSubsetsConflict(altsets []*BitSet) bool
- func PredictionModeallSubsetsEqual(altsets []*BitSet) bool
- func PredictionModegetSingleViableAlt(altsets []*BitSet) int
- func PredictionModegetUniqueAlt(altsets []*BitSet) int
- func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool
- func PredictionModehasConflictingAltSet(altsets []*BitSet) bool
- func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool
- func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool
- func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool
- func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int
- func PrintArrayJavaStyle(sa []string) string
- func TerminalNodeToStringArray(sa []TerminalNode) []string
- func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string
- func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string
- type AND
- type ATN
- type ATNConfig
- type ATNConfigSet
- type ATNConfigSetPair
- type ATNDeserializationOptions
- func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool
- func (opts *ATNDeserializationOptions) ReadOnly() bool
- func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool)
- func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool)
- func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool)
- func (opts *ATNDeserializationOptions) VerifyATN() bool
- type ATNDeserializer
- type ATNState
- type AbstractPredicateTransition
- type ActionTransition
- type AltDict
- type ArrayPredictionContext
- type AtomTransition
- type BailErrorStrategy
- type BaseATNConfig
- func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, ...) *BaseATNConfig
- func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig
- func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig
- func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig
- func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig
- func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, ...) *BaseATNConfig
- func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig
- func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig
- func (b *BaseATNConfig) GetAlt() int
- func (b *BaseATNConfig) GetContext() PredictionContext
- func (b *BaseATNConfig) GetReachesIntoOuterContext() int
- func (b *BaseATNConfig) GetSemanticContext() SemanticContext
- func (b *BaseATNConfig) GetState() ATNState
- func (b *BaseATNConfig) SetContext(v PredictionContext)
- func (b *BaseATNConfig) SetReachesIntoOuterContext(v int)
- func (b *BaseATNConfig) String() string
- type BaseATNConfigSet
- func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool
- func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool
- func (b *BaseATNConfigSet) Alts() *BitSet
- func (b *BaseATNConfigSet) Clear()
- func (b *BaseATNConfigSet) Contains(item ATNConfig) bool
- func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool
- func (b *BaseATNConfigSet) Equals(other interface{}) bool
- func (b *BaseATNConfigSet) FullContext() bool
- func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet
- func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool
- func (b *BaseATNConfigSet) GetItems() []ATNConfig
- func (b *BaseATNConfigSet) GetPredicates() []SemanticContext
- func (b *BaseATNConfigSet) GetStates() Set
- func (b *BaseATNConfigSet) GetUniqueAlt() int
- func (b *BaseATNConfigSet) HasSemanticContext() bool
- func (b *BaseATNConfigSet) IsEmpty() bool
- func (b *BaseATNConfigSet) Length() int
- func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator)
- func (b *BaseATNConfigSet) ReadOnly() bool
- func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet)
- func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool)
- func (b *BaseATNConfigSet) SetHasSemanticContext(v bool)
- func (b *BaseATNConfigSet) SetReadOnly(readOnly bool)
- func (b *BaseATNConfigSet) SetUniqueAlt(v int)
- func (b *BaseATNConfigSet) String() string
- type BaseATNSimulator
- type BaseATNState
- func (as *BaseATNState) AddTransition(trans Transition, index int)
- func (as *BaseATNState) GetATN() *ATN
- func (as *BaseATNState) GetEpsilonOnlyTransitions() bool
- func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet
- func (as *BaseATNState) GetRuleIndex() int
- func (as *BaseATNState) GetStateNumber() int
- func (as *BaseATNState) GetStateType() int
- func (as *BaseATNState) GetTransitions() []Transition
- func (as *BaseATNState) SetATN(atn *ATN)
- func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet)
- func (as *BaseATNState) SetRuleIndex(v int)
- func (as *BaseATNState) SetStateNumber(stateNumber int)
- func (as *BaseATNState) SetTransitions(t []Transition)
- func (as *BaseATNState) String() string
- type BaseAbstractPredicateTransition
- type BaseBlockStartState
- type BaseDecisionState
- type BaseInterpreterRuleContext
- type BaseLexer
- func (b *BaseLexer) Emit() Token
- func (b *BaseLexer) EmitEOF() Token
- func (b *BaseLexer) EmitToken(token Token)
- func (b *BaseLexer) GetATN() *ATN
- func (b *BaseLexer) GetAllTokens() []Token
- func (b *BaseLexer) GetCharIndex() int
- func (b *BaseLexer) GetCharPositionInLine() int
- func (b *BaseLexer) GetInputStream() CharStream
- func (b *BaseLexer) GetInterpreter() ILexerATNSimulator
- func (b *BaseLexer) GetLine() int
- func (b *BaseLexer) GetSourceName() string
- func (b *BaseLexer) GetText() string
- func (b *BaseLexer) GetTokenFactory() TokenFactory
- func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair
- func (b *BaseLexer) GetType() int
- func (b *BaseLexer) More()
- func (b *BaseLexer) NextToken() Token
- func (b *BaseLexer) PopMode() int
- func (b *BaseLexer) PushMode(m int)
- func (b *BaseLexer) Recover(re RecognitionException)
- func (b *BaseLexer) SetChannel(v int)
- func (b *BaseLexer) SetInputStream(input CharStream)
- func (b *BaseLexer) SetMode(m int)
- func (b *BaseLexer) SetText(text string)
- func (b *BaseLexer) SetType(t int)
- func (b *BaseLexer) Skip()
- type BaseLexerAction
- type BaseParseTreeListener
- type BaseParseTreeVisitor
- type BaseParser
- func (p *BaseParser) AddParseListener(listener ParseTreeListener)
- func (p *BaseParser) Consume() Token
- func (p *BaseParser) DumpDFA()
- func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int)
- func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int)
- func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int)
- func (p *BaseParser) ExitRule()
- func (p *BaseParser) GetATN() *ATN
- func (p *BaseParser) GetATNWithBypassAlts()
- func (p *BaseParser) GetCurrentToken() Token
- func (p *BaseParser) GetDFAStrings() string
- func (p *BaseParser) GetErrorHandler() ErrorStrategy
- func (p *BaseParser) GetExpectedTokens() *IntervalSet
- func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet
- func (p *BaseParser) GetInputStream() IntStream
- func (p *BaseParser) GetInterpreter() *ParserATNSimulator
- func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext
- func (p *BaseParser) GetParseListeners() []ParseTreeListener
- func (p *BaseParser) GetParserRuleContext() ParserRuleContext
- func (p *BaseParser) GetPrecedence() int
- func (p *BaseParser) GetRuleIndex(ruleName string) int
- func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string
- func (p *BaseParser) GetSourceName() string
- func (p *BaseParser) GetTokenFactory() TokenFactory
- func (p *BaseParser) GetTokenStream() TokenStream
- func (p *BaseParser) IsExpectedToken(symbol int) bool
- func (p *BaseParser) Match(ttype int) Token
- func (p *BaseParser) MatchWildcard() Token
- func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException)
- func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool
- func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int)
- func (p *BaseParser) RemoveParseListener(listener ParseTreeListener)
- func (p *BaseParser) SetErrorHandler(e ErrorStrategy)
- func (p *BaseParser) SetInputStream(input TokenStream)
- func (p *BaseParser) SetParserRuleContext(v ParserRuleContext)
- func (p *BaseParser) SetTokenStream(input TokenStream)
- func (p *BaseParser) SetTrace(trace *TraceListener)
- func (p *BaseParser) TriggerEnterRuleEvent()
- func (p *BaseParser) TriggerExitRuleEvent()
- func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext)
- type BaseParserRuleContext
- func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{}
- func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext
- func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl
- func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl
- func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext)
- func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener)
- func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener)
- func (prc *BaseParserRuleContext) GetChild(i int) Tree
- func (prc *BaseParserRuleContext) GetChildCount() int
- func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext
- func (prc *BaseParserRuleContext) GetChildren() []Tree
- func (prc *BaseParserRuleContext) GetPayload() interface{}
- func (prc *BaseParserRuleContext) GetRuleContext() RuleContext
- func (prc *BaseParserRuleContext) GetSourceInterval() *Interval
- func (prc *BaseParserRuleContext) GetStart() Token
- func (prc *BaseParserRuleContext) GetStop() Token
- func (prc *BaseParserRuleContext) GetText() string
- func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode
- func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode
- func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext
- func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext
- func (prc *BaseParserRuleContext) RemoveLastChild()
- func (prc *BaseParserRuleContext) SetException(e RecognitionException)
- func (prc *BaseParserRuleContext) SetStart(t Token)
- func (prc *BaseParserRuleContext) SetStop(t Token)
- func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string
- func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string
- type BasePredictionContext
- type BaseRecognitionException
- type BaseRecognizer
- func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int)
- func (b *BaseRecognizer) AddErrorListener(listener ErrorListener)
- func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string
- func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener
- func (b *BaseRecognizer) GetLiteralNames() []string
- func (b *BaseRecognizer) GetRuleIndexMap() map[string]int
- func (b *BaseRecognizer) GetRuleNames() []string
- func (b *BaseRecognizer) GetState() int
- func (b *BaseRecognizer) GetSymbolicNames() []string
- func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string
- func (b *BaseRecognizer) GetTokenNames() []string
- func (b *BaseRecognizer) GetTokenType(tokenName string) int
- func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool
- func (b *BaseRecognizer) RemoveErrorListeners()
- func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool
- func (b *BaseRecognizer) SetState(v int)
- type BaseRewriteOperation
- func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int
- func (op *BaseRewriteOperation) GetIndex() int
- func (op *BaseRewriteOperation) GetInstructionIndex() int
- func (op *BaseRewriteOperation) GetOpName() string
- func (op *BaseRewriteOperation) GetText() string
- func (op *BaseRewriteOperation) GetTokens() TokenStream
- func (op *BaseRewriteOperation) SetIndex(val int)
- func (op *BaseRewriteOperation) SetInstructionIndex(val int)
- func (op *BaseRewriteOperation) SetOpName(val string)
- func (op *BaseRewriteOperation) SetText(val string)
- func (op *BaseRewriteOperation) SetTokens(val TokenStream)
- func (op *BaseRewriteOperation) String() string
- type BaseRuleContext
- func (b *BaseRuleContext) GetAltNumber() int
- func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext
- func (b *BaseRuleContext) GetInvokingState() int
- func (b *BaseRuleContext) GetParent() Tree
- func (b *BaseRuleContext) GetRuleIndex() int
- func (b *BaseRuleContext) IsEmpty() bool
- func (b *BaseRuleContext) SetAltNumber(altNumber int)
- func (b *BaseRuleContext) SetInvokingState(t int)
- func (b *BaseRuleContext) SetParent(v Tree)
- type BaseSingletonPredictionContext
- type BaseToken
- func (b *BaseToken) GetChannel() int
- func (b *BaseToken) GetColumn() int
- func (b *BaseToken) GetInputStream() CharStream
- func (b *BaseToken) GetLine() int
- func (b *BaseToken) GetSource() *TokenSourceCharStreamPair
- func (b *BaseToken) GetStart() int
- func (b *BaseToken) GetStop() int
- func (b *BaseToken) GetTokenIndex() int
- func (b *BaseToken) GetTokenSource() TokenSource
- func (b *BaseToken) GetTokenType() int
- func (b *BaseToken) SetTokenIndex(v int)
- type BaseTransition
- type BasicBlockStartState
- type BasicState
- type BitSet
- type BlockEndState
- type BlockStartState
- type CharStream
- type CommonToken
- type CommonTokenFactory
- type CommonTokenStream
- func (c *CommonTokenStream) Consume()
- func (c *CommonTokenStream) Fill()
- func (c *CommonTokenStream) Get(index int) Token
- func (c *CommonTokenStream) GetAllText() string
- func (c *CommonTokenStream) GetAllTokens() []Token
- func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token
- func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token
- func (c *CommonTokenStream) GetSourceName() string
- func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string
- func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
- func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string
- func (c *CommonTokenStream) GetTokenSource() TokenSource
- func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token
- func (c *CommonTokenStream) Index() int
- func (c *CommonTokenStream) LA(i int) int
- func (c *CommonTokenStream) LB(k int) Token
- func (c *CommonTokenStream) LT(k int) Token
- func (c *CommonTokenStream) Mark() int
- func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int
- func (c *CommonTokenStream) Release(marker int)
- func (c *CommonTokenStream) Seek(index int)
- func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource)
- func (c *CommonTokenStream) Size() int
- func (c *CommonTokenStream) Sync(i int) bool
- type ConsoleErrorListener
- type DFA
- type DFASerializer
- type DFAState
- type DecisionState
- type DefaultErrorListener
- func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ...)
- func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, ...)
- func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, ...)
- func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, ...)
- type DefaultErrorStrategy
- func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
- func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token
- func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string
- func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool
- func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException)
- func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token
- func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException)
- func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException)
- func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException)
- func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser)
- func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser)
- func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException)
- func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser)
- func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token
- func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool
- func (d *DefaultErrorStrategy) Sync(recognizer Parser)
- type DiagnosticErrorListener
- func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ...)
- func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, ...)
- func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, ...)
- type DoubleDict
- type EmptyPredictionContext
- type EpsilonTransition
- type ErrorListener
- type ErrorNode
- type ErrorNodeImpl
- type ErrorStrategy
- type FailedPredicateException
- type FileStream
- type IATNSimulator
- type ILexerATNSimulator
- type InputMisMatchException
- type InputStream
- func (is *InputStream) Consume()
- func (*InputStream) GetSourceName() string
- func (is *InputStream) GetText(start int, stop int) string
- func (is *InputStream) GetTextFromInterval(i *Interval) string
- func (is *InputStream) GetTextFromTokens(start, stop Token) string
- func (is *InputStream) Index() int
- func (is *InputStream) LA(offset int) int
- func (is *InputStream) LT(offset int) int
- func (is *InputStream) Mark() int
- func (is *InputStream) Release(marker int)
- func (is *InputStream) Seek(index int)
- func (is *InputStream) Size() int
- func (is *InputStream) String() string
- type InsertAfterOp
- type InsertBeforeOp
- type IntStack
- type IntStream
- type InterpreterRuleContext
- type Interval
- type IntervalSet
- type LL1Analyzer
- type Lexer
- type LexerATNConfig
- func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig
- func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig
- func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig
- func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig
- func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, ...) *LexerATNConfig
- func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig
- type LexerATNSimulator
- func (l *LexerATNSimulator) Consume(input CharStream)
- func (l *LexerATNSimulator) GetCharPositionInLine() int
- func (l *LexerATNSimulator) GetLine() int
- func (l *LexerATNSimulator) GetText(input CharStream) string
- func (l *LexerATNSimulator) GetTokenName(tt int) string
- func (l *LexerATNSimulator) Match(input CharStream, mode int) int
- func (l *LexerATNSimulator) MatchATN(input CharStream) int
- type LexerAction
- type LexerActionExecutor
- type LexerChannelAction
- type LexerCustomAction
- type LexerDFASerializer
- type LexerIndexedCustomAction
- type LexerModeAction
- type LexerMoreAction
- type LexerNoViableAltException
- type LexerPopModeAction
- type LexerPushModeAction
- type LexerSkipAction
- type LexerTypeAction
- type LoopEndState
- type NoViableAltException
- type NotSetTransition
- type OR
- type OrderedATNConfigSet
- type ParseCancellationException
- type ParseTree
- type ParseTreeListener
- type ParseTreeVisitor
- type ParseTreeWalker
- type Parser
- type ParserATNSimulator
- func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int
- func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int
- func (p *ParserATNSimulator) GetPredictionMode() int
- func (p *ParserATNSimulator) GetTokenName(t int) string
- func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, exact bool, ...)
- func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, ...)
- func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int)
- func (p *ParserATNSimulator) SetPredictionMode(v int)
- type ParserRuleContext
- type PlusBlockStartState
- type PlusLoopbackState
- type PrecedencePredicate
- type PrecedencePredicateTransition
- type PredPrediction
- type Predicate
- type PredicateTransition
- type PredictionContext
- type PredictionContextCache
- type ProxyErrorListener
- func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ...)
- func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, ...)
- func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, ...)
- func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, ...)
- type RangeTransition
- type RecognitionException
- type Recognizer
- type ReplaceOp
- type RewriteOperation
- type RuleContext
- type RuleNode
- type RuleStartState
- type RuleStopState
- type RuleTransition
- type SemanticContext
- type Set
- type SetTransition
- type SimState
- type SingletonPredictionContext
- type StarBlockStartState
- type StarLoopEntryState
- type StarLoopbackState
- type SyntaxTree
- type TerminalNode
- type TerminalNodeImpl
- func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{}
- func (t *TerminalNodeImpl) GetChild(i int) Tree
- func (t *TerminalNodeImpl) GetChildCount() int
- func (t *TerminalNodeImpl) GetChildren() []Tree
- func (t *TerminalNodeImpl) GetParent() Tree
- func (t *TerminalNodeImpl) GetPayload() interface{}
- func (t *TerminalNodeImpl) GetSourceInterval() *Interval
- func (t *TerminalNodeImpl) GetSymbol() Token
- func (t *TerminalNodeImpl) GetText() string
- func (t *TerminalNodeImpl) SetChildren(tree []Tree)
- func (t *TerminalNodeImpl) SetParent(tree Tree)
- func (t *TerminalNodeImpl) String() string
- func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string
- type Token
- type TokenFactory
- type TokenSource
- type TokenSourceCharStreamPair
- type TokenStream
- type TokenStreamRewriter
- func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation)
- func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int)
- func (tsr *TokenStreamRewriter) DeleteDefault(from, to int)
- func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int)
- func (tsr *TokenStreamRewriter) DeleteProgram(program_name string)
- func (tsr *TokenStreamRewriter) DeleteProgramDefault()
- func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token)
- func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token)
- func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int
- func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int
- func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation
- func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string
- func (tsr *TokenStreamRewriter) GetTextDefault() string
- func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream
- func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation
- func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string)
- func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string)
- func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string)
- func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string)
- func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string)
- func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string)
- func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string)
- func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string)
- func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string)
- func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string)
- func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string)
- func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string)
- func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int)
- func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int)
- func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int)
- type TokensStartState
- type TraceListener
- type Transition
- type Tree
- type WildcardTransition
Constants ¶
const ( ATNStateInvalidType = 0 ATNStateBasic = 1 ATNStateRuleStart = 2 ATNStateBlockStart = 3 ATNStatePlusBlockStart = 4 ATNStateStarBlockStart = 5 ATNStateTokenStart = 6 ATNStateRuleStop = 7 ATNStateBlockEnd = 8 ATNStateStarLoopBack = 9 ATNStateStarLoopEntry = 10 ATNStatePlusLoopBack = 11 ATNStateLoopEnd = 12 ATNStateInvalidStateNumber = -1 )
Constants for serialization.
const ( ATNTypeLexer = 0 ATNTypeParser = 1 )
Represent the type of recognizer an ATN applies to.
const ( LexerDefaultMode = 0 LexerMore = -2 LexerSkip = -3 )
const ( LexerDefaultTokenChannel = TokenDefaultChannel LexerHidden = TokenHiddenChannel LexerMinCharValue = 0x0000 LexerMaxCharValue = 0x10FFFF )
const ( LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. )
const ( // // The SLL(*) prediction mode. This prediction mode ignores the current // parser context when making predictions. This is the fastest prediction // mode, and provides correct results for many grammars. This prediction // mode is more powerful than the prediction mode provided by ANTLR 3, but // may result in syntax errors for grammar and input combinations which are // not SLL. // // <p> // When using this prediction mode, the parser will either return a correct // parse tree (i.e. the same parse tree that would be returned with the // {@link //LL} prediction mode), or it will Report a syntax error. If a // syntax error is encountered when using the {@link //SLL} prediction mode, // it may be due to either an actual syntax error in the input or indicate // that the particular combination of grammar and input requires the more // powerful {@link //LL} prediction abilities to complete successfully.</p> // // <p> // This prediction mode does not provide any guarantees for prediction // behavior for syntactically-incorrect inputs.</p> // PredictionModeSLL = 0 // // The LL(*) prediction mode. This prediction mode allows the current parser // context to be used for resolving SLL conflicts that occur during // prediction. This is the fastest prediction mode that guarantees correct // parse results for all combinations of grammars with syntactically correct // inputs. // // <p> // When using this prediction mode, the parser will make correct decisions // for all syntactically-correct grammar and input combinations. However, in // cases where the grammar is truly ambiguous this prediction mode might not // Report a precise answer for <em>exactly which</em> alternatives are // ambiguous.</p> // // <p> // This prediction mode does not provide any guarantees for prediction // behavior for syntactically-incorrect inputs.</p> // PredictionModeLL = 1 // // The LL(*) prediction mode with exact ambiguity detection. 
In addition to // the correctness guarantees provided by the {@link //LL} prediction mode, // this prediction mode instructs the prediction algorithm to determine the // complete and exact set of ambiguous alternatives for every ambiguous // decision encountered while parsing. // // <p> // This prediction mode may be used for diagnosing ambiguities during // grammar development. Due to the performance overhead of calculating sets // of ambiguous alternatives, this prediction mode should be avoided when // the exact results are not necessary.</p> // // <p> // This prediction mode does not provide any guarantees for prediction // behavior for syntactically-incorrect inputs.</p> // PredictionModeLLExactAmbigDetection = 2 )
const ( TokenInvalidType = 0 // During lookahead operations, this "token" signifies we hit rule end ATN state // and did not follow it despite needing to. TokenEpsilon = -2 TokenMinUserTokenType = 1 TokenEOF = -1 TokenDefaultChannel = 0 TokenHiddenChannel = 1 )
const ( Default_Program_Name = "default" Program_Init_Size = 100 Min_Token_Index = 0 )
const ( TransitionEPSILON = 1 TransitionRANGE = 2 TransitionRULE = 3 TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? TransitionATOM = 5 TransitionACTION = 6 TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 TransitionNOTSET = 8 TransitionWILDCARD = 9 TransitionPRECEDENCE = 10 )
const (
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)
Represents {@code $} in local context prediction, which means wildcard: {@code * + x = *}.
const (
LL1AnalyzerHitPred = TokenInvalidType
)
- Special value added to the lookahead sets to indicate that we hit a predicate during analysis if {@code seeThruPreds==false}.
Variables ¶
var ( LexerATNSimulatorDebug = false LexerATNSimulatorDFADebug = false LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN LexerATNSimulatorMatchCalls = 0 )
var ( ParserATNSimulatorDebug = false ParserATNSimulatorListATNDecisions = false ParserATNSimulatorDFADebug = false ParserATNSimulatorRetryDebug = false TurnOffLRLoopEntryBranchOpt = false )
var ( BasePredictionContextglobalNodeCount = 1 BasePredictionContextid = BasePredictionContextglobalNodeCount )
var ATNInvalidAltNumber int
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
var ATNStateInitialNumTransitions = 4
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not explicitly copy token text when constructing tokens.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
Provides a default instance of {@link ConsoleErrorListener}.
var ErrEmptyStack = errors.New("Stack is empty")
var LexerMoreActionINSTANCE = NewLexerMoreAction()
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
var LexerSkipActionINSTANCE = NewLexerSkipAction()
Provides a singleton instance of this parameterless lexer action.
var ParseTreeWalkerDefault = NewParseTreeWalker()
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
var TransitionserializationNames = []string{
"INVALID",
"EPSILON",
"RANGE",
"RULE",
"PREDICATE",
"ATOM",
"ACTION",
"SET",
"NOT_SET",
"WILDCARD",
"PRECEDENCE",
}
var TreeInvalidInterval = NewInterval(-1, -2)
Functions ¶
func EscapeWhitespace ¶
func PredictionModeallConfigsInRuleStopStates ¶
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool
Checks if all configurations in {@code configs} are in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).
@param configs the configuration set to test @return {@code true} if all configurations in {@code configs} are in a {@link RuleStopState}, otherwise {@code false}
func PredictionModeallSubsetsConflict ¶
Determines if every alternative subset in {@code altsets} contains more than one alternative.
@param altsets a collection of alternative subsets @return {@code true} if every {@link BitSet} in {@code altsets} has {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
func PredictionModeallSubsetsEqual ¶
Determines if every alternative subset in {@code altsets} is equivalent.
@param altsets a collection of alternative subsets @return {@code true} if every member of {@code altsets} is equal to the others, otherwise {@code false}
func PredictionModegetUniqueAlt ¶
Returns the unique alternative predicted by all alternative subsets in {@code altsets}. If no such alternative exists, this method returns {@link ATN//INVALID_ALT_NUMBER}.
@param altsets a collection of alternative subsets
func PredictionModehasConfigInRuleStopState ¶
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool
Checks if any configuration in {@code configs} is in a {@link RuleStopState}. Configurations meeting this condition have reached the end of the decision rule (local context) or end of start rule (full context).
@param configs the configuration set to test @return {@code true} if any configuration in {@code configs} is in a {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConflictingAltSet ¶
Determines if any single alternative subset in {@code altsets} contains more than one alternative.
@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
func PredictionModehasNonConflictingAltSet ¶
Determines if any single alternative subset in {@code altsets} contains exactly one alternative.
@param altsets a collection of alternative subsets @return {@code true} if {@code altsets} contains a {@link BitSet} with {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
func PredictionModehasSLLConflictTerminatingPrediction ¶
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool
Computes the SLL prediction termination condition.
<p> This method computes the SLL prediction termination condition for both of the following cases.</p>
<ul> <li>The usual SLL+LL fallback upon SLL conflict</li> <li>Pure SLL without LL fallback</li> </ul>
<p><strong>COMBINED SLL+LL PARSING</strong></p>
<p>When LL-fallback is enabled upon SLL conflict, correct predictions are ensured regardless of how the termination condition is computed by this method. Due to the substantially higher cost of LL prediction, the prediction should only fall back to LL when the additional lookahead cannot lead to a unique SLL prediction.</p>
<p>Assuming combined SLL+LL parsing, an SLL configuration set with only conflicting subsets should fall back to full LL, even if the configuration sets don't resolve to the same alternative (e.g. {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting configuration, SLL could continue with the hopes that more lookahead will resolve via one of those non-conflicting configurations.</p>
<p>Here's the prediction termination rule then: SLL (for SLL+LL parsing) stops when it sees only conflicting configuration subsets. In contrast, full LL keeps going when there is uncertainty.</p>
<p><strong>HEURISTIC</strong></p>
<p>As a heuristic, we stop prediction when we see any conflicting subset unless we see a state that only has one alternative associated with it. The single-alt-state thing lets prediction continue upon rules like (otherwise, it would admit defeat too soon):</p>
<p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
<p>When the ATN simulation reaches the state before {@code ';'}, it has a DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop processing this node because alternative two has another way to continue, via {@code [6|2|[]]}.</p>
<p>It also lets us continue for this rule:</p>
<p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
<p>After Matching input A, we reach the stop state for rule A, state 1. State 8 is the state right before B. Clearly alternatives 1 and 2 conflict and no amount of further lookahead will separate the two. However, alternative 3 will be able to continue and so we do not stop working on this state. In the previous example, we're concerned with states associated with the conflicting alternatives. Here alt 3 is not associated with the conflicting configs, but since we can continue looking for input reasonably, don't declare the state done.</p>
<p><strong>PURE SLL PARSING</strong></p>
<p>To handle pure SLL parsing, all we have to do is make sure that we combine stack contexts for configurations that differ only by semantic predicate. From there, we can do the usual SLL termination heuristic.</p>
<p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
<p>SLL decisions don't evaluate predicates until after they reach DFA stop states because they need to create the DFA cache that works in all semantic situations. In contrast, full LL evaluates predicates collected during start state computation so it can ignore predicates thereafter. This means that SLL termination detection can totally ignore semantic predicates.</p>
<p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not semantic predicate contexts so we might see two configurations like the following.</p>
<p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
<p>Before testing these configurations against others, we have to merge {@code x} and {@code x'} (without modifying the existing configurations). For example, we test {@code (x+x')==x''} when looking for conflicts in the following configurations.</p>
<p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
<p>If the configuration set has predicates (as indicated by {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of the configurations to strip out all of the predicates so that a standard {@link ATNConfigSet} will merge everything ignoring predicates.</p>
func PredictionModehasStateAssociatedWithOneAlt ¶
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool
func PredictionModeresolvesToJustOneViableAlt ¶
Full LL prediction termination.
<p>Can we stop looking ahead during ATN simulation or is there some uncertainty as to which alternative we will ultimately pick, after consuming more input? Even if there are partial conflicts, we might know that everything is going to resolve to the same minimum alternative. That means we can stop since no more lookahead will change that fact. On the other hand, there might be multiple conflicts that resolve to different minimums. That means we need more look ahead to decide which of those alternatives we should predict.</p>
<p>The basic idea is to split the set of configurations {@code C}, into conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with non-conflicting configurations. Two configurations conflict if they have identical {@link ATNConfig//state} and {@link ATNConfig//context} values but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
<p>Reduce these configuration subsets to the set of possible alternatives. You can compute the alternative subsets in one pass as follows:</p>
<p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in {@code C} holding {@code s} and {@code ctx} fixed.</p>
<p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>
<p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
<p>If {@code |A_s,ctx|=1} then there is no conflict associated with {@code s} and {@code ctx}.</p>
<p>Reduce the subsets to singletons by choosing a minimum of each subset. If the union of these alternative subsets is a singleton, then no amount of more lookahead will help us. We will always pick that alternative. If, however, there is more than one alternative, then we are uncertain which alternative to predict and must continue looking for resolution. We may or may not discover an ambiguity in the future, even if there are no conflicting subsets this round.</p>
<p>The biggest sin is to terminate early because it means we've made a decision but were uncertain as to the eventual outcome. We haven't used enough lookahead. On the other hand, announcing a conflict too late is no big deal; you will still have the conflict. It's just inefficient. It might even look until the end of file.</p>
<p>No special consideration for semantic predicates is required because predicates are evaluated on-the-fly for full LL prediction, ensuring that no configuration contains a semantic context during the termination check.</p>
<p><strong>CONFLICTING CONFIGS</strong></p>
<p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict when {@code i!=j} but {@code x=x'}. Because we merge all {@code (s, i, _)} configurations together, that means that there are at most {@code n} configurations associated with state {@code s} for {@code n} possible alternatives in the decision. The merged stacks complicate the comparison of configuration contexts {@code x} and {@code x'}. Sam checks to see if one is a subset of the other by calling merge and checking to see if the merged result is either {@code x} or {@code x'}. If the {@code x} associated with lowest alternative {@code i} is the superset, then {@code i} is the only possible prediction since the others resolve to {@code min(i)} as well. However, if {@code x} is associated with {@code j>i} then at least one stack configuration for {@code j} is not in conflict with alternative {@code i}. The algorithm should keep going, looking for more lookahead due to the uncertainty.</p>
<p>For simplicity, I'm doing a equality check between {@code x} and {@code x'} that lets the algorithm continue to consume lookahead longer than necessary. The reason I like the equality is of course the simplicity but also because that is the test you need to detect the alternatives that are actually in conflict.</p>
<p><strong>CONTINUE/STOP RULE</strong></p>
<p>Continue if union of resolved alternative sets from non-conflicting and conflicting alternative subsets has more than one alternative. We are uncertain about which alternative to predict.</p>
<p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which alternatives are still in the running for the amount of input we've consumed at this point. The conflicting sets let us strip away configurations that won't lead to more states because we resolve conflicts to the configuration with a minimum alternate for the conflicting set.</p>
<p><strong>CASES</strong></p>
<ul>
<li>no conflicts and more than 1 alternative in set => continue</li>
<li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1,3}} => continue </li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1}} => stop and predict 1</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U {@code {1}} = {@code {1}} => stop and predict 1, can announce ambiguity {@code {1,2}}</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U {@code {2}} = {@code {1,2}} => continue</li>
<li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U {@code {3}} = {@code {1,3}} => continue</li>
</ul>
<p><strong>EXACT AMBIGUITY DETECTION</strong></p>
<p>If all states Report the same conflicting set of alternatives, then we know we have the exact ambiguity set.</p>
<p><code>|A_<em>i</em>|>1</code> and <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
<p>In other words, we continue examining lookahead until all {@code A_i} have more than one alternative and all {@code A_i} are the same. If {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate because the resolved set is {@code {1}}. To determine what the real ambiguity is, we have to know whether the ambiguity is between one and two or one and three so we keep going. We can only stop prediction when we need exact ambiguity detection when the sets look like {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
func PrintArrayJavaStyle ¶
func TerminalNodeToStringArray ¶
func TerminalNodeToStringArray(sa []TerminalNode) []string
func TreesStringTree ¶
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string
Print out a whole tree in LISP form. {@link //getNodeText} is used on the
node payloads to get the text for the nodes. Detect parse trees and extract data appropriately.
Types ¶
type AND ¶
type AND struct {
// contains filtered or unexported fields
}
func NewAND ¶
func NewAND(a, b SemanticContext) *AND
type ATN ¶
type ATN struct { // DecisionToState is the decision points for all rules, subrules, optional // blocks, ()+, ()*, etc. Used to build DFA predictors for them. DecisionToState []DecisionState // contains filtered or unexported fields }
func (*ATN) NextTokens ¶
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet
func (*ATN) NextTokensInContext ¶
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet
NextTokensInContext computes the set of valid tokens that can occur starting in state s. If ctx is nil, the set of tokens will not include what can follow the rule surrounding s. In other words, the set will be restricted to tokens reachable staying within the rule of s.
func (*ATN) NextTokensNoContext ¶
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet
NextTokensNoContext computes the set of valid tokens that can occur starting in s and staying in same rule. Token.EPSILON is in set if we reach end of rule.
type ATNConfig ¶
type ATNConfig interface { GetState() ATNState GetAlt() int GetSemanticContext() SemanticContext GetContext() PredictionContext SetContext(PredictionContext) GetReachesIntoOuterContext() int SetReachesIntoOuterContext(int) String() string // contains filtered or unexported methods }
ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic context). The syntactic context is a graph-structured stack node whose path(s) to the root is the rule invocation(s) chain used to arrive at the state. The semantic context is the tree of semantic predicates encountered before reaching an ATN state.
type ATNConfigSet ¶
type ATNConfigSet interface { Add(ATNConfig, *DoubleDict) bool AddAll([]ATNConfig) bool GetStates() Set GetPredicates() []SemanticContext GetItems() []ATNConfig OptimizeConfigs(interpreter *BaseATNSimulator) Equals(other interface{}) bool Length() int IsEmpty() bool Contains(ATNConfig) bool ContainsFast(ATNConfig) bool Clear() String() string HasSemanticContext() bool SetHasSemanticContext(v bool) ReadOnly() bool SetReadOnly(bool) GetConflictingAlts() *BitSet SetConflictingAlts(*BitSet) Alts() *BitSet FullContext() bool GetUniqueAlt() int SetUniqueAlt(int) GetDipsIntoOuterContext() bool SetDipsIntoOuterContext(bool) // contains filtered or unexported methods }
type ATNConfigSetPair ¶
type ATNConfigSetPair struct {
// contains filtered or unexported fields
}
type ATNDeserializationOptions ¶
type ATNDeserializationOptions struct {
// contains filtered or unexported fields
}
func DefaultATNDeserializationOptions ¶
func DefaultATNDeserializationOptions() *ATNDeserializationOptions
func NewATNDeserializationOptions ¶
func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions
func (*ATNDeserializationOptions) GenerateRuleBypassTransitions ¶
func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool
func (*ATNDeserializationOptions) ReadOnly ¶
func (opts *ATNDeserializationOptions) ReadOnly() bool
func (*ATNDeserializationOptions) SetGenerateRuleBypassTransitions ¶
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool)
func (*ATNDeserializationOptions) SetReadOnly ¶
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool)
func (*ATNDeserializationOptions) SetVerifyATN ¶
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool)
func (*ATNDeserializationOptions) VerifyATN ¶
func (opts *ATNDeserializationOptions) VerifyATN() bool
type ATNDeserializer ¶
type ATNDeserializer struct {
// contains filtered or unexported fields
}
func NewATNDeserializer ¶
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer
func (*ATNDeserializer) Deserialize ¶
func (a *ATNDeserializer) Deserialize(data []int32) *ATN
type ATNState ¶
type ATNState interface { GetEpsilonOnlyTransitions() bool GetRuleIndex() int SetRuleIndex(int) GetNextTokenWithinRule() *IntervalSet SetNextTokenWithinRule(*IntervalSet) GetATN() *ATN SetATN(*ATN) GetStateType() int GetStateNumber() int SetStateNumber(int) GetTransitions() []Transition SetTransitions([]Transition) AddTransition(Transition, int) String() string // contains filtered or unexported methods }
type AbstractPredicateTransition ¶
type AbstractPredicateTransition interface { Transition IAbstractPredicateTransitionFoo() }
type ActionTransition ¶
type ActionTransition struct { *BaseTransition // contains filtered or unexported fields }
func NewActionTransition ¶
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition
func (*ActionTransition) Matches ¶
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*ActionTransition) String ¶
func (t *ActionTransition) String() string
type AltDict ¶
type AltDict struct {
// contains filtered or unexported fields
}
func NewAltDict ¶
func NewAltDict() *AltDict
func PredictionModeGetStateToAltMap ¶
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict
Get a map from state to alt subset from a configuration set. For each configuration {@code c} in {@code configs}:
<pre> map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt} </pre>
type ArrayPredictionContext ¶
type ArrayPredictionContext struct { *BasePredictionContext // contains filtered or unexported fields }
func NewArrayPredictionContext ¶
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext
func (*ArrayPredictionContext) GetParent ¶
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext
func (*ArrayPredictionContext) GetReturnStates ¶
func (a *ArrayPredictionContext) GetReturnStates() []int
func (*ArrayPredictionContext) String ¶
func (a *ArrayPredictionContext) String() string
type AtomTransition ¶
type AtomTransition struct {
*BaseTransition
}
TODO: make all transitions sets? no, should remove set edges
func NewAtomTransition ¶
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition
func (*AtomTransition) Matches ¶
func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*AtomTransition) String ¶
func (t *AtomTransition) String() string
type BailErrorStrategy ¶
type BailErrorStrategy struct {
*DefaultErrorStrategy
}
func NewBailErrorStrategy ¶
func NewBailErrorStrategy() *BailErrorStrategy
func (*BailErrorStrategy) Recover ¶
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException)
Instead of recovering from exception {@code e}, re-panic it wrapped in a {@link ParseCancellationException} so it is not caught by the rule func catches. Use {@link Exception//getCause()} to get the original {@link RecognitionException}.
func (*BailErrorStrategy) RecoverInline ¶
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token
Make sure we don't attempt to recover inline; if the parser successfully recovers, it won't panic an exception.
func (*BailErrorStrategy) Sync ¶
func (b *BailErrorStrategy) Sync(recognizer Parser)
Make sure we don't attempt to recover from problems in subrules.
type BaseATNConfig ¶
type BaseATNConfig struct {
// contains filtered or unexported fields
}
func NewBaseATNConfig ¶
func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig1 ¶
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig
func NewBaseATNConfig2 ¶
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig3 ¶
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig4 ¶
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig
func NewBaseATNConfig5 ¶
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig
func NewBaseATNConfig6 ¶
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig
func NewBaseATNConfig7 ¶
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig
func (*BaseATNConfig) GetAlt ¶
func (b *BaseATNConfig) GetAlt() int
func (*BaseATNConfig) GetContext ¶
func (b *BaseATNConfig) GetContext() PredictionContext
func (*BaseATNConfig) GetReachesIntoOuterContext ¶
func (b *BaseATNConfig) GetReachesIntoOuterContext() int
func (*BaseATNConfig) GetSemanticContext ¶
func (b *BaseATNConfig) GetSemanticContext() SemanticContext
func (*BaseATNConfig) GetState ¶
func (b *BaseATNConfig) GetState() ATNState
func (*BaseATNConfig) SetContext ¶
func (b *BaseATNConfig) SetContext(v PredictionContext)
func (*BaseATNConfig) SetReachesIntoOuterContext ¶
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int)
func (*BaseATNConfig) String ¶
func (b *BaseATNConfig) String() string
type BaseATNConfigSet ¶
type BaseATNConfigSet struct {
// contains filtered or unexported fields
}
BaseATNConfigSet is a specialized set of ATNConfig that tracks information about its elements and can combine similar configurations using a graph-structured stack.
func NewBaseATNConfigSet ¶
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet
func (*BaseATNConfigSet) Add ¶
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool
Add merges contexts with existing configs for (s, i, pi, _), where s is the ATNConfig.state, i is the ATNConfig.alt, and pi is the ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates dipsIntoOuterContext and hasSemanticContext when necessary.
func (*BaseATNConfigSet) AddAll ¶
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool
func (*BaseATNConfigSet) Alts ¶
func (b *BaseATNConfigSet) Alts() *BitSet
func (*BaseATNConfigSet) Clear ¶
func (b *BaseATNConfigSet) Clear()
func (*BaseATNConfigSet) Contains ¶
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool
func (*BaseATNConfigSet) ContainsFast ¶
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool
func (*BaseATNConfigSet) Equals ¶
func (b *BaseATNConfigSet) Equals(other interface{}) bool
func (*BaseATNConfigSet) FullContext ¶
func (b *BaseATNConfigSet) FullContext() bool
func (*BaseATNConfigSet) GetConflictingAlts ¶
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet
func (*BaseATNConfigSet) GetDipsIntoOuterContext ¶
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool
func (*BaseATNConfigSet) GetItems ¶
func (b *BaseATNConfigSet) GetItems() []ATNConfig
func (*BaseATNConfigSet) GetPredicates ¶
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext
func (*BaseATNConfigSet) GetStates ¶
func (b *BaseATNConfigSet) GetStates() Set
func (*BaseATNConfigSet) GetUniqueAlt ¶
func (b *BaseATNConfigSet) GetUniqueAlt() int
func (*BaseATNConfigSet) HasSemanticContext ¶
func (b *BaseATNConfigSet) HasSemanticContext() bool
func (*BaseATNConfigSet) IsEmpty ¶
func (b *BaseATNConfigSet) IsEmpty() bool
func (*BaseATNConfigSet) Length ¶
func (b *BaseATNConfigSet) Length() int
func (*BaseATNConfigSet) OptimizeConfigs ¶
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator)
func (*BaseATNConfigSet) ReadOnly ¶
func (b *BaseATNConfigSet) ReadOnly() bool
func (*BaseATNConfigSet) SetConflictingAlts ¶
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet)
func (*BaseATNConfigSet) SetDipsIntoOuterContext ¶
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool)
func (*BaseATNConfigSet) SetHasSemanticContext ¶
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool)
func (*BaseATNConfigSet) SetReadOnly ¶
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool)
func (*BaseATNConfigSet) SetUniqueAlt ¶
func (b *BaseATNConfigSet) SetUniqueAlt(v int)
func (*BaseATNConfigSet) String ¶
func (b *BaseATNConfigSet) String() string
type BaseATNSimulator ¶
type BaseATNSimulator struct {
// contains filtered or unexported fields
}
func NewBaseATNSimulator ¶
func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator
func (*BaseATNSimulator) ATN ¶
func (b *BaseATNSimulator) ATN() *ATN
func (*BaseATNSimulator) DecisionToDFA ¶
func (b *BaseATNSimulator) DecisionToDFA() []*DFA
func (*BaseATNSimulator) SharedContextCache ¶
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache
type BaseATNState ¶
type BaseATNState struct { // NextTokenWithinRule caches lookahead during parsing. Not used during construction. NextTokenWithinRule *IntervalSet // contains filtered or unexported fields }
func NewBaseATNState ¶
func NewBaseATNState() *BaseATNState
func (*BaseATNState) AddTransition ¶
func (as *BaseATNState) AddTransition(trans Transition, index int)
func (*BaseATNState) GetATN ¶
func (as *BaseATNState) GetATN() *ATN
func (*BaseATNState) GetEpsilonOnlyTransitions ¶
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool
func (*BaseATNState) GetNextTokenWithinRule ¶
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet
func (*BaseATNState) GetRuleIndex ¶
func (as *BaseATNState) GetRuleIndex() int
func (*BaseATNState) GetStateNumber ¶
func (as *BaseATNState) GetStateNumber() int
func (*BaseATNState) GetStateType ¶
func (as *BaseATNState) GetStateType() int
func (*BaseATNState) GetTransitions ¶
func (as *BaseATNState) GetTransitions() []Transition
func (*BaseATNState) SetATN ¶
func (as *BaseATNState) SetATN(atn *ATN)
func (*BaseATNState) SetNextTokenWithinRule ¶
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet)
func (*BaseATNState) SetRuleIndex ¶
func (as *BaseATNState) SetRuleIndex(v int)
func (*BaseATNState) SetStateNumber ¶
func (as *BaseATNState) SetStateNumber(stateNumber int)
func (*BaseATNState) SetTransitions ¶
func (as *BaseATNState) SetTransitions(t []Transition)
func (*BaseATNState) String ¶
func (as *BaseATNState) String() string
type BaseAbstractPredicateTransition ¶
type BaseAbstractPredicateTransition struct {
*BaseTransition
}
func NewBasePredicateTransition ¶
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition
func (*BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo ¶
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo()
type BaseBlockStartState ¶
type BaseBlockStartState struct { *BaseDecisionState // contains filtered or unexported fields }
BaseBlockStartState is the start of a regular (...) block.
func NewBlockStartState ¶
func NewBlockStartState() *BaseBlockStartState
type BaseDecisionState ¶
type BaseDecisionState struct { *BaseATNState // contains filtered or unexported fields }
func NewBaseDecisionState ¶
func NewBaseDecisionState() *BaseDecisionState
type BaseInterpreterRuleContext ¶
type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
func NewBaseInterpreterRuleContext ¶
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext
type BaseLexer ¶
type BaseLexer struct { *BaseRecognizer Interpreter ILexerATNSimulator TokenStartCharIndex int TokenStartLine int TokenStartColumn int ActionType int Virt Lexer // The most derived lexer implementation. Allows virtual method calls. // contains filtered or unexported fields }
func NewBaseLexer ¶
func NewBaseLexer(input CharStream) *BaseLexer
func (*BaseLexer) Emit ¶
The standard method called to automatically emit a token at the outermost lexical rule. The token object should point into the char buffer start..stop. If there is a text override in 'text', use that to set the token's text. Override this method to emit custom Token objects or provide a new factory.
func (*BaseLexer) EmitToken ¶
By default does not support multiple emits per NextToken invocation for efficiency reasons. Subclass and override this method, NextToken, and GetToken (to push tokens into a list and pull from that list rather than a single variable as this implementation does).
func (*BaseLexer) GetAllTokens ¶
Return a list of all Token objects in input char stream. Forces load of all tokens. Does not include EOF token.
func (*BaseLexer) GetCharIndex ¶
What is the index of the current character of lookahead?
func (*BaseLexer) GetCharPositionInLine ¶
func (*BaseLexer) GetInputStream ¶
func (b *BaseLexer) GetInputStream() CharStream
func (*BaseLexer) GetInterpreter ¶
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator
func (*BaseLexer) GetSourceName ¶
func (*BaseLexer) GetText ¶
Return the text Matched so far for the current token or any text override. Setting the complete text of this token wipes any previous changes to the text.
func (*BaseLexer) GetTokenFactory ¶
func (b *BaseLexer) GetTokenFactory() TokenFactory
func (*BaseLexer) GetTokenSourceCharStreamPair ¶
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair
func (*BaseLexer) Recover ¶
func (b *BaseLexer) Recover(re RecognitionException)
Lexers can normally Match any char in its vocabulary after Matching a token, so do the easy thing and just kill a character and hope it all works out. You can instead use the rule invocation stack to do sophisticated error recovery if you are in a fragment rule.
func (*BaseLexer) SetChannel ¶
func (*BaseLexer) SetInputStream ¶
func (b *BaseLexer) SetInputStream(input CharStream)
SetInputStream resets the lexer input stream and associated lexer state.
func (*BaseLexer) Skip ¶
func (b *BaseLexer) Skip()
Instruct the lexer to Skip creating a token for current lexer rule and look for another token. NextToken() knows to keep looking when a lexer rule finishes with token set to SKIPTOKEN. Recall that if token==nil at end of any token rule, it creates one for you and emits it.
type BaseLexerAction ¶
type BaseLexerAction struct {
// contains filtered or unexported fields
}
func NewBaseLexerAction ¶
func NewBaseLexerAction(action int) *BaseLexerAction
type BaseParseTreeListener ¶
type BaseParseTreeListener struct{}
func (*BaseParseTreeListener) EnterEveryRule ¶
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext)
func (*BaseParseTreeListener) ExitEveryRule ¶
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)
func (*BaseParseTreeListener) VisitErrorNode ¶
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)
func (*BaseParseTreeListener) VisitTerminal ¶
func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)
type BaseParseTreeVisitor ¶
type BaseParseTreeVisitor struct{}
func (*BaseParseTreeVisitor) Visit ¶
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}
func (*BaseParseTreeVisitor) VisitChildren ¶
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}
func (*BaseParseTreeVisitor) VisitErrorNode ¶
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}
func (*BaseParseTreeVisitor) VisitTerminal ¶
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{}
type BaseParser ¶
type BaseParser struct { *BaseRecognizer Interpreter *ParserATNSimulator BuildParseTrees bool // contains filtered or unexported fields }
func NewBaseParser ¶
func NewBaseParser(input TokenStream) *BaseParser
This is all the parsing support code; essentially, most of it is error recovery stuff.
func (*BaseParser) AddParseListener ¶
func (p *BaseParser) AddParseListener(listener ParseTreeListener)
Registers {@code listener} to receive events during the parsing process.
<p>To support output-preserving grammar transformations (including but not limited to left-recursion removal, automated left-factoring, and optimized code generation), calls to listener methods during the parse may differ substantially from calls made by {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In particular, rule entry and exit events may occur in a different order during the parse than after the parse. In addition, calls to certain rule entry methods may be omitted.</p>
<p>With the following specific exceptions, calls to listener events are <em>deterministic</em>, i.e. for identical input the calls to listener methods will be the same.</p>
<ul> <li>Alterations to the grammar used to generate code may change the behavior of the listener calls.</li> <li>Alterations to the command line options passed to ANTLR 4 when generating the parser may change the behavior of the listener calls.</li> <li>Changing the version of the ANTLR Tool used to generate the parser may change the behavior of the listener calls.</li> </ul>
@param listener the listener to add
@panics NilPointerException if {@code listener} is {@code nil}
func (*BaseParser) Consume ¶
func (p *BaseParser) Consume() Token
func (*BaseParser) EnterOuterAlt ¶
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int)
func (*BaseParser) EnterRecursionRule ¶
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int)
func (*BaseParser) EnterRule ¶
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int)
func (*BaseParser) ExitRule ¶
func (p *BaseParser) ExitRule()
func (*BaseParser) GetATN ¶
func (p *BaseParser) GetATN() *ATN
func (*BaseParser) GetATNWithBypassAlts ¶
func (p *BaseParser) GetATNWithBypassAlts()
The ATN with bypass alternatives is expensive to create so we create it lazily.
@panics UnsupportedOperationException if the current parser does not implement the {@link //getSerializedATN()} method.
func (*BaseParser) GetCurrentToken ¶
func (p *BaseParser) GetCurrentToken() Token
Match needs to return the current input symbol, which gets put into the label for the associated token ref e.g., x=ID.
func (*BaseParser) GetDFAStrings ¶
func (p *BaseParser) GetDFAStrings() string
For debugging and other purposes.//
func (*BaseParser) GetErrorHandler ¶
func (p *BaseParser) GetErrorHandler() ErrorStrategy
func (*BaseParser) GetExpectedTokens ¶
func (p *BaseParser) GetExpectedTokens() *IntervalSet
Computes the set of input symbols which could follow the current parser state and context, as given by {@link //GetState} and {@link //GetContext}, respectively.
@see ATN//getExpectedTokens(int, RuleContext)
func (*BaseParser) GetExpectedTokensWithinCurrentRule ¶
func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet
func (*BaseParser) GetInputStream ¶
func (p *BaseParser) GetInputStream() IntStream
func (*BaseParser) GetInterpreter ¶
func (p *BaseParser) GetInterpreter() *ParserATNSimulator
func (*BaseParser) GetInvokingContext ¶
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext
func (*BaseParser) GetParseListeners ¶
func (p *BaseParser) GetParseListeners() []ParseTreeListener
func (*BaseParser) GetParserRuleContext ¶
func (p *BaseParser) GetParserRuleContext() ParserRuleContext
func (*BaseParser) GetPrecedence ¶
func (p *BaseParser) GetPrecedence() int
func (*BaseParser) GetRuleIndex ¶
func (p *BaseParser) GetRuleIndex(ruleName string) int
Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (*BaseParser) GetRuleInvocationStack ¶
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string
func (*BaseParser) GetSourceName ¶
func (p *BaseParser) GetSourceName() string
func (*BaseParser) GetTokenFactory ¶
func (p *BaseParser) GetTokenFactory() TokenFactory
func (*BaseParser) GetTokenStream ¶
func (p *BaseParser) GetTokenStream() TokenStream
func (*BaseParser) IsExpectedToken ¶
func (p *BaseParser) IsExpectedToken(symbol int) bool
func (*BaseParser) Match ¶
func (p *BaseParser) Match(ttype int) Token
func (*BaseParser) MatchWildcard ¶
func (p *BaseParser) MatchWildcard() Token
func (*BaseParser) NotifyErrorListeners ¶
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException)
func (*BaseParser) Precpred ¶
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool
func (*BaseParser) PushNewRecursionContext ¶
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int)
func (*BaseParser) RemoveParseListener ¶
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener)
Remove {@code listener} from the list of parse listeners.
<p>If {@code listener} is {@code nil} or has not been added as a parse listener, this method does nothing.</p> @param listener the listener to remove
func (*BaseParser) SetErrorHandler ¶
func (p *BaseParser) SetErrorHandler(e ErrorStrategy)
func (*BaseParser) SetInputStream ¶
func (p *BaseParser) SetInputStream(input TokenStream)
func (*BaseParser) SetParserRuleContext ¶
func (p *BaseParser) SetParserRuleContext(v ParserRuleContext)
func (*BaseParser) SetTokenStream ¶
func (p *BaseParser) SetTokenStream(input TokenStream)
Set the token stream and reset the parser.//
func (*BaseParser) SetTrace ¶
func (p *BaseParser) SetTrace(trace *TraceListener)
During a parse it is sometimes useful to listen in on the rule entry and exit events as well as token Matches. This is for quick and dirty debugging.
func (*BaseParser) TriggerEnterRuleEvent ¶
func (p *BaseParser) TriggerEnterRuleEvent()
Notify any parse listeners of an enter rule event.
func (*BaseParser) TriggerExitRuleEvent ¶
func (p *BaseParser) TriggerExitRuleEvent()
Notify any parse listeners of an exit rule event.
@see //addParseListener
func (*BaseParser) UnrollRecursionContexts ¶
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext)
type BaseParserRuleContext ¶
type BaseParserRuleContext struct { *BaseRuleContext // contains filtered or unexported fields }
func NewBaseParserRuleContext ¶
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext
func (*BaseParserRuleContext) Accept ¶
func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{}
func (*BaseParserRuleContext) AddChild ¶
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext
func (*BaseParserRuleContext) AddErrorNode ¶
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl
func (*BaseParserRuleContext) AddTokenNode ¶
func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl
func (*BaseParserRuleContext) CopyFrom ¶
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext)
func (*BaseParserRuleContext) EnterRule ¶
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener)
Double dispatch methods for listeners
func (*BaseParserRuleContext) ExitRule ¶
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener)
func (*BaseParserRuleContext) GetChild ¶
func (prc *BaseParserRuleContext) GetChild(i int) Tree
func (*BaseParserRuleContext) GetChildCount ¶
func (prc *BaseParserRuleContext) GetChildCount() int
func (*BaseParserRuleContext) GetChildOfType ¶
func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext
func (*BaseParserRuleContext) GetChildren ¶
func (prc *BaseParserRuleContext) GetChildren() []Tree
func (*BaseParserRuleContext) GetPayload ¶
func (prc *BaseParserRuleContext) GetPayload() interface{}
func (*BaseParserRuleContext) GetRuleContext ¶
func (prc *BaseParserRuleContext) GetRuleContext() RuleContext
func (*BaseParserRuleContext) GetSourceInterval ¶
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval
func (*BaseParserRuleContext) GetStart ¶
func (prc *BaseParserRuleContext) GetStart() Token
func (*BaseParserRuleContext) GetStop ¶
func (prc *BaseParserRuleContext) GetStop() Token
func (*BaseParserRuleContext) GetText ¶
func (prc *BaseParserRuleContext) GetText() string
func (*BaseParserRuleContext) GetToken ¶
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode
func (*BaseParserRuleContext) GetTokens ¶
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode
func (*BaseParserRuleContext) GetTypedRuleContext ¶
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext
func (*BaseParserRuleContext) GetTypedRuleContexts ¶
func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext
func (*BaseParserRuleContext) RemoveLastChild ¶
func (prc *BaseParserRuleContext) RemoveLastChild()
Used by EnterOuterAlt to toss out a RuleContext previously added as we entered a rule. If we have a label, we will need to remove the generic ruleContext object.
func (*BaseParserRuleContext) SetException ¶
func (prc *BaseParserRuleContext) SetException(e RecognitionException)
func (*BaseParserRuleContext) SetStart ¶
func (prc *BaseParserRuleContext) SetStart(t Token)
func (*BaseParserRuleContext) SetStop ¶
func (prc *BaseParserRuleContext) SetStop(t Token)
func (*BaseParserRuleContext) String ¶
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string
func (*BaseParserRuleContext) ToStringTree ¶
func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string
type BasePredictionContext ¶
type BasePredictionContext struct {
// contains filtered or unexported fields
}
func NewBasePredictionContext ¶
func NewBasePredictionContext(cachedHash int) *BasePredictionContext
type BaseRecognitionException ¶
type BaseRecognitionException struct {
// contains filtered or unexported fields
}
func NewBaseRecognitionException ¶
func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException
func (*BaseRecognitionException) GetInputStream ¶
func (b *BaseRecognitionException) GetInputStream() IntStream
func (*BaseRecognitionException) GetMessage ¶
func (b *BaseRecognitionException) GetMessage() string
func (*BaseRecognitionException) GetOffendingToken ¶
func (b *BaseRecognitionException) GetOffendingToken() Token
func (*BaseRecognitionException) String ¶
func (b *BaseRecognitionException) String() string
type BaseRecognizer ¶
type BaseRecognizer struct { RuleNames []string LiteralNames []string SymbolicNames []string GrammarFileName string // contains filtered or unexported fields }
func NewBaseRecognizer ¶
func NewBaseRecognizer() *BaseRecognizer
func (*BaseRecognizer) Action ¶
func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int)
func (*BaseRecognizer) AddErrorListener ¶
func (b *BaseRecognizer) AddErrorListener(listener ErrorListener)
func (*BaseRecognizer) GetErrorHeader ¶
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string
What is the error header, normally line/character position information?//
func (*BaseRecognizer) GetErrorListenerDispatch ¶
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener
func (*BaseRecognizer) GetLiteralNames ¶
func (b *BaseRecognizer) GetLiteralNames() []string
func (*BaseRecognizer) GetRuleIndexMap ¶
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int
Get a map from rule names to rule indexes.
<p>Used for XPath and tree pattern compilation.</p>
func (*BaseRecognizer) GetRuleNames ¶
func (b *BaseRecognizer) GetRuleNames() []string
func (*BaseRecognizer) GetState ¶
func (b *BaseRecognizer) GetState() int
func (*BaseRecognizer) GetSymbolicNames ¶
func (b *BaseRecognizer) GetSymbolicNames() []string
func (*BaseRecognizer) GetTokenErrorDisplay ¶
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string
How should a token be displayed in an error message? The default
is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a NewJava type.
@deprecated This method is not called by the ANTLR 4 Runtime. Specific implementations of {@link ANTLRErrorStrategy} may provide a similar feature when necessary. For example, see {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
func (*BaseRecognizer) GetTokenNames ¶
func (b *BaseRecognizer) GetTokenNames() []string
func (*BaseRecognizer) GetTokenType ¶
func (b *BaseRecognizer) GetTokenType(tokenName string) int
func (*BaseRecognizer) Precpred ¶
func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool
func (*BaseRecognizer) RemoveErrorListeners ¶
func (b *BaseRecognizer) RemoveErrorListeners()
func (*BaseRecognizer) Sempred ¶
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool
subclass needs to override these if there are sempreds or actions that the ATN interp needs to execute
func (*BaseRecognizer) SetState ¶
func (b *BaseRecognizer) SetState(v int)
type BaseRewriteOperation ¶
type BaseRewriteOperation struct {
// contains filtered or unexported fields
}
func (*BaseRewriteOperation) Execute ¶
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int
func (*BaseRewriteOperation) GetIndex ¶
func (op *BaseRewriteOperation) GetIndex() int
func (*BaseRewriteOperation) GetInstructionIndex ¶
func (op *BaseRewriteOperation) GetInstructionIndex() int
func (*BaseRewriteOperation) GetOpName ¶
func (op *BaseRewriteOperation) GetOpName() string
func (*BaseRewriteOperation) GetText ¶
func (op *BaseRewriteOperation) GetText() string
func (*BaseRewriteOperation) GetTokens ¶
func (op *BaseRewriteOperation) GetTokens() TokenStream
func (*BaseRewriteOperation) SetIndex ¶
func (op *BaseRewriteOperation) SetIndex(val int)
func (*BaseRewriteOperation) SetInstructionIndex ¶
func (op *BaseRewriteOperation) SetInstructionIndex(val int)
func (*BaseRewriteOperation) SetOpName ¶
func (op *BaseRewriteOperation) SetOpName(val string)
func (*BaseRewriteOperation) SetText ¶
func (op *BaseRewriteOperation) SetText(val string)
func (*BaseRewriteOperation) SetTokens ¶
func (op *BaseRewriteOperation) SetTokens(val TokenStream)
func (*BaseRewriteOperation) String ¶
func (op *BaseRewriteOperation) String() string
type BaseRuleContext ¶
type BaseRuleContext struct { RuleIndex int // contains filtered or unexported fields }
func NewBaseRuleContext ¶
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext
func (*BaseRuleContext) GetAltNumber ¶
func (b *BaseRuleContext) GetAltNumber() int
func (*BaseRuleContext) GetBaseRuleContext ¶
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext
func (*BaseRuleContext) GetInvokingState ¶
func (b *BaseRuleContext) GetInvokingState() int
func (*BaseRuleContext) GetParent ¶
func (b *BaseRuleContext) GetParent() Tree
func (*BaseRuleContext) GetRuleIndex ¶
func (b *BaseRuleContext) GetRuleIndex() int
func (*BaseRuleContext) IsEmpty ¶
func (b *BaseRuleContext) IsEmpty() bool
A context is empty if there is no invoking state, meaning nobody called the current context.
func (*BaseRuleContext) SetAltNumber ¶
func (b *BaseRuleContext) SetAltNumber(altNumber int)
func (*BaseRuleContext) SetInvokingState ¶
func (b *BaseRuleContext) SetInvokingState(t int)
func (*BaseRuleContext) SetParent ¶
func (b *BaseRuleContext) SetParent(v Tree)
type BaseSingletonPredictionContext ¶
type BaseSingletonPredictionContext struct { *BasePredictionContext // contains filtered or unexported fields }
func NewBaseSingletonPredictionContext ¶
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext
func (*BaseSingletonPredictionContext) GetParent ¶
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext
func (*BaseSingletonPredictionContext) String ¶
func (b *BaseSingletonPredictionContext) String() string
type BaseToken ¶
type BaseToken struct {
// contains filtered or unexported fields
}
func (*BaseToken) GetChannel ¶
func (*BaseToken) GetInputStream ¶
func (b *BaseToken) GetInputStream() CharStream
func (*BaseToken) GetSource ¶
func (b *BaseToken) GetSource() *TokenSourceCharStreamPair
func (*BaseToken) GetTokenIndex ¶
func (*BaseToken) GetTokenSource ¶
func (b *BaseToken) GetTokenSource() TokenSource
func (*BaseToken) GetTokenType ¶
func (*BaseToken) SetTokenIndex ¶
type BaseTransition ¶
type BaseTransition struct {
// contains filtered or unexported fields
}
func NewBaseTransition ¶
func NewBaseTransition(target ATNState) *BaseTransition
func (*BaseTransition) Matches ¶
func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
type BasicBlockStartState ¶
type BasicBlockStartState struct {
*BaseBlockStartState
}
func NewBasicBlockStartState ¶
func NewBasicBlockStartState() *BasicBlockStartState
type BasicState ¶
type BasicState struct {
*BaseATNState
}
func NewBasicState ¶
func NewBasicState() *BasicState
type BitSet ¶
type BitSet struct {
// contains filtered or unexported fields
}
func PredictionModeGetAlts ¶
Gets the complete set of represented alternatives for a collection of alternative subsets. This method returns the union of each {@link BitSet} in {@code altsets}.
@param altsets a collection of alternative subsets @return the set of represented alternatives in {@code altsets}
func PredictionModegetConflictingAltSubsets ¶
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet
This func gets the conflicting alt subsets from a configuration set. For each configuration {@code c} in {@code configs}:
<pre> map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not alt and not pred </pre>
type BlockEndState ¶
type BlockEndState struct { *BaseATNState // contains filtered or unexported fields }
BlockEndState is a terminal node of a simple (a|b|c) block.
func NewBlockEndState ¶
func NewBlockEndState() *BlockEndState
type BlockStartState ¶
type BlockStartState interface { DecisionState // contains filtered or unexported methods }
type CharStream ¶
type CommonToken ¶
type CommonToken struct {
*BaseToken
}
func NewCommonToken ¶
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken
func (*CommonToken) GetText ¶
func (c *CommonToken) GetText() string
func (*CommonToken) SetText ¶
func (c *CommonToken) SetText(text string)
func (*CommonToken) String ¶
func (c *CommonToken) String() string
type CommonTokenFactory ¶
type CommonTokenFactory struct {
// contains filtered or unexported fields
}
CommonTokenFactory is the default TokenFactory implementation.
func NewCommonTokenFactory ¶
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory
func (*CommonTokenFactory) Create ¶
func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
type CommonTokenStream ¶
type CommonTokenStream struct {
// contains filtered or unexported fields
}
CommonTokenStream is an implementation of TokenStream that loads tokens from a TokenSource on-demand and places the tokens in a buffer to provide access to any previous token by index. This token stream ignores the value of Token.getChannel. If your parser requires the token stream to filter tokens to only those on a particular channel, such as Token.DEFAULT_CHANNEL or Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
func NewCommonTokenStream ¶
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream
func (*CommonTokenStream) Consume ¶
func (c *CommonTokenStream) Consume()
func (*CommonTokenStream) Fill ¶
func (c *CommonTokenStream) Fill()
Fill gets all tokens from the lexer until EOF.
func (*CommonTokenStream) Get ¶
func (c *CommonTokenStream) Get(index int) Token
func (*CommonTokenStream) GetAllText ¶
func (c *CommonTokenStream) GetAllText() string
func (*CommonTokenStream) GetAllTokens ¶
func (c *CommonTokenStream) GetAllTokens() []Token
func (*CommonTokenStream) GetHiddenTokensToLeft ¶
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token
GetHiddenTokensToLeft collects all tokens on channel to the left of the current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -1, it finds any non default channel token.
func (*CommonTokenStream) GetHiddenTokensToRight ¶
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token
GetHiddenTokensToRight collects all tokens on a specified channel to the right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or EOF. If channel is -1, it finds any non-default channel token.
func (*CommonTokenStream) GetSourceName ¶
func (c *CommonTokenStream) GetSourceName() string
func (*CommonTokenStream) GetTextFromInterval ¶
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string
func (*CommonTokenStream) GetTextFromRuleContext ¶
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
func (*CommonTokenStream) GetTextFromTokens ¶
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string
func (*CommonTokenStream) GetTokenSource ¶
func (c *CommonTokenStream) GetTokenSource() TokenSource
func (*CommonTokenStream) GetTokens ¶
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token
GetTokens gets all tokens from start to stop inclusive.
func (*CommonTokenStream) Index ¶
func (c *CommonTokenStream) Index() int
func (*CommonTokenStream) LA ¶
func (c *CommonTokenStream) LA(i int) int
func (*CommonTokenStream) LB ¶
func (c *CommonTokenStream) LB(k int) Token
func (*CommonTokenStream) LT ¶
func (c *CommonTokenStream) LT(k int) Token
func (*CommonTokenStream) Mark ¶
func (c *CommonTokenStream) Mark() int
func (*CommonTokenStream) NextTokenOnChannel ¶
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int
NextTokenOnChannel returns the index of the next token on channel given a starting index. Returns i if tokens[i] is on channel. Returns -1 if there are no tokens on channel between i and EOF.
func (*CommonTokenStream) Release ¶
func (c *CommonTokenStream) Release(marker int)
func (*CommonTokenStream) Seek ¶
func (c *CommonTokenStream) Seek(index int)
func (*CommonTokenStream) SetTokenSource ¶
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource)
SetTokenSource resets the c token stream by setting its token source.
func (*CommonTokenStream) Size ¶
func (c *CommonTokenStream) Size() int
func (*CommonTokenStream) Sync ¶
func (c *CommonTokenStream) Sync(i int) bool
Sync makes sure index i in tokens has a token and returns true if a token is located at index i and otherwise false.
type ConsoleErrorListener ¶
type ConsoleErrorListener struct {
*DefaultErrorListener
}
func NewConsoleErrorListener ¶
func NewConsoleErrorListener() *ConsoleErrorListener
func (*ConsoleErrorListener) SyntaxError ¶
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
{@inheritDoc}
<p> This implementation prints messages to {@link System//err} containing the values of {@code line}, {@code charPositionInLine}, and {@code msg} using the following format.</p>
<pre> line <em>line</em>:<em>charPositionInLine</em> <em>msg</em> </pre>
type DFA ¶
type DFA struct {
// contains filtered or unexported fields
}
func NewDFA ¶
func NewDFA(atnStartState DecisionState, decision int) *DFA
func (*DFA) ToLexerString ¶
type DFASerializer ¶
type DFASerializer struct {
// contains filtered or unexported fields
}
DFASerializer is a DFA walker that knows how to dump them to serialized strings.
func NewDFASerializer ¶
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer
func (*DFASerializer) GetStateString ¶
func (d *DFASerializer) GetStateString(s *DFAState) string
func (*DFASerializer) String ¶
func (d *DFASerializer) String() string
type DFAState ¶
type DFAState struct {
// contains filtered or unexported fields
}
DFAState represents a set of possible ATN configurations. As Aho, Sethi, Ullman p. 117 says: "The DFA uses its state to keep track of all possible states the ATN can be in after reading each input symbol. That is to say, after reading input a1a2..an, the DFA is in a state that represents the subset T of the states of the ATN that are reachable from the ATN's start state along some path labeled a1a2..an." In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of states the ATN could be in. We need to track the alt predicted by each state as well, however. More importantly, we need to maintain a stack of states, tracking the closure operations as they jump from rule to rule, emulating rule invocations (method calls). I have to add a stack to simulate the proper lookahead sequences for the underlying LL grammar from which the ATN was derived.
I use a set of ATNConfig objects, not simple states. An ATNConfig is both a state (ala normal conversion) and a RuleContext describing the chain of rules (if any) followed to arrive at that state.
A DFAState may have multiple references to a particular state, but with different ATN contexts (with same or different alts) meaning that state was reached via a different set of rule invocations.
func NewDFAState ¶
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState
type DecisionState ¶
type DecisionState interface { ATNState // contains filtered or unexported methods }
type DefaultErrorListener ¶
type DefaultErrorListener struct { }
func NewDefaultErrorListener ¶
func NewDefaultErrorListener() *DefaultErrorListener
func (*DefaultErrorListener) ReportAmbiguity ¶
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (*DefaultErrorListener) ReportAttemptingFullContext ¶
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (*DefaultErrorListener) ReportContextSensitivity ¶
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
func (*DefaultErrorListener) SyntaxError ¶
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
type DefaultErrorStrategy ¶
type DefaultErrorStrategy struct {
// contains filtered or unexported fields
}
This is the default implementation of {@link ANTLRErrorStrategy} used for error Reporting and recovery in ANTLR parsers.
func NewDefaultErrorStrategy ¶
func NewDefaultErrorStrategy() *DefaultErrorStrategy
func (*DefaultErrorStrategy) GetExpectedTokens ¶
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
func (*DefaultErrorStrategy) GetMissingSymbol ¶
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token
Conjure up a missing token during error recovery.
The recognizer attempts to recover from single missing symbols. But, actions might refer to that missing symbol. For example, x=ID {f($x)}. The action clearly assumes that there has been an identifier Matched previously and that $x points at that token. If that token is missing, but the next token in the stream is what we want, we assume that this token is missing and we keep going. Because we have to return some token to replace the missing token, we have to conjure one up. This method gives the user control over the tokens returned for missing tokens. Mostly, you will want to create something special for identifier tokens. For literals such as '{' and ',', the default action in the parser or tree parser works. It simply creates a CommonToken of the appropriate type. The text will be the token. If you change what tokens must be created by the lexer, override this method to create the appropriate tokens.
func (*DefaultErrorStrategy) GetTokenErrorDisplay ¶
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string
How should a token be displayed in an error message? The default is to display just the text, but during development you might want to have a lot of information spit out. Override in that case to use t.String() (which, for CommonToken, dumps everything about the token). This is better than forcing you to override a method in your token objects because you don't have to go modify your lexer so that it creates a NewJava type.
func (*DefaultErrorStrategy) InErrorRecoveryMode ¶
func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool
func (*DefaultErrorStrategy) Recover ¶
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException)
{@inheritDoc}
<p>The default implementation reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set--loosely the set of tokens that can follow the current rule.</p>
func (*DefaultErrorStrategy) RecoverInline ¶
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token
<p>The default implementation attempts to recover from the mismatched input by using single token insertion and deletion as described below. If the recovery attempt fails, this method panics with an {@link InputMisMatchException}.</p>
<p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
<p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the right token, however, then assume {@code LA(1)} is some extra spurious token and delete it. Then consume and return the next token (which was the {@code LA(2)} token) as the successful result of the Match operation.</p>
<p>This recovery strategy is implemented by {@link //singleTokenDeletion}.</p>
<p><strong>MISSING TOKEN</strong> (single token insertion)</p>
<p>If current token (at {@code LA(1)}) is consistent with what could come after the expected {@code LA(1)} token, then assume the token is missing and use the parser's {@link TokenFactory} to create it on the fly. The "insertion" is performed by returning the created token as the successful result of the Match operation.</p>
<p>This recovery strategy is implemented by {@link //singleTokenInsertion}.</p>
<p><strong>EXAMPLE</strong></p>
<p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When the parser returns from the nested call to {@code expr}, it will have call chain:</p>
<pre> stat &rarr expr &rarr atom </pre>
and it will be trying to Match the {@code ')'} at this point in the derivation:
<pre> => ID '=' '(' INT ')' ('+' atom)* ” ^ </pre>
The attempt to Match {@code ')'} will fail when it sees {@code ”} and call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} is in the set of tokens that can follow the {@code ')'} token reference in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (*DefaultErrorStrategy) ReportError ¶
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException)
{@inheritDoc}
<p>The default implementation returns immediately if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} and dispatches the Reporting task based on the runtime type of {@code e} according to the following table.</p>
<ul> <li>{@link NoViableAltException}: Dispatches the call to {@link //ReportNoViableAlternative}</li> <li>{@link InputMisMatchException}: Dispatches the call to {@link //ReportInputMisMatch}</li> <li>{@link FailedPredicateException}: Dispatches the call to {@link //ReportFailedPredicate}</li> <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report the exception</li> </ul>
func (*DefaultErrorStrategy) ReportFailedPredicate ¶
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException)
This is called by {@link //ReportError} when the exception is a {@link FailedPredicateException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (*DefaultErrorStrategy) ReportInputMisMatch ¶
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException)
This is called by {@link //ReportError} when the exception is an {@link InputMisMatchException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (*DefaultErrorStrategy) ReportMatch ¶
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser)
{@inheritDoc}
<p>The default implementation simply calls {@link //endErrorCondition}.</p>
func (*DefaultErrorStrategy) ReportMissingToken ¶
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser)
This method is called to Report a syntax error which requires the insertion of a missing token into the input stream. At the time this method is called, the missing token has not yet been inserted. When this method returns, {@code recognizer} is in error recovery mode.
<p>This method is called when {@link //singleTokenInsertion} identifies single-token insertion as a viable recovery strategy for a mismatched input error.</p>
<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>
@param recognizer the parser instance
func (*DefaultErrorStrategy) ReportNoViableAlternative ¶
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException)
This is called by {@link //ReportError} when the exception is a {@link NoViableAltException}.
@see //ReportError
@param recognizer the parser instance @param e the recognition exception
func (*DefaultErrorStrategy) ReportUnwantedToken ¶
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser)
This method is called to Report a syntax error which requires the removal of a token from the input stream. At the time this method is called, the erroneous symbol is the current {@code LT(1)} symbol and has not yet been removed from the input stream. When this method returns, {@code recognizer} is in error recovery mode.
<p>This method is called when {@link //singleTokenDeletion} identifies single-token deletion as a viable recovery strategy for a mismatched input error.</p>
<p>The default implementation simply returns if the handler is already in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to enter error recovery mode, followed by calling {@link Parser//NotifyErrorListeners}.</p>
@param recognizer the parser instance
func (*DefaultErrorStrategy) SingleTokenDeletion ¶
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token
This method implements the single-token deletion inline error recovery strategy. It is called by {@link //recoverInline} to attempt to recover from mismatched input. If this method returns nil, the parser and error handler state will not have changed. If this method returns non-nil, {@code recognizer} will <em>not</em> be in error recovery mode since the returned token was a successful Match.
<p>If the single-token deletion is successful, this method calls {@link //ReportUnwantedToken} to Report the error, followed by {@link Parser//consume} to actually "delete" the extraneous token. Then, before returning, {@link //ReportMatch} is called to signal a successful Match.</p>
@param recognizer the parser instance @return the successfully Matched {@link Token} instance if single-token deletion successfully recovers from the mismatched input, otherwise {@code nil}
func (*DefaultErrorStrategy) SingleTokenInsertion ¶
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool
This method implements the single-token insertion inline error recovery strategy. It is called by {@link //recoverInline} if the single-token deletion strategy fails to recover from the mismatched input. If this method returns {@code true}, {@code recognizer} will be in error recovery mode.
<p>This method determines whether or not single-token insertion is viable by checking if the {@code LA(1)} input symbol could be successfully Matched if it were instead the {@code LA(2)} symbol. If this method returns {@code true}, the caller is responsible for creating and inserting a token with the correct type to produce this behavior.</p>
@param recognizer the parser instance @return {@code true} if single-token insertion is a viable recovery strategy for the current mismatched input, otherwise {@code false}
func (*DefaultErrorStrategy) Sync ¶
func (d *DefaultErrorStrategy) Sync(recognizer Parser)
The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure that the current lookahead symbol is consistent with what we were expecting at this point in the ATN. You can call this anytime but ANTLR only generates code to check before subrules/loops and each iteration.
<p>Implements Jim Idle's magic Sync mechanism in closures and optional subrules. E.g.,</p>
<pre> a : Sync ( stuff Sync )* Sync : {consume to what can follow Sync} </pre>
At the start of a sub rule upon error, {@link //Sync} performs single token deletion, if possible. If it can't do that, it bails on the current rule and uses the default error recovery, which consumes until the reSynchronization set of the current rule.
<p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block with an empty alternative), then the expected set includes what follows the subrule.</p>
<p>During loop iteration, it consumes until it sees a token that can start a sub rule or what follows loop. Yes, that is pretty aggressive. We opt to stay in the loop as long as possible.</p>
<p><strong>ORIGINS</strong></p>
<p>Previous versions of ANTLR did a poor job of their recovery within loops. A single mismatch token or missing token would force the parser to bail out of the entire rules surrounding the loop. So, for rule</p>
<pre> classfunc : 'class' ID '{' member* '}' </pre>
input with an extra token between members would force the parser to consume until it found the next class definition rather than the next member definition of the current class.
<p>This functionality costs a little bit of effort because the parser has to compare the token set at the start of the loop and at each iteration. If for some reason speed is suffering for you, you can turn off this functionality by simply overriding this method as a blank { }.</p>
type DiagnosticErrorListener ¶
type DiagnosticErrorListener struct { *DefaultErrorListener // contains filtered or unexported fields }
func NewDiagnosticErrorListener ¶
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener
func (*DiagnosticErrorListener) ReportAmbiguity ¶
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (*DiagnosticErrorListener) ReportAttemptingFullContext ¶
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (*DiagnosticErrorListener) ReportContextSensitivity ¶
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
type DoubleDict ¶
type DoubleDict struct {
// contains filtered or unexported fields
}
func NewDoubleDict ¶
func NewDoubleDict() *DoubleDict
func (*DoubleDict) Get ¶
func (d *DoubleDict) Get(a, b int) interface{}
type EmptyPredictionContext ¶
type EmptyPredictionContext struct {
*BaseSingletonPredictionContext
}
func NewEmptyPredictionContext ¶
func NewEmptyPredictionContext() *EmptyPredictionContext
func (*EmptyPredictionContext) GetParent ¶
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext
func (*EmptyPredictionContext) String ¶
func (e *EmptyPredictionContext) String() string
type EpsilonTransition ¶
type EpsilonTransition struct { *BaseTransition // contains filtered or unexported fields }
func NewEpsilonTransition ¶
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition
func (*EpsilonTransition) Matches ¶
func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*EpsilonTransition) String ¶
func (t *EpsilonTransition) String() string
type ErrorListener ¶
type ErrorListener interface { SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) }
type ErrorNode ¶
type ErrorNode interface { TerminalNode // contains filtered or unexported methods }
type ErrorNodeImpl ¶
type ErrorNodeImpl struct {
*TerminalNodeImpl
}
func NewErrorNodeImpl ¶
func NewErrorNodeImpl(token Token) *ErrorNodeImpl
func (*ErrorNodeImpl) Accept ¶
func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{}
type ErrorStrategy ¶
type ErrorStrategy interface { RecoverInline(Parser) Token Recover(Parser, RecognitionException) Sync(Parser) InErrorRecoveryMode(Parser) bool ReportError(Parser, RecognitionException) ReportMatch(Parser) // contains filtered or unexported methods }
type FailedPredicateException ¶
type FailedPredicateException struct { *BaseRecognitionException // contains filtered or unexported fields }
func NewFailedPredicateException ¶
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException
type FileStream ¶
type FileStream struct { *InputStream // contains filtered or unexported fields }
func NewFileStream ¶
func NewFileStream(fileName string) (*FileStream, error)
func (*FileStream) GetSourceName ¶
func (f *FileStream) GetSourceName() string
type IATNSimulator ¶
type ILexerATNSimulator ¶
type ILexerATNSimulator interface { IATNSimulator Match(input CharStream, mode int) int GetCharPositionInLine() int GetLine() int GetText(input CharStream) string Consume(input CharStream) // contains filtered or unexported methods }
type InputMisMatchException ¶
type InputMisMatchException struct {
*BaseRecognitionException
}
func NewInputMisMatchException ¶
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException
This signifies any kind of mismatched input exceptions such as when the current input does not Match the expected token.
type InputStream ¶
type InputStream struct {
// contains filtered or unexported fields
}
func NewInputStream ¶
func NewInputStream(data string) *InputStream
func (*InputStream) Consume ¶
func (is *InputStream) Consume()
func (*InputStream) GetSourceName ¶
func (*InputStream) GetSourceName() string
func (*InputStream) GetTextFromInterval ¶
func (is *InputStream) GetTextFromInterval(i *Interval) string
func (*InputStream) GetTextFromTokens ¶
func (is *InputStream) GetTextFromTokens(start, stop Token) string
func (*InputStream) Index ¶
func (is *InputStream) Index() int
func (*InputStream) LA ¶
func (is *InputStream) LA(offset int) int
func (*InputStream) LT ¶
func (is *InputStream) LT(offset int) int
func (*InputStream) Mark ¶
func (is *InputStream) Mark() int
mark/release do nothing we have entire buffer
func (*InputStream) Release ¶
func (is *InputStream) Release(marker int)
func (*InputStream) Seek ¶
func (is *InputStream) Seek(index int)
func (*InputStream) Size ¶
func (is *InputStream) Size() int
func (*InputStream) String ¶
func (is *InputStream) String() string
type InsertAfterOp ¶
type InsertAfterOp struct {
BaseRewriteOperation
}
func NewInsertAfterOp ¶
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp
func (*InsertAfterOp) String ¶
func (op *InsertAfterOp) String() string
type InsertBeforeOp ¶
type InsertBeforeOp struct {
BaseRewriteOperation
}
func NewInsertBeforeOp ¶
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp
func (*InsertBeforeOp) String ¶
func (op *InsertBeforeOp) String() string
type InterpreterRuleContext ¶
type InterpreterRuleContext interface { ParserRuleContext }
type IntervalSet ¶
type IntervalSet struct {
// contains filtered or unexported fields
}
func NewIntervalSet ¶
func NewIntervalSet() *IntervalSet
func (*IntervalSet) String ¶
func (i *IntervalSet) String() string
func (*IntervalSet) StringVerbose ¶
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string
type LL1Analyzer ¶
type LL1Analyzer struct {
// contains filtered or unexported fields
}
func NewLL1Analyzer ¶
func NewLL1Analyzer(atn *ATN) *LL1Analyzer
func (*LL1Analyzer) Look ¶
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
Compute the set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.
<p>If {@code ctx} is {@code nil} and the end of the rule containing {@code s} is reached, {@link Token//EPSILON} is added to the result set. If {@code ctx} is not {@code nil} and the end of the outermost rule is reached, {@link Token//EOF} is added to the result set.</p>
@param s the ATN state @param stopState the ATN state to stop at. This can be a {@link BlockEndState} to detect epsilon paths through a closure. @param ctx the complete parser context, or {@code nil} if the context should be ignored
@return The set of tokens that can follow {@code s} in the ATN in the specified {@code ctx}.
type Lexer ¶
type Lexer interface { TokenSource Recognizer Emit() Token SetChannel(int) PushMode(int) PopMode() int SetType(int) SetMode(int) }
type LexerATNConfig ¶
type LexerATNConfig struct { *BaseATNConfig // contains filtered or unexported fields }
func NewLexerATNConfig1 ¶
func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig
func NewLexerATNConfig2 ¶
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig
func NewLexerATNConfig3 ¶
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig
func NewLexerATNConfig4 ¶
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig
func NewLexerATNConfig5 ¶
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig
func NewLexerATNConfig6 ¶
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig
type LexerATNSimulator ¶
type LexerATNSimulator struct { *BaseATNSimulator Line int CharPositionInLine int MatchCalls int // contains filtered or unexported fields }
func NewLexerATNSimulator ¶
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator
func (*LexerATNSimulator) Consume ¶
func (l *LexerATNSimulator) Consume(input CharStream)
func (*LexerATNSimulator) GetCharPositionInLine ¶
func (l *LexerATNSimulator) GetCharPositionInLine() int
func (*LexerATNSimulator) GetLine ¶
func (l *LexerATNSimulator) GetLine() int
func (*LexerATNSimulator) GetText ¶
func (l *LexerATNSimulator) GetText(input CharStream) string
Get the text Matched so far for the current token.
func (*LexerATNSimulator) GetTokenName ¶
func (l *LexerATNSimulator) GetTokenName(tt int) string
func (*LexerATNSimulator) Match ¶
func (l *LexerATNSimulator) Match(input CharStream, mode int) int
func (*LexerATNSimulator) MatchATN ¶
func (l *LexerATNSimulator) MatchATN(input CharStream) int
type LexerAction ¶
type LexerAction interface {
// contains filtered or unexported methods
}
type LexerActionExecutor ¶
type LexerActionExecutor struct {
// contains filtered or unexported fields
}
func LexerActionExecutorappend ¶
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor
Creates a {@link LexerActionExecutor} which executes the actions for the input {@code lexerActionExecutor} followed by a specified {@code lexerAction}.
@param lexerActionExecutor The executor for actions already traversed by the lexer while Matching a token within a particular {@link LexerATNConfig}. If this is {@code nil}, the method behaves as though it were an empty executor. @param lexerAction The lexer action to execute after the actions specified in {@code lexerActionExecutor}.
@return A {@link LexerActionExecutor} for executing the combined actions of {@code lexerActionExecutor} and {@code lexerAction}.
func NewLexerActionExecutor ¶
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor
type LexerChannelAction ¶
type LexerChannelAction struct { *BaseLexerAction // contains filtered or unexported fields }
Implements the {@code channel} lexer action by calling {@link Lexer//setChannel} with the assigned channel. Constructs a new {@code channel} action with the specified channel value. @param channel The channel value to pass to {@link Lexer//setChannel}.
func NewLexerChannelAction ¶
func NewLexerChannelAction(channel int) *LexerChannelAction
func (*LexerChannelAction) String ¶
func (l *LexerChannelAction) String() string
type LexerCustomAction ¶
type LexerCustomAction struct { *BaseLexerAction // contains filtered or unexported fields }
func NewLexerCustomAction ¶
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction
type LexerDFASerializer ¶
type LexerDFASerializer struct {
*DFASerializer
}
func NewLexerDFASerializer ¶
func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer
func (*LexerDFASerializer) String ¶
func (l *LexerDFASerializer) String() string
type LexerIndexedCustomAction ¶
type LexerIndexedCustomAction struct { *BaseLexerAction // contains filtered or unexported fields }
Constructs a new indexed custom action by associating a character offset with a {@link LexerAction}.
<p>Note: This class is only required for lexer actions for which {@link LexerAction//isPositionDependent} returns {@code true}.</p>
@param offset The offset into the input {@link CharStream}, relative to the token start index, at which the specified lexer action should be executed. @param action The lexer action to execute at a particular offset in the input {@link CharStream}.
func NewLexerIndexedCustomAction ¶
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction
type LexerModeAction ¶
type LexerModeAction struct { *BaseLexerAction // contains filtered or unexported fields }
Implements the {@code mode} lexer action by calling {@link Lexer//mode} with the assigned mode.
func NewLexerModeAction ¶
func NewLexerModeAction(mode int) *LexerModeAction
func (*LexerModeAction) String ¶
func (l *LexerModeAction) String() string
type LexerMoreAction ¶
type LexerMoreAction struct {
*BaseLexerAction
}
func NewLexerMoreAction ¶
func NewLexerMoreAction() *LexerMoreAction
func (*LexerMoreAction) String ¶
func (l *LexerMoreAction) String() string
type LexerNoViableAltException ¶
type LexerNoViableAltException struct { *BaseRecognitionException // contains filtered or unexported fields }
func NewLexerNoViableAltException ¶
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException
func (*LexerNoViableAltException) String ¶
func (l *LexerNoViableAltException) String() string
type LexerPopModeAction ¶
type LexerPopModeAction struct {
*BaseLexerAction
}
Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
<p>The {@code popMode} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
func NewLexerPopModeAction ¶
func NewLexerPopModeAction() *LexerPopModeAction
func (*LexerPopModeAction) String ¶
func (l *LexerPopModeAction) String() string
type LexerPushModeAction ¶
type LexerPushModeAction struct { *BaseLexerAction // contains filtered or unexported fields }
Implements the {@code pushMode} lexer action by calling {@link Lexer//pushMode} with the assigned mode.
func NewLexerPushModeAction ¶
func NewLexerPushModeAction(mode int) *LexerPushModeAction
func (*LexerPushModeAction) String ¶
func (l *LexerPushModeAction) String() string
type LexerSkipAction ¶
type LexerSkipAction struct {
*BaseLexerAction
}
Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
<p>The {@code Skip} command does not have any parameters, so this action is implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
func NewLexerSkipAction ¶
func NewLexerSkipAction() *LexerSkipAction
func (*LexerSkipAction) String ¶
func (l *LexerSkipAction) String() string
type LexerTypeAction ¶
type LexerTypeAction struct { *BaseLexerAction // contains filtered or unexported fields }
Implements the {@code type} lexer action by calling {@link Lexer//setType}
with the assigned type.
func NewLexerTypeAction ¶
func NewLexerTypeAction(thetype int) *LexerTypeAction
func (*LexerTypeAction) String ¶
func (l *LexerTypeAction) String() string
type LoopEndState ¶
type LoopEndState struct { *BaseATNState // contains filtered or unexported fields }
LoopEndState marks the end of a * or + loop.
func NewLoopEndState ¶
func NewLoopEndState() *LoopEndState
type NoViableAltException ¶
type NoViableAltException struct { *BaseRecognitionException // contains filtered or unexported fields }
func NewNoViableAltException ¶
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException
Indicates that the parser could not decide which of two or more paths to take based upon the remaining input. It tracks the starting token of the offending input and also knows where the parser was in the various paths when the error occurred. Reported by ReportNoViableAlternative()
type NotSetTransition ¶
type NotSetTransition struct {
*SetTransition
}
func NewNotSetTransition ¶
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition
func (*NotSetTransition) Matches ¶
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*NotSetTransition) String ¶
func (t *NotSetTransition) String() string
type OR ¶
type OR struct {
// contains filtered or unexported fields
}
func NewOR ¶
func NewOR(a, b SemanticContext) *OR
type OrderedATNConfigSet ¶
type OrderedATNConfigSet struct {
*BaseATNConfigSet
}
func NewOrderedATNConfigSet ¶
func NewOrderedATNConfigSet() *OrderedATNConfigSet
type ParseCancellationException ¶
type ParseCancellationException struct { }
func NewParseCancellationException ¶
func NewParseCancellationException() *ParseCancellationException
type ParseTree ¶
type ParseTree interface { SyntaxTree Accept(Visitor ParseTreeVisitor) interface{} GetText() string ToStringTree([]string, Recognizer) string }
func TreesDescendants ¶
func TreesFindAllTokenNodes ¶
func TreesfindAllNodes ¶
func TreesfindAllRuleNodes ¶
type ParseTreeListener ¶
type ParseTreeListener interface { VisitTerminal(node TerminalNode) VisitErrorNode(node ErrorNode) EnterEveryRule(ctx ParserRuleContext) ExitEveryRule(ctx ParserRuleContext) }
type ParseTreeVisitor ¶
type ParseTreeVisitor interface { Visit(tree ParseTree) interface{} VisitChildren(node RuleNode) interface{} VisitTerminal(node TerminalNode) interface{} VisitErrorNode(node ErrorNode) interface{} }
type ParseTreeWalker ¶
type ParseTreeWalker struct { }
func NewParseTreeWalker ¶
func NewParseTreeWalker() *ParseTreeWalker
func (*ParseTreeWalker) EnterRule ¶
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode)
Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} then by triggering the event specific to the given parse tree node
func (*ParseTreeWalker) ExitRule ¶
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode)
Exits a grammar rule by first triggering the event specific to the given parse tree node then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
func (*ParseTreeWalker) Walk ¶
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree)
Performs a walk on the given parse tree starting at the root and going down recursively with depth-first search. On each node, EnterRule is called before recursively walking down into child nodes, then ExitRule is called after the recursive call to wind up.
type Parser ¶
type Parser interface { Recognizer GetInterpreter() *ParserATNSimulator GetTokenStream() TokenStream GetTokenFactory() TokenFactory GetParserRuleContext() ParserRuleContext SetParserRuleContext(ParserRuleContext) Consume() Token GetParseListeners() []ParseTreeListener GetErrorHandler() ErrorStrategy SetErrorHandler(ErrorStrategy) GetInputStream() IntStream GetCurrentToken() Token GetExpectedTokens() *IntervalSet NotifyErrorListeners(string, Token, RecognitionException) IsExpectedToken(int) bool GetPrecedence() int GetRuleInvocationStack(ParserRuleContext) []string }
type ParserATNSimulator ¶
type ParserATNSimulator struct { *BaseATNSimulator // contains filtered or unexported fields }
func NewParserATNSimulator ¶
func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator
func (*ParserATNSimulator) AdaptivePredict ¶
func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int
func (*ParserATNSimulator) GetAltThatFinishedDecisionEntryRule ¶
func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int
func (*ParserATNSimulator) GetPredictionMode ¶
func (p *ParserATNSimulator) GetPredictionMode() int
func (*ParserATNSimulator) GetTokenName ¶
func (p *ParserATNSimulator) GetTokenName(t int) string
func (*ParserATNSimulator) ReportAmbiguity ¶
func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
If context sensitive parsing, we know it's ambiguity not conflict//
func (*ParserATNSimulator) ReportAttemptingFullContext ¶
func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int)
func (*ParserATNSimulator) ReportContextSensitivity ¶
func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int)
func (*ParserATNSimulator) SetPredictionMode ¶
func (p *ParserATNSimulator) SetPredictionMode(v int)
type ParserRuleContext ¶
type ParserRuleContext interface { RuleContext SetException(RecognitionException) AddTokenNode(token Token) *TerminalNodeImpl AddErrorNode(badToken Token) *ErrorNodeImpl EnterRule(listener ParseTreeListener) ExitRule(listener ParseTreeListener) SetStart(Token) GetStart() Token SetStop(Token) GetStop() Token AddChild(child RuleContext) RuleContext RemoveLastChild() }
type PlusBlockStartState ¶
type PlusBlockStartState struct { *BaseBlockStartState // contains filtered or unexported fields }
PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a decision state; we don't use it for code generation. Somebody might need it, it is included for completeness. In reality, PlusLoopbackState is the real decision-making node for A+.
func NewPlusBlockStartState ¶
func NewPlusBlockStartState() *PlusBlockStartState
type PlusLoopbackState ¶
type PlusLoopbackState struct {
*BaseDecisionState
}
PlusLoopbackState is a decision state for A+ and (A|B)+. It has two transitions: one to the loop back to start of the block, and one to exit.
func NewPlusLoopbackState ¶
func NewPlusLoopbackState() *PlusLoopbackState
type PrecedencePredicate ¶
type PrecedencePredicate struct {
// contains filtered or unexported fields
}
func NewPrecedencePredicate ¶
func NewPrecedencePredicate(precedence int) *PrecedencePredicate
func PrecedencePredicatefilterPrecedencePredicates ¶
func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate
func (*PrecedencePredicate) String ¶
func (p *PrecedencePredicate) String() string
type PrecedencePredicateTransition ¶
type PrecedencePredicateTransition struct { *BaseAbstractPredicateTransition // contains filtered or unexported fields }
func NewPrecedencePredicateTransition ¶
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition
func (*PrecedencePredicateTransition) Matches ¶
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*PrecedencePredicateTransition) String ¶
func (t *PrecedencePredicateTransition) String() string
type PredPrediction ¶
type PredPrediction struct {
// contains filtered or unexported fields
}
PredPrediction maps a predicate to a predicted alternative.
func NewPredPrediction ¶
func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction
func (*PredPrediction) String ¶
func (p *PredPrediction) String() string
type Predicate ¶
type Predicate struct {
// contains filtered or unexported fields
}
func NewPredicate ¶
type PredicateTransition ¶
type PredicateTransition struct { *BaseAbstractPredicateTransition // contains filtered or unexported fields }
func NewPredicateTransition ¶
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition
func (*PredicateTransition) Matches ¶
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*PredicateTransition) String ¶
func (t *PredicateTransition) String() string
type PredictionContext ¶
type PredictionContext interface { GetParent(int) PredictionContext String() string // contains filtered or unexported methods }
func SingletonBasePredictionContextCreate ¶
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext
type PredictionContextCache ¶
type PredictionContextCache struct {
// contains filtered or unexported fields
}
func NewPredictionContextCache ¶
func NewPredictionContextCache() *PredictionContextCache
func (*PredictionContextCache) Get ¶
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext
type ProxyErrorListener ¶
type ProxyErrorListener struct { *DefaultErrorListener // contains filtered or unexported fields }
func NewProxyErrorListener ¶
func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener
func (*ProxyErrorListener) ReportAmbiguity ¶
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
func (*ProxyErrorListener) ReportAttemptingFullContext ¶
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
func (*ProxyErrorListener) ReportContextSensitivity ¶
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
func (*ProxyErrorListener) SyntaxError ¶
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
type RangeTransition ¶
type RangeTransition struct { *BaseTransition // contains filtered or unexported fields }
func NewRangeTransition ¶
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition
func (*RangeTransition) Matches ¶
func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*RangeTransition) String ¶
func (t *RangeTransition) String() string
type RecognitionException ¶
type Recognizer ¶
type Recognizer interface { GetLiteralNames() []string GetSymbolicNames() []string GetRuleNames() []string Sempred(RuleContext, int, int) bool Precpred(RuleContext, int) bool GetState() int SetState(int) Action(RuleContext, int, int) AddErrorListener(ErrorListener) RemoveErrorListeners() GetATN() *ATN GetErrorListenerDispatch() ErrorListener }
type ReplaceOp ¶
type ReplaceOp struct { BaseRewriteOperation LastIndex int }
ReplaceOp replaces a range of tokens from x..y with (y-x)+1 ReplaceOp instructions.
func NewReplaceOp ¶
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp
type RewriteOperation ¶
type RewriteOperation interface { // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. Execute(buffer *bytes.Buffer) int String() string GetInstructionIndex() int GetIndex() int GetText() string GetOpName() string GetTokens() TokenStream SetInstructionIndex(val int) SetIndex(int) SetText(string) SetOpName(string) SetTokens(TokenStream) }
type RuleContext ¶
type RuleNode ¶
type RuleNode interface { ParseTree GetRuleContext() RuleContext GetBaseRuleContext() *BaseRuleContext }
type RuleStartState ¶
type RuleStartState struct { *BaseATNState // contains filtered or unexported fields }
func NewRuleStartState ¶
func NewRuleStartState() *RuleStartState
type RuleStopState ¶
type RuleStopState struct {
*BaseATNState
}
RuleStopState is the last node in the ATN for a rule, unless that rule is the start symbol. In that case, there is one transition to EOF. Later, we might encode references to all calls to this rule to compute FOLLOW sets for error handling.
func NewRuleStopState ¶
func NewRuleStopState() *RuleStopState
type RuleTransition ¶
type RuleTransition struct { *BaseTransition // contains filtered or unexported fields }
func NewRuleTransition ¶
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition
func (*RuleTransition) Matches ¶
func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
type SemanticContext ¶
type SemanticContext interface { String() string // contains filtered or unexported methods }
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
func SemanticContextandContext ¶
func SemanticContextandContext(a, b SemanticContext) SemanticContext
func SemanticContextorContext ¶
func SemanticContextorContext(a, b SemanticContext) SemanticContext
type SetTransition ¶
type SetTransition struct {
*BaseTransition
}
func NewSetTransition ¶
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition
func (*SetTransition) Matches ¶
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*SetTransition) String ¶
func (t *SetTransition) String() string
type SimState ¶
type SimState struct {
// contains filtered or unexported fields
}
func NewSimState ¶
func NewSimState() *SimState
type SingletonPredictionContext ¶
type SingletonPredictionContext interface { PredictionContext }
type StarBlockStartState ¶
type StarBlockStartState struct {
*BaseBlockStartState
}
StarBlockStartState is the block that begins a closure loop.
func NewStarBlockStartState ¶
func NewStarBlockStartState() *StarBlockStartState
type StarLoopEntryState ¶
type StarLoopEntryState struct { *BaseDecisionState // contains filtered or unexported fields }
func NewStarLoopEntryState ¶
func NewStarLoopEntryState() *StarLoopEntryState
type StarLoopbackState ¶
type StarLoopbackState struct {
*BaseATNState
}
func NewStarLoopbackState ¶
func NewStarLoopbackState() *StarLoopbackState
type SyntaxTree ¶
type TerminalNode ¶
type TerminalNodeImpl ¶
type TerminalNodeImpl struct {
// contains filtered or unexported fields
}
func NewTerminalNodeImpl ¶
func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl
func (*TerminalNodeImpl) Accept ¶
func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{}
func (*TerminalNodeImpl) GetChild ¶
func (t *TerminalNodeImpl) GetChild(i int) Tree
func (*TerminalNodeImpl) GetChildCount ¶
func (t *TerminalNodeImpl) GetChildCount() int
func (*TerminalNodeImpl) GetChildren ¶
func (t *TerminalNodeImpl) GetChildren() []Tree
func (*TerminalNodeImpl) GetParent ¶
func (t *TerminalNodeImpl) GetParent() Tree
func (*TerminalNodeImpl) GetPayload ¶
func (t *TerminalNodeImpl) GetPayload() interface{}
func (*TerminalNodeImpl) GetSourceInterval ¶
func (t *TerminalNodeImpl) GetSourceInterval() *Interval
func (*TerminalNodeImpl) GetSymbol ¶
func (t *TerminalNodeImpl) GetSymbol() Token
func (*TerminalNodeImpl) GetText ¶
func (t *TerminalNodeImpl) GetText() string
func (*TerminalNodeImpl) SetChildren ¶
func (t *TerminalNodeImpl) SetChildren(tree []Tree)
func (*TerminalNodeImpl) SetParent ¶
func (t *TerminalNodeImpl) SetParent(tree Tree)
func (*TerminalNodeImpl) String ¶
func (t *TerminalNodeImpl) String() string
func (*TerminalNodeImpl) ToStringTree ¶
func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string
type Token ¶
type Token interface { GetSource() *TokenSourceCharStreamPair GetTokenType() int GetChannel() int GetStart() int GetStop() int GetLine() int GetColumn() int GetText() string SetText(s string) GetTokenIndex() int SetTokenIndex(v int) GetTokenSource() TokenSource GetInputStream() CharStream }
type TokenFactory ¶
type TokenFactory interface {
Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}
TokenFactory creates CommonToken objects.
type TokenSource ¶
type TokenSource interface { NextToken() Token Skip() More() GetLine() int GetCharPositionInLine() int GetInputStream() CharStream GetSourceName() string GetTokenFactory() TokenFactory // contains filtered or unexported methods }
type TokenSourceCharStreamPair ¶
type TokenSourceCharStreamPair struct {
// contains filtered or unexported fields
}
type TokenStream ¶
type TokenStream interface { IntStream LT(k int) Token Get(index int) Token GetTokenSource() TokenSource SetTokenSource(TokenSource) GetAllText() string GetTextFromInterval(*Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string }
type TokenStreamRewriter ¶
type TokenStreamRewriter struct {
// contains filtered or unexported fields
}
func NewTokenStreamRewriter ¶
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter
func (*TokenStreamRewriter) AddToProgram ¶
func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation)
func (*TokenStreamRewriter) Delete ¶
func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int)
func (*TokenStreamRewriter) DeleteDefault ¶
func (tsr *TokenStreamRewriter) DeleteDefault(from, to int)
func (*TokenStreamRewriter) DeleteDefaultPos ¶
func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int)
func (*TokenStreamRewriter) DeleteProgram ¶
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string)
Reset the program so that no instructions exist.
func (*TokenStreamRewriter) DeleteProgramDefault ¶
func (tsr *TokenStreamRewriter) DeleteProgramDefault()
func (*TokenStreamRewriter) DeleteToken ¶
func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token)
func (*TokenStreamRewriter) DeleteTokenDefault ¶
func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token)
func (*TokenStreamRewriter) GetLastRewriteTokenIndex ¶
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int
func (*TokenStreamRewriter) GetLastRewriteTokenIndexDefault ¶
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int
func (*TokenStreamRewriter) GetProgram ¶
func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation
func (*TokenStreamRewriter) GetText ¶
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string
Return the text from the original tokens altered per the instructions given to this rewriter.
func (*TokenStreamRewriter) GetTextDefault ¶
func (tsr *TokenStreamRewriter) GetTextDefault() string
Return the text from the original tokens altered per the instructions given to this rewriter.
func (*TokenStreamRewriter) GetTokenStream ¶
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream
func (*TokenStreamRewriter) InitializeProgram ¶
func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation
func (*TokenStreamRewriter) InsertAfter ¶
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string)
func (*TokenStreamRewriter) InsertAfterDefault ¶
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string)
func (*TokenStreamRewriter) InsertAfterToken ¶
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string)
func (*TokenStreamRewriter) InsertBefore ¶
func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string)
func (*TokenStreamRewriter) InsertBeforeDefault ¶
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string)
func (*TokenStreamRewriter) InsertBeforeToken ¶
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string)
func (*TokenStreamRewriter) Replace ¶
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string)
func (*TokenStreamRewriter) ReplaceDefault ¶
func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string)
func (*TokenStreamRewriter) ReplaceDefaultPos ¶
func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string)
func (*TokenStreamRewriter) ReplaceToken ¶
func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string)
func (*TokenStreamRewriter) ReplaceTokenDefault ¶
func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string)
func (*TokenStreamRewriter) ReplaceTokenDefaultPos ¶
func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string)
func (*TokenStreamRewriter) Rollback ¶
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int)
Rollback the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED!
func (*TokenStreamRewriter) RollbackDefault ¶
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int)
func (*TokenStreamRewriter) SetLastRewriteTokenIndex ¶
func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int)
type TokensStartState ¶
type TokensStartState struct {
*BaseDecisionState
}
TokensStartState is the Tokens rule start state linking to each lexer rule start state.
func NewTokensStartState ¶
func NewTokensStartState() *TokensStartState
type TraceListener ¶
type TraceListener struct {
// contains filtered or unexported fields
}
func NewTraceListener ¶
func NewTraceListener(parser *BaseParser) *TraceListener
func (*TraceListener) EnterEveryRule ¶
func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext)
func (*TraceListener) ExitEveryRule ¶
func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext)
func (*TraceListener) VisitErrorNode ¶
func (t *TraceListener) VisitErrorNode(_ ErrorNode)
func (*TraceListener) VisitTerminal ¶
func (t *TraceListener) VisitTerminal(node TerminalNode)
type Transition ¶
type Tree ¶
type Tree interface { GetParent() Tree SetParent(Tree) GetPayload() interface{} GetChild(i int) Tree GetChildCount() int GetChildren() []Tree }
func TreesGetChildren ¶
Return ordered list of all children of this node
func TreesgetAncestors ¶
Return a list of all ancestors of this node. The first node of the list is the root and the last is the parent of this node.
type WildcardTransition ¶
type WildcardTransition struct {
*BaseTransition
}
func NewWildcardTransition ¶
func NewWildcardTransition(target ATNState) *WildcardTransition
func (*WildcardTransition) Matches ¶
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool
func (*WildcardTransition) String ¶
func (t *WildcardTransition) String() string
Source Files ¶
- atn.go
- atn_config.go
- atn_config_set.go
- atn_deserialization_options.go
- atn_deserializer.go
- atn_simulator.go
- atn_state.go
- atn_type.go
- char_stream.go
- common_token_factory.go
- common_token_stream.go
- dfa.go
- dfa_serializer.go
- dfa_state.go
- diagnostic_error_listener.go
- error_listener.go
- error_strategy.go
- errors.go
- file_stream.go
- input_stream.go
- int_stream.go
- interval_set.go
- lexer.go
- lexer_action.go
- lexer_action_executor.go
- lexer_atn_simulator.go
- ll1_analyzer.go
- parser.go
- parser_atn_simulator.go
- parser_rule_context.go
- prediction_context.go
- prediction_mode.go
- recognizer.go
- rule_context.go
- semantic_context.go
- token.go
- token_source.go
- token_stream.go
- tokenstream_rewriter.go
- trace_listener.go
- transition.go
- tree.go
- trees.go
- utils.go
- utils_set.go