Documentation ¶
Overview ¶
Package pi provides the main interactive parser structure for running the parser. The piv sub-package provides the GUI for constructing and testing a parser.
Index ¶
- Constants
- Variables
- func OpenStdParsers() error
- func VersionInfo() string
- type FileState
- func (fs *FileState) Init()
- func (fs *FileState) LexAtEnd() bool
- func (fs *FileState) LexErrString() string
- func (fs *FileState) LexHasErrs() bool
- func (fs *FileState) LexLine(ln int) lex.Line
- func (fs *FileState) LexLineString() string
- func (fs *FileState) LexNextSrcLine() string
- func (fs *FileState) ParseAtEnd() bool
- func (fs *FileState) ParseErrString() string
- func (fs *FileState) ParseHasErrs() bool
- func (fs *FileState) ParseNextSrcLine() string
- func (fs *FileState) ParseRuleString(full bool) string
- func (fs *FileState) PassTwoErrString() string
- func (fs *FileState) PassTwoHasErrs() bool
- func (fs *FileState) SetSrc(src *[][]rune, fname string)
- type LangFlags
- type LangProps
- type LangSupport
- type Parser
- func (pr *Parser) DoPassTwo(fs *FileState)
- func (pr *Parser) Init()
- func (pr *Parser) InitAll(fs *FileState)
- func (pr *Parser) LexAll(fs *FileState)
- func (pr *Parser) LexInit(fs *FileState)
- func (pr *Parser) LexLine(fs *FileState, ln int) lex.Line
- func (pr *Parser) LexNext(fs *FileState) *lex.Rule
- func (pr *Parser) LexNextLine(fs *FileState) *lex.Rule
- func (pr *Parser) LexRun(fs *FileState)
- func (pr *Parser) OpenJSON(filename string) error
- func (pr *Parser) ParseNext(fs *FileState) *parse.Rule
- func (pr *Parser) ParserInit(fs *FileState) bool
- func (pr *Parser) SaveGrammar(filename string) error
- func (pr *Parser) SaveJSON(filename string) error
Constants ¶
const (
	Version     = "v0.5.1"
	GitCommit   = "f99e938"          // the commit JUST BEFORE the release
	VersionDate = "2018-12-09 10:34" // UTC
)
Variables ¶
var KiT_LangFlags = kit.Enums.AddEnum(LangFlagsN, false, nil)
var StdLangProps = map[filecat.Supported]LangProps{
	filecat.Ada:        {filecat.Ada, "--", "", "", nil, nil},
	filecat.Bash:       {filecat.Bash, "# ", "", "", nil, nil},
	filecat.Csh:        {filecat.Csh, "# ", "", "", nil, nil},
	filecat.C:          {filecat.C, "// ", "/* ", " */", nil, nil},
	filecat.CSharp:     {filecat.CSharp, "// ", "/* ", " */", nil, nil},
	filecat.D:          {filecat.D, "// ", "/* ", " */", nil, nil},
	filecat.ObjC:       {filecat.ObjC, "// ", "/* ", " */", nil, nil},
	filecat.Go:         {filecat.Go, "// ", "/* ", " */", []LangFlags{IndentTab}, nil},
	filecat.Java:       {filecat.Java, "// ", "/* ", " */", nil, nil},
	filecat.JavaScript: {filecat.JavaScript, "// ", "/* ", " */", nil, nil},
	filecat.Eiffel:     {filecat.Eiffel, "--", "", "", nil, nil},
	filecat.Haskell:    {filecat.Haskell, "--", "{- ", "-}", nil, nil},
	filecat.Lisp:       {filecat.Lisp, "; ", "", "", nil, nil},
	filecat.Lua:        {filecat.Lua, "--", "---[[ ", "--]]", nil, nil},
	filecat.Makefile:   {filecat.Makefile, "# ", "", "", []LangFlags{IndentTab}, nil},
	filecat.Matlab:     {filecat.Matlab, "% ", "%{ ", " %}", nil, nil},
	filecat.OCaml:      {filecat.OCaml, "", "(* ", " *)", nil, nil},
	filecat.Pascal:     {filecat.Pascal, "// ", "{ ", " }", nil, nil},
	filecat.Perl:       {filecat.Perl, "# ", "", "", nil, nil},
	filecat.Python:     {filecat.Python, "# ", "", "", []LangFlags{IndentSpace}, nil},
	filecat.Php:        {filecat.Php, "// ", "/* ", " */", nil, nil},
	filecat.R:          {filecat.R, "# ", "", "", nil, nil},
	filecat.Ruby:       {filecat.Ruby, "# ", "", "", nil, nil},
	filecat.Rust:       {filecat.Rust, "// ", "/* ", " */", nil, nil},
	filecat.Scala:      {filecat.Scala, "// ", "/* ", " */", nil, nil},
	filecat.Html:       {filecat.Html, "", "<!-- ", " -->", nil, nil},
	filecat.TeX:        {filecat.TeX, "% ", "", "", nil, nil},
	filecat.Markdown:   {filecat.Markdown, "", "<!--- ", " -->", []LangFlags{IndentSpace}, nil},
}
StdLangProps is the standard compiled-in set of language properties
var TheLangSupport = LangSupport{}
Functions ¶
func OpenStdParsers ¶
func OpenStdParsers() error
OpenStdParsers opens all the standard parsers for languages, from the langs/ directory
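For example, a minimal sketch of loading the standard parsers and looking up the one registered for Go (the github.com/goki/pi import paths are assumed here; error handling is abbreviated):

	package main

	import (
		"fmt"
		"log"

		"github.com/goki/pi/filecat"
		"github.com/goki/pi/pi"
	)

	func main() {
		if err := pi.OpenStdParsers(); err != nil {
			log.Fatal(err) // langs/ directory missing or a parser file failed to load
		}
		// each loaded parser is attached to its StdLangProps entry
		goProps := pi.StdLangProps[filecat.Go]
		fmt.Println("Go parser loaded:", goProps.Parser != nil)
	}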
Types ¶
type FileState ¶
type FileState struct {
	Src        lex.File     `json:"-" xml:"-" desc:"the source to be parsed -- also holds the full lexed tokens"`
	LexState   lex.State    `json:"-" xml:"-" desc:"state for lexing"`
	TwoState   lex.TwoState `json:"-" xml:"-" desc:"state for second pass nesting depth and EOS matching"`
	ParseState parse.State  `json:"-" xml:"-" desc:"state for parsing"`
	Ast        parse.Ast    `json:"-" xml:"-" desc:"ast output tree from parsing"`
}
FileState is the parsing state information for a given file
func NewFileState ¶
func NewFileState() *FileState
NewFileState returns a new initialized file state
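As an illustration, a hypothetical helper (fileStateFromBytes is not part of this API) that converts raw file bytes into the *[][]rune form that SetSrc expects; the newline-splitting convention is an assumption:

	import (
		"strings"

		"github.com/goki/pi/pi"
	)

	// fileStateFromBytes splits raw source bytes into lines of runes
	// and hands them to a fresh FileState via SetSrc.
	func fileStateFromBytes(b []byte, fname string) *pi.FileState {
		lines := strings.Split(string(b), "\n")
		src := make([][]rune, len(lines))
		for i, ln := range lines {
			src[i] = []rune(ln)
		}
		fs := pi.NewFileState() // returns a new initialized file state
		fs.SetSrc(&src, fname)
		return fs
	}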
func (*FileState) LexErrString ¶
LexErrString returns all the lexing errors as a string
func (*FileState) LexHasErrs ¶
LexHasErrs returns true if there were errors from lexing
func (*FileState) LexLine ¶
LexLine returns the lexing output for given line, combining comments and all other tokens and allocating new memory using clone
func (*FileState) LexLineString ¶
LexLineString returns a string rep of the current lexing output for the current line
func (*FileState) LexNextSrcLine ¶
LexNextSrcLine returns the next line of source that the lexer is currently at
func (*FileState) ParseAtEnd ¶
ParseAtEnd returns true if parsing state is now at end of source
func (*FileState) ParseErrString ¶
ParseErrString returns all the parsing errors as a string
func (*FileState) ParseHasErrs ¶
ParseHasErrs returns true if there were errors from parsing
func (*FileState) ParseNextSrcLine ¶
ParseNextSrcLine returns the next line of source that the parser is currently at
func (*FileState) ParseRuleString ¶
ParseRuleString returns the rule info for entire source -- if full then it includes the full stack at each point -- otherwise just the top of stack
func (*FileState) PassTwoErrString ¶
PassTwoErrString returns all the pass two errors as a string
func (*FileState) PassTwoHasErrs ¶
PassTwoHasErrs returns true if there were errors from pass two processing
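A sketch of checking each stage's errors after a run, using only the query methods documented above (fs is assumed to be a FileState from a completed lex/parse pass):

	if fs.LexHasErrs() {
		fmt.Println("lex errors:\n" + fs.LexErrString())
	}
	if fs.PassTwoHasErrs() {
		fmt.Println("pass two errors:\n" + fs.PassTwoErrString())
	}
	if fs.ParseHasErrs() {
		fmt.Println("parse errors:\n" + fs.ParseErrString())
	}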
type LangFlags ¶
type LangFlags int
LangFlags are special properties of a given language
const (
	// NoFlags = nothing special
	NoFlags LangFlags = iota

	// IndentSpace means that spaces must be used for this language
	IndentSpace

	// IndentTab means that tabs must be used for this language
	IndentTab

	LangFlagsN
)
LangFlags
func (LangFlags) MarshalJSON ¶
func (*LangFlags) UnmarshalJSON ¶
type LangProps ¶
type LangProps struct {
	Lang      filecat.Supported `desc:"language -- must be a supported one from Supported list"`
	CommentLn string            `desc:"character(s) that start a single-line comment -- if empty then multi-line comment syntax will be used"`
	CommentSt string            `desc:"character(s) that start a multi-line comment or one that requires both start and end"`
	CommentEd string            `desc:"character(s) that end a multi-line comment or one that requires both start and end"`
	Flags     []LangFlags       `desc:"special properties for this language"`
	Parser    *Parser           `json:"-" xml:"-" desc:"parser for this language"`
}
LangProps contains properties of languages supported by the Pi parser framework
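For instance, a sketch of reading comment delimiters and indent flags for one language out of StdLangProps, using only the fields and constants documented above:

	props, ok := pi.StdLangProps[filecat.Python]
	if ok {
		if props.CommentLn != "" {
			fmt.Println("single-line comments start with:", props.CommentLn)
		} else {
			fmt.Println("multi-line comments:", props.CommentSt, "...", props.CommentEd)
		}
		for _, fl := range props.Flags {
			if fl == pi.IndentSpace {
				fmt.Println("indentation must use spaces")
			}
		}
	}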
type LangSupport ¶
type LangSupport struct { }
LangSupport provides general support for supported languages. e.g., looking up lexers and parsers by name. Implements the lex.LangLexer interface to provide access to other Guest Lexers
type Parser ¶
type Parser struct {
	Lexer    lex.Rule    `desc:"lexer rules for first pass of lexing file"`
	PassTwo  lex.PassTwo `desc:"second pass after lexing -- computes nesting depth and EOS finding"`
	Parser   parse.Rule  `desc:"parser rules for parsing lexed tokens"`
	Filename string      `desc:"file name for overall parser"`
}
Parser is the overall parser for managing the parsing
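As a sketch of how the methods below are assumed to combine into the full pipeline (pr holds loaded grammar rules, fs comes from NewFileState plus SetSrc; the ordering follows the method docs but is an assumption):

	pr.InitAll(fs)   // initialize everything after the parser has been loaded
	pr.LexAll(fs)    // first pass: lex the entire source
	pr.DoPassTwo(fs) // second pass: nesting depth and EOS matching
	if pr.ParserInit(fs) {
		for pr.ParseNext(fs) != nil {
			// each step returns the lowest-level rule that matched;
			// nil means a no-match error or end of source
		}
	}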
func (*Parser) Init ¶
func (pr *Parser) Init()
Init initializes the parser -- must be called after creation
func (*Parser) InitAll ¶
InitAll initializes everything about the parser -- call this when setting up a new parser after it has been loaded etc
func (*Parser) LexLine ¶
LexLine runs lexer for given single line of source, returns merged regular and token comment lines, cloned and ready for use
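A small sketch of the editor-style use this enables -- re-lexing a single line after an edit; zero-based line indexing and lex.Line being a slice of token spans are assumptions here:

	line := pr.LexLine(fs, 42) // cloned lex.Line, safe to keep after further edits
	fmt.Printf("line 42 carries %d lexical tokens\n", len(line))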
func (*Parser) LexNext ¶
LexNext does the next step of lexing -- returns the lowest-level rule that matched, or nil upon a no-match error or reaching the end of the source input
func (*Parser) LexNextLine ¶
LexNextLine does the next line of lexing -- returns the lowest-level rule that matched at the end, or nil upon a no-match error or reaching the end of the source input
func (*Parser) OpenJSON ¶
OpenJSON opens lexer and parser rules from the given filename, in a standard JSON-formatted file
func (*Parser) ParseNext ¶
ParseNext does the next step of parsing -- returns the lowest-level rule that matched, or nil upon a no-match error or reaching the end
func (*Parser) ParserInit ¶
ParserInit initializes the parser prior to running
func (*Parser) SaveGrammar ¶
SaveGrammar saves lexer and parser grammar rules to BNF-like .pig file
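Taken together with OpenJSON and SaveJSON above, a grammar can round-trip through JSON and also be exported in the BNF-like .pig form; a sketch with illustrative file names (error handling via log.Fatal for brevity):

	pr := &pi.Parser{}
	pr.Init() // must be called after creation
	if err := pr.OpenJSON("go.pi"); err != nil {
		log.Fatal(err)
	}
	if err := pr.SaveJSON("go_backup.pi"); err != nil {
		log.Fatal(err)
	}
	if err := pr.SaveGrammar("go.pig"); err != nil {
		log.Fatal(err)
	}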
Directories ¶
Path | Synopsis |
---|---|
cmd | |
lex | Package lex provides all the lexing functions that transform text into lexical tokens, using token types defined in the pi/token package. |
parse | Package parse does the parsing stage after lexing, using a top-down recursive-descent (TDRD) strategy, with a special reverse mode to deal with left-associative binary expressions which otherwise end up being right-associative for TDRD parsing. |
piv | Package piv provides the PiView object for the full GUI view of the interactive parser (pi) system. |
token | Package token defines a complete set of all lexical tokens for any kind of language! It is based on the alecthomas/chroma / pygments lexical tokens plus all the more detailed tokens needed for actually parsing languages. |