Documentation ¶
Index ¶
Constants ¶
const (
	SWIFT_ALPHA        = EOF + iota + 1 // 'A' - 'Z'
	SWIFT_CHARACTER                     // 'A' - 'Z', '0' - '9'
	SWIFT_DECIMAL                       // '0' - '9', ','
	SWIFT_NUMERIC                       // '0' - '9'
	SWIFT_ALPHANUMERIC                  // all characters from charset
	SWIFT_DATASET_START
	SWIFT_TAG_SEPARATOR
	SWIFT_MESSAGE_SEPARATOR
)
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Iterator ¶
type Iterator struct {
// contains filtered or unexported fields
}
An Iterator iterates over a slice of Tokens.
func NewIterator ¶
NewIterator returns a fully populated Iterator.
type StringLexer ¶
type StringLexer struct {
// contains filtered or unexported fields
}
A StringLexer is an HBCI data element lexer based on an input string.
func NewStringLexer ¶
func NewStringLexer(name, input string) *StringLexer
NewStringLexer creates a new scanner for the input string.
func (*StringLexer) HasNext ¶
func (l *StringLexer) HasNext() bool
HasNext returns true if there are tokens left, false if EOF has been reached.
func (*StringLexer) Next ¶
func (l *StringLexer) Next() Token
Next returns the next item from the input.
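A minimal usage sketch of driving the lexer with HasNext and Next; the input string here is a made-up placeholder, not a checked HBCI message:

import "fmt"

// drain prints every token the lexer produces until EOF.
func drain() {
	lexer := NewStringLexer("example", "HNHBK:1:3+42'")
	for lexer.HasNext() { // false once EOF has been reached
		tok := lexer.Next()
		fmt.Printf("pos %d, type %v, value %q\n", tok.Pos(), tok.Type(), tok.Value())
	}
}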
func (*StringLexer) SetEntryPoint ¶
func (l *StringLexer) SetEntryPoint(entryPoint StringLexerStateFn)
SetEntryPoint sets the initial state of the lexer. The lexer will reset itself so that the new entryPoint takes effect.
type StringLexerStateFn ¶
type StringLexerStateFn func(*StringLexer) StringLexerStateFn
StringLexerStateFn represents a state function for the lexer.
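This follows the classic state-function lexer design: each state function consumes a portion of the input and returns the function that should lex what comes next; returning nil ends the run. A schematic sketch (the lexer's scanning primitives are unexported, so the body is only indicated in comments):

// lexEntry is a hypothetical state function. It would consume some
// input via the lexer's unexported primitives, emit a token, and
// then hand control to the state handling what follows.
func lexEntry(l *StringLexer) StringLexerStateFn {
	// ... scan runes and emit a token here ...
	return nil // nil signals that lexing is complete
}

SetEntryPoint above installs such a function as the lexer's initial state.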
type SwiftLexer ¶
type SwiftLexer struct {
*StringLexer
}
A SwiftLexer parses the given input and emits SWIFT tokens.
func NewSwiftLexer ¶
func NewSwiftLexer(name, input string) *SwiftLexer
NewSwiftLexer returns a SwiftLexer ready for parsing the given input string.
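Because SwiftLexer embeds *StringLexer, it is driven the same way via HasNext and Next; a sketch (the input is a fabricated placeholder, not a valid SWIFT message):

func drainSwift() {
	lexer := NewSwiftLexer("example", ":20:TRANSACTION-1")
	for lexer.HasNext() {
		tok := lexer.Next()
		switch tok.Type() {
		case SWIFT_DATASET_START:
			// a new dataset begins
		case SWIFT_MESSAGE_SEPARATOR:
			// the current message is complete
		}
	}
}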
type Token ¶
type Token interface {
	Type() Type
	Value() string
	Pos() int
	IsSyntaxSymbol() bool
	Children() Tokens
	RawTokens() Tokens
}
Token represents an HBCI token.
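Everything needed to inspect a token, including nested group tokens, is available through the interface. For illustration, a hypothetical helper (not part of the package) that dumps a token tree using fmt:

import "fmt"

// dump prints a token and, recursively, its children.
func dump(t Token, indent string) {
	fmt.Printf("%s%v %q (pos %d)\n", indent, t.Type(), t.Value(), t.Pos())
	for _, child := range t.Children() {
		dump(child, indent+"  ")
	}
}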
func NewGroupToken ¶
NewGroupToken returns a Token composed of a group of sub tokens, with the given type typ. The Value method of such a Token returns the values of all sub tokens concatenated in the order provided by tokens.
type TokenLexer ¶
type TokenLexer struct {
// contains filtered or unexported fields
}
TokenLexer represents a lexer which composes grouped tokens out of tokens. It can also compose tokens out of already composed tokens.
func NewTokenLexer ¶
func NewTokenLexer(name string, input Tokens) *TokenLexer
NewTokenLexer returns a ready-to-use lexer which is fed by tokens and emits grouped tokens.
func (*TokenLexer) HasNext ¶
func (l *TokenLexer) HasNext() bool
HasNext returns true if there are tokens left, false if EOF has been reached.
func (*TokenLexer) Next ¶
func (l *TokenLexer) Next() Token
Next returns the next item from the input.
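The two lexers form a pipeline: collect the output of a StringLexer into a Tokens slice and hand it to a TokenLexer, which emits grouped tokens. A sketch under that assumption (names and input are illustrative):

// groupTokens runs both lexer stages over the given input.
func groupTokens(input string) Tokens {
	stringLexer := NewStringLexer("raw", input)
	var raw Tokens
	for stringLexer.HasNext() {
		raw = append(raw, stringLexer.Next())
	}
	tokenLexer := NewTokenLexer("grouped", raw)
	var grouped Tokens
	for tokenLexer.HasNext() {
		grouped = append(grouped, tokenLexer.Next())
	}
	return grouped
}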
type Tokens ¶
type Tokens []Token
Tokens is a collection of Token. It defines convenience methods on top of the collection.
type Type ¶
type Type int
Type identifies the type of lex tokens.
const (
	ERROR                        Type = iota // error occurred
	DATA_ELEMENT                             // Datenelement (DE)
	DATA_ELEMENT_SEPARATOR                   // Datenelement (DE)-Trennzeichen
	DATA_ELEMENT_GROUP                       // Datenelementgruppe (DEG)
	GROUP_DATA_ELEMENT                       // Gruppendatenelement (GD)
	GROUP_DATA_ELEMENT_SEPARATOR             // Gruppendatenelement (GD)-Trennzeichen
	SEGMENT                                  // Segment
	SEGMENT_HEADER                           // Segmentkopf
	SEGMENT_END_MARKER                       // Segmentende-Zeichen
	BINARY_DATA_LENGTH                       // Binärdaten Länge
	BINARY_DATA                              // Binärdaten
	BINARY_DATA_MARKER                       // Binärdatenkennzeichen
	ALPHA_NUMERIC                            // an
	TEXT                                     // txt
	DTAUS_CHARSET                            // dta
	NUMERIC                                  // num: 0-9 without leading 0
	DIGIT                                    // dig: 0-9 with optional leading 0
	FLOAT                                    // float
	YES_NO                                   // jn
	DATE                                     // dat
	VIRTUAL_DATE                             // vdat
	TIME                                     // tim
	IDENTIFICATION                           // id
	COUNTRY_CODE                             // ctr: ISO 3166-1 numeric
	CURRENCY                                 // cur: ISO 4217
	VALUE                                    // wrt
	EOF
)
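A consumer typically switches on a token's Type. A sketch handling a few of the syntax tokens (that the ERROR token's value carries the error text is an assumption):

func handle(l *StringLexer) {
	for l.HasNext() {
		tok := l.Next()
		switch tok.Type() {
		case ERROR:
			// lexing failed; the token value presumably carries the error text
			return
		case SEGMENT_END_MARKER:
			// one complete segment has been consumed
		case BINARY_DATA:
			// opaque binary payload, not to be interpreted as text
		}
	}
}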