Documentation
¶
Overview ¶
Package bytes provides a library for working with bytes or slices of bytes.
Index ¶
- func CutUntilToken(text, token []byte, startAt int, checkEsc bool) (cut []byte, pos int, found bool)
- func EncloseRemove(text, leftToken, rightToken []byte) (cut []byte, found bool)
- func EncloseToken(text, token, leftcap, rightcap []byte) (newtext []byte, found bool)
- func InReplace(text, allowed []byte, c byte) []byte
- func Indexes(text, token []byte) (idxs []int)
- func IsTokenAt(text, token []byte, p int) bool
- func MergeSpaces(in []byte) (out []byte)
- func ReadHexByte(data []byte, x int) (b byte, ok bool)
- func RemoveSpaces(in []byte) (out []byte)
- func SkipAfterToken(text, token []byte, startAt int, checkEsc bool) (int, bool)
- func SnippetByIndexes(s []byte, indexes []int, sniplen int) (snippets [][]byte)
- func SplitEach(data []byte, n int) (chunks [][]byte)
- func TokenFind(text, token []byte, startat int) (at int)
- func TrimNull(in []byte) (out []byte)
- func WordIndexes(s []byte, word []byte) (idxs []int)
- type Parser
- func (bp *Parser) AddDelimiters(delims []byte)
- func (bp *Parser) Delimiters() []byte
- func (bp *Parser) Read() (token []byte, d byte)
- func (bp *Parser) ReadLine() (line []byte, c byte)
- func (bp *Parser) ReadN(n int) (token []byte, d byte)
- func (bp *Parser) ReadNoSpace() (token []byte, d byte)
- func (bp *Parser) Remaining() []byte
- func (bp *Parser) RemoveDelimiters(delims []byte)
- func (bp *Parser) Reset(content, delims []byte)
- func (bp *Parser) SetDelimiters(delims []byte)
- func (bp *Parser) Skip() (c byte)
- func (bp *Parser) SkipHorizontalSpaces() (n int, c byte)
- func (bp *Parser) SkipLine() (c byte)
- func (bp *Parser) SkipN(n int) (c byte)
- func (bp *Parser) SkipSpaces() (n int, c byte)
- func (bp *Parser) Stop() (remain []byte, pos int)
- func (bp *Parser) UnreadN(n int) byte
Examples ¶
- CutUntilToken
- EncloseRemove
- EncloseToken
- InReplace
- Indexes
- IsTokenAt
- MergeSpaces
- Parser.AddDelimiters
- Parser.Delimiters
- Parser.Read
- Parser.ReadLine
- Parser.ReadN
- Parser.ReadNoSpace
- Parser.Remaining
- Parser.RemoveDelimiters
- Parser.Reset
- Parser.SetDelimiters
- Parser.Skip
- Parser.SkipHorizontalSpaces
- Parser.SkipLine
- Parser.SkipN
- Parser.SkipSpaces
- Parser.Stop
- Parser.UnreadN
- ReadHexByte
- RemoveSpaces
- SkipAfterToken
- SnippetByIndexes
- SplitEach
- TokenFind
- TrimNull
- WordIndexes
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func CutUntilToken ¶
func CutUntilToken(text, token []byte, startAt int, checkEsc bool) (cut []byte, pos int, found bool)
CutUntilToken cuts the text until the token is found.
If the token is found, it will return all bytes before the token, the position of the byte after the token, and true.
If no token is found, it will return false.
If checkEsc is true, a token that is prefixed with the escape character ('\') will be skipped, and the escape character will be removed.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte(`\\abc \def \deg`) cut, pos, found := libbytes.CutUntilToken(text, nil, 0, false) fmt.Printf("'%s' %d %t\n", cut, pos, found) cut, pos, found = libbytes.CutUntilToken(text, []byte("def"), 0, false) fmt.Printf("'%s' %d %t\n", cut, pos, found) cut, pos, found = libbytes.CutUntilToken(text, []byte("def"), 0, true) fmt.Printf("'%s' %d %t\n", cut, pos, found) cut, pos, found = libbytes.CutUntilToken(text, []byte("ef"), -1, true) fmt.Printf("'%s' %d %t\n", cut, pos, found) cut, pos, found = libbytes.CutUntilToken(text, []byte("hi"), 0, true) fmt.Printf("'%s' %d %t\n", cut, pos, found) }
Output: '\\abc \def \deg' -1 false '\\abc \' 10 true '\abc def \deg' 15 false '\abc \d' 10 true '\abc \def \deg' 15 false
func EncloseRemove ¶
EncloseRemove, given a text, finds the leftToken and rightToken, cuts the content in between them, and returns it with status true. It keeps doing this until no more leftToken and rightToken are found.
If no leftToken or rightToken is found, it will return the text as is and false.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte(`[[ A ]]-[[ B ]] C`) got, isCut := libbytes.EncloseRemove(text, []byte("[["), []byte("]]")) fmt.Printf("'%s' %t\n", got, isCut) got, isCut = libbytes.EncloseRemove(text, []byte("[["), []byte("}}")) fmt.Printf("'%s' %t\n", got, isCut) text = []byte(`// Copyright 2016-2018 "Shulhan <ms@kilabit.info>". All rights reserved.`) got, isCut = libbytes.EncloseRemove(text, []byte("<"), []byte(">")) fmt.Printf("'%s' %t\n", got, isCut) got, isCut = libbytes.EncloseRemove(text, []byte(`"`), []byte(`"`)) fmt.Printf("'%s' %t\n", got, isCut) got, isCut = libbytes.EncloseRemove(text, []byte(`/`), []byte(`/`)) fmt.Printf("'%s' %t\n", got, isCut) text = []byte(`/* TEST */`) got, isCut = libbytes.EncloseRemove(text, []byte(`/*`), []byte(`*/`)) fmt.Printf("'%s' %t\n", got, isCut) }
Output: '- C' true '[[ A ]]-[[ B ]] C' false '// Copyright 2016-2018 "Shulhan ". All rights reserved.' true '// Copyright 2016-2018 . All rights reserved.' true ' Copyright 2016-2018 "Shulhan <ms@kilabit.info>". All rights reserved.' true '' true
func EncloseToken ¶
EncloseToken find "token" in "text" and enclose it with bytes from "leftcap" and "rightcap". If at least one token found, it will return modified text with true status. If no token is found, it will return the same text with false status.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte(`// Copyright 2016-2018 "Shulhan <ms@kilabit.info>". All rights reserved.`) got, isChanged := libbytes.EncloseToken(text, []byte(`"`), []byte(`\`), []byte(`_`)) fmt.Printf("%t '%s'\n", isChanged, got) got, isChanged = libbytes.EncloseToken(text, []byte(`_`), []byte(`-`), []byte(`-`)) fmt.Printf("%t '%s'\n", isChanged, got) got, isChanged = libbytes.EncloseToken(text, []byte(`/`), []byte(`\`), nil) fmt.Printf("%t '%s'\n", isChanged, got) got, isChanged = libbytes.EncloseToken(text, []byte(`<`), []byte(`<`), []byte(` `)) fmt.Printf("%t '%s'\n", isChanged, got) }
Output: true '// Copyright 2016-2018 \"_Shulhan <ms@kilabit.info>\"_. All rights reserved.' false '// Copyright 2016-2018 "Shulhan <ms@kilabit.info>". All rights reserved.' true '\/\/ Copyright 2016-2018 "Shulhan <ms@kilabit.info>". All rights reserved.' true '// Copyright 2016-2018 "Shulhan << ms@kilabit.info>". All rights reserved.'
func InReplace ¶
InReplace replaces any characters in "text" that are not in "allowed" with the character "c". The replacement occurs inside the "text" backing storage, which means the passed "text" will be changed and returned.
Example ¶
package main import ( "fmt" "git.sr.ht/~shulhan/pakakeh.go/lib/ascii" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := libbytes.InReplace([]byte{}, []byte(ascii.LettersNumber), '_') fmt.Printf("%q\n", text) text = libbytes.InReplace([]byte("/a/b/c"), []byte(ascii.LettersNumber), '_') fmt.Printf("%q\n", text) _ = libbytes.InReplace(text, []byte(ascii.LettersNumber), '/') fmt.Printf("%q\n", text) }
Output: "" "_a_b_c" "/a/b/c"
func Indexes ¶
Indexes returns the indexes of all instances of "token" in "text", or nil if no "token" is found.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { fmt.Println(libbytes.Indexes([]byte(""), []byte("moo"))) fmt.Println(libbytes.Indexes([]byte("moo moomoo"), []byte{})) fmt.Println(libbytes.Indexes([]byte("moo moomoo"), []byte("moo"))) }
Output: [] [] [0 4 7]
func IsTokenAt ¶
IsTokenAt returns true if `text` at index `p` matches `token`, otherwise it returns false. An empty token always returns false.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte("Hello, world") tokenWorld := []byte("world") tokenWorlds := []byte("worlds") tokenEmpty := []byte{} fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenEmpty, 6)) fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenWorld, -1)) fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenWorld, 6)) fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenWorld, 7)) fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenWorld, 8)) fmt.Printf("%t\n", libbytes.IsTokenAt(text, tokenWorlds, 8)) }
Output: false false false true false false
func MergeSpaces ¶
MergeSpaces converts sequences of white spaces into a single space ' '.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { fmt.Printf("%s\n", libbytes.MergeSpaces([]byte(""))) fmt.Printf("%s\n", libbytes.MergeSpaces([]byte(" \t\v\r\n\r\n\fa \t\v\r\n\r\n\f"))) }
Output: a
func ReadHexByte ¶
ReadHexByte reads two hexadecimal characters from "data" starting from index "x" and converts them to a byte. It will return the byte and true if it reads exactly two hexadecimal characters, otherwise it will return 0 and false.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { fmt.Println(libbytes.ReadHexByte([]byte{}, 0)) fmt.Println(libbytes.ReadHexByte([]byte("x0"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("00"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("01"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("10"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("1A"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("1a"), 0)) fmt.Println(libbytes.ReadHexByte([]byte("1a"), -1)) }
Output: 0 false 0 false 0 true 1 true 16 true 26 true 26 true 0 false
func RemoveSpaces ¶
RemoveSpaces removes all spaces from the input in.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( in = []byte(" a\nb\tc d\r") out = libbytes.RemoveSpaces(in) ) fmt.Printf("%s\n", out) }
Output: abcd
func SkipAfterToken ¶
SkipAfterToken skips all bytes until the matched "token" is found and returns the index after the token and boolean true.
If "checkEsc" is true, a token that is prefixed with the escape character '\' will be considered a non-matching token.
If no token is found, it will return -1 and boolean false.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte(`abc \def ghi`) fmt.Println(libbytes.SkipAfterToken(text, []byte("def"), -1, false)) fmt.Println(libbytes.SkipAfterToken(text, []byte("def"), 0, true)) fmt.Println(libbytes.SkipAfterToken(text, []byte("deg"), 0, false)) fmt.Println(libbytes.SkipAfterToken(text, []byte("deg"), 0, true)) fmt.Println(libbytes.SkipAfterToken(text, []byte("ef"), 0, true)) fmt.Println(libbytes.SkipAfterToken(text, []byte("hi"), 0, true)) }
Output: 8 true -1 false -1 false -1 false 8 true 12 true
func SnippetByIndexes ¶
SnippetByIndexes takes a snippet in between each index with a minimum snippet length. The sniplen is the length before and after the index, not the length of the whole snippet.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { s := []byte("// Copyright 2018, Shulhan <ms@kilabit.info>. All rights reserved.") indexes := []int{3, 20, len(s) - 4} snippets := libbytes.SnippetByIndexes(s, indexes, 5) for _, snip := range snippets { fmt.Printf("%s\n", snip) } }
Output: // Copyr 18, Shulha reserved.
func SplitEach ¶
SplitEach splits the data into chunks of n bytes. If n is less than or equal to zero, it will return the whole data as a single chunk.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var data = []byte(`Hello`) fmt.Printf("%s\n", libbytes.SplitEach(data, 0)) fmt.Printf("%s\n", libbytes.SplitEach(data, 1)) fmt.Printf("%s\n", libbytes.SplitEach(data, 2)) fmt.Printf("%s\n", libbytes.SplitEach(data, 5)) fmt.Printf("%s\n", libbytes.SplitEach(data, 10)) }
Output: [Hello] [H e l l o] [He ll o] [Hello] [Hello]
func TokenFind ¶
TokenFind returns the first index of the matched token in the text, starting at a custom index. If the "startat" parameter is less than 0, it will be set to 0. If the token is empty or no token is found, it will return -1.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte("// Copyright 2018, Shulhan <ms@kilabit.info>. All rights reserved.") fmt.Println(libbytes.TokenFind(text, []byte{}, 0)) fmt.Println(libbytes.TokenFind(text, []byte("right"), -1)) fmt.Println(libbytes.TokenFind(text, []byte("."), 0)) fmt.Println(libbytes.TokenFind(text, []byte("."), 42)) fmt.Println(libbytes.TokenFind(text, []byte("."), 48)) fmt.Println(libbytes.TokenFind(text, []byte("d."), 0)) }
Output: -1 7 38 44 65 64
func TrimNull ¶
TrimNull removes 0 values ("\0" or NULL in C) at the leading and trailing of in.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var in = []byte{0, 'H', 'e', 'l', 'l', 'o', 0, 0} in = libbytes.TrimNull(in) fmt.Printf(`%s`, in) }
Output: Hello
func WordIndexes ¶
WordIndexes returns the indexes of all instances of word in s, as long as the word is separated by a space or is at the beginning or end of s.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { text := []byte("moo moomoo moo") fmt.Println(libbytes.WordIndexes(text, []byte("mo"))) fmt.Println(libbytes.WordIndexes(text, []byte("moo"))) fmt.Println(libbytes.WordIndexes(text, []byte("mooo"))) }
Output: [] [0 11] []
Types ¶
type Parser ¶
type Parser struct {
// contains filtered or unexported fields
}
Parser implements a tokenize parser for a stream of bytes using one or more delimiters as separators between tokens.
func (*Parser) AddDelimiters ¶
AddDelimiters adds more delimiters to the current parser.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(` a = b ; c = d `) delims = []byte(`=`) parser = libbytes.NewParser(content, delims) ) token, d := parser.ReadNoSpace() fmt.Printf("%s:%c\n", token, d) parser.AddDelimiters([]byte{';'}) token, d = parser.ReadNoSpace() fmt.Printf("%s:%c\n", token, d) }
Output: a:= b:;
func (*Parser) Delimiters ¶
Delimiters returns a copy of the current delimiters.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a=b;c=d;`) delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) ) fmt.Printf("%s\n", parser.Delimiters()) }
Output: =;
func (*Parser) Read ¶
Read reads a token until one of the delimiters is found. If one of the delimiters matches, it will be returned as d. When the end of content is encountered, the returned token may be non-empty but d will be zero.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte("a = b; ") delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) ) token, c := parser.Read() fmt.Printf("token:'%s' c:%q\n", token, c) token, c = parser.Read() fmt.Printf("token:'%s' c:%q\n", token, c) token, c = parser.Read() fmt.Printf("token:'%s' c:%q\n", token, c) }
Output: token:'a ' c:'=' token:' b' c:';' token:' ' c:'\x00'
func (*Parser) ReadLine ¶
ReadLine reads until it finds a new line ('\n') or the end of content, ignoring all delimiters. The returned line will not contain '\n'.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte("a=b;\nc=d;") delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) ) token, c := parser.ReadLine() fmt.Printf("token:%s c:%q\n", token, c) token, c = parser.ReadLine() fmt.Printf("token:%s c:%q\n", token, c) }
Output: token:a=b; c:'\n' token:c=d; c:'\x00'
func (*Parser) ReadN ¶
ReadN reads exactly n characters ignoring the delimiters. It will return the token and the character after n, or 0 if end-of-content.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a=b;c=d;`) delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) ) token, c := parser.ReadN(2) fmt.Printf("token:%s c:%q\n", token, c) token, c = parser.ReadN(0) fmt.Printf("token:%s c:%q\n", token, c) token, c = parser.ReadN(10) fmt.Printf("token:%s c:%q\n", token, c) }
Output: token:a= c:'b' token: c:'b' token:b;c=d; c:'\x00'
func (*Parser) ReadNoSpace ¶
ReadNoSpace reads the next token by ignoring the leading spaces, even if a space is one of the delimiters. The returned token will have no trailing spaces.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(` a = b ;`) delims = []byte(`=;`) parser = libbytes.NewParser(content, delims) ) for { token, d := parser.ReadNoSpace() fmt.Printf("%s:%q\n", token, d) if d == 0 { break } } }
Output: a:'=' b:';' :'\x00'
func (*Parser) Remaining ¶
Remaining returns a copy of the un-parsed content.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(` a = b ;`) delims = []byte(`=;`) parser = libbytes.NewParser(content, delims) ) token, d := parser.ReadNoSpace() remain := parser.Remaining() fmt.Printf("token:%s d:%c remain:%s\n", token, d, remain) }
Output: token:a d:= remain: b ;
func (*Parser) RemoveDelimiters ¶
RemoveDelimiters removes the delimiters delims from the current delimiters.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(` a = b ; c = d `) delims = []byte(`=;`) parser = libbytes.NewParser(content, delims) ) token, _ := parser.ReadNoSpace() fmt.Printf("%s\n", token) parser.RemoveDelimiters([]byte{';'}) token, _ = parser.ReadNoSpace() fmt.Printf("%s\n", token) }
Output: a b ; c
func (*Parser) Reset ¶
Reset resets the Parser by setting all internal state to the new content and delimiters.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a.b.c;`) delims = []byte(`.`) parser = libbytes.NewParser(content, delims) ) parser.Read() parser.Reset(content, delims) remain, pos := parser.Stop() fmt.Printf("remain:%s pos:%d\n", remain, pos) }
Output: remain:a.b.c; pos:0
func (*Parser) SetDelimiters ¶
SetDelimiters replaces the current delimiters with delims.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a.b.c;`) delims = []byte(`.`) parser = libbytes.NewParser(content, delims) token []byte ) token, _ = parser.Read() fmt.Println(string(token)) parser.SetDelimiters([]byte(`;`)) token, _ = parser.Read() fmt.Println(string(token)) }
Output: a b.c
func (*Parser) Skip ¶
Skip skips parsing the token until one of the delimiters is found or end-of-content is reached.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a = b; c = d;`) delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) token []byte ) parser.Skip() token, _ = parser.ReadNoSpace() fmt.Println(string(token)) parser.Skip() token, _ = parser.ReadNoSpace() fmt.Println(string(token)) parser.Skip() token, _ = parser.ReadNoSpace() fmt.Println(string(token)) }
Output: b d
func (*Parser) SkipHorizontalSpaces ¶
SkipHorizontalSpaces skips the space (" "), tab ("\t"), carriage return ("\r"), and form feed ("\f") characters; it returns the number of spaces skipped and the first non-space character, or 0 if it reaches end-of-content.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(" \t\r\fA. \nB.") delims = []byte{'.'} parser = libbytes.NewParser(content, delims) n int ) n, _ = parser.SkipHorizontalSpaces() token, d := parser.Read() fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) n, _ = parser.SkipHorizontalSpaces() token, d = parser.Read() // The token include \n. fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) n, _ = parser.SkipHorizontalSpaces() token, d = parser.Read() // The token include \n. fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) }
Output: n:4 token:A delim:'.' n:1 token: B delim:'.' n:0 token: delim:'\x00'
func (*Parser) SkipLine ¶
SkipLine skips all characters until a new line. It will return 0 if it reaches EOF.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte("a\nb\nc\nd e\n") delims = []byte("\n") parser = libbytes.NewParser(content, delims) ) parser.SkipLine() token, _ := parser.Read() fmt.Printf("token:'%s'\n", token) parser.SkipLine() token, _ = parser.Read() fmt.Printf("token:'%s'\n", token) }
Output: token:'b' token:'d e'
func (*Parser) SkipN ¶
SkipN skips exactly N characters ignoring delimiters. It will return the next character after N, or 0 if it reaches end-of-content.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a=b;c=d;`) delims = []byte{'=', ';'} parser = libbytes.NewParser(content, delims) token []byte c byte ) c = parser.SkipN(2) fmt.Printf("Skip: %c\n", c) token, _ = parser.ReadNoSpace() fmt.Println(string(token)) c = parser.SkipN(2) fmt.Printf("Skip: %c\n", c) token, _ = parser.ReadNoSpace() fmt.Println(string(token)) _ = parser.SkipN(2) token, _ = parser.ReadNoSpace() fmt.Println(string(token)) }
Output: Skip: b b Skip: d d
func (*Parser) SkipSpaces ¶
SkipSpaces skips all space characters (' ', '\f', '\n', '\r', '\t') and returns the number of spaces skipped and the first non-space character, or 0 if it reaches end-of-content.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(" \t\r\fA. \nB.") delims = []byte{'.'} parser = libbytes.NewParser(content, delims) n int ) n, _ = parser.SkipSpaces() token, d := parser.Read() fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) n, _ = parser.SkipSpaces() token, d = parser.Read() // The token include \n. fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) n, _ = parser.SkipSpaces() token, d = parser.Read() // The token include \n. fmt.Printf("n:%d token:%s delim:%q\n", n, token, d) }
Output: n:4 token:A delim:'.' n:2 token:B delim:'.' n:0 token: delim:'\x00'
func (*Parser) Stop ¶
Stop stops the parser, returns the remaining unparsed content and its last position, and then calls Reset to reset the internal state back to zero.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( content = []byte(`a.b.c;`) delims = []byte(`.`) parser = libbytes.NewParser(content, delims) remain []byte pos int ) parser.Read() remain, pos = parser.Stop() fmt.Printf("remain:%s pos:%d\n", remain, pos) parser.Reset(content, []byte(`;`)) parser.Read() remain, pos = parser.Stop() fmt.Printf("remain:%s pos:%d\n", remain, pos) }
Output: remain:b.c; pos:2 remain: pos:6
func (*Parser) UnreadN ¶
UnreadN unreads N characters and returns the character it points to. If N is greater than the current position index, it will reset the read pointer index back to zero.
Example ¶
package main import ( "fmt" libbytes "git.sr.ht/~shulhan/pakakeh.go/lib/bytes" ) func main() { var ( parser = libbytes.NewParser([]byte(`a,b.c/d`), []byte(`,./`)) token []byte c byte ) parser.Read() parser.Read() parser.Read() parser.Read() // All content should be readed now. c = parser.UnreadN(2) // Move the index to '/'. fmt.Printf("UnreadN(2): %c\n", c) token, c = parser.Read() fmt.Printf("Read: %s %c\n", token, c) // Position 99 greater than current index, this will reset index to 0. c = parser.UnreadN(99) fmt.Printf("UnreadN(99): %c\n", c) token, c = parser.Read() fmt.Printf("Read: %s %c\n", token, c) }
Output: UnreadN(2): / Read: / UnreadN(99): a Read: a ,