vfsindex

package module
v0.1.18 Latest
Published: Sep 29, 2021 License: Apache-2.0 Imports: 32 Imported by: 1

README

vfs-index

vfs-index is a simple indexer for simple data collections (JSON, CSV, MessagePack, ...) on the OS's virtual file system. The indexer requires no daemon process and no locking.

$ cat test.json
[
  {
    "id": 130988433,
    "name": "2011_04_24-1.m4v"
  },
  {
    "id": 130988434,
    "name": "2011_04_24-2.mp4"
  },
  {
    "id": 130988435,
    "name": "2011_04_24-3.mp4"
  },

If this data grows large (100 million rows or so), searching it with jq becomes very slow.

Index this data. If indexing is interrupted with Ctrl-C, it resumes the next time the index command is run.

$ go get github.com/kazu/vfs-index/cmd/vfs-index
$ vfs-index index --index=../idx --column=name --table=test --data=./
100% |██████████████████████████████████████████████████| (2316/2316, 2072 it/s) [1s:0s]
$

Search the data:

$ vfs-index search  -q='name.search("2011_04")' --index=../idx --column=name --table=test --data=./
{"id":130988433,"name":"2011_04_24-1.m4v"}
{"id":130988434,"name":"2011_04_24-2.mp4"}
{"id":130988435,"name":"2011_04_24-3.mp4"}
index merging done [==============================================================] 67507 / 67507

Search by a numeric attribute/field:

$ vfs-index search  -q='id == 130988433' --index=../idx --column=name --table=test --data=./
{"id":130988433,"name":"2011_04_24-1.m4v"}

Merge the index:

$ vfs-index merge --index=../idx --column=name --table=test --data=./
index merging done [==============================================================] 67507 / 67507

Supported queries

If an attribute/field is numeric, records can be selected by arithmetic comparison. The supported comparison operators are ==, >=, <, <=, and >.

$ vfs-index search -q='id == 130988471' --index=../idx --table=test --data=./

String search:

$ vfs-index search -q='name.search("フロンターレ")' --index=../idx --table=test --data=./

Use in Go

Import the package:

import vfs "github.com/kazu/vfs-index"

Indexing:

func DefaultOption() vfs.Option {
	return vfs.RootDir("/Users/xtakei/vfs-idx")
}


idx, e := vfs.Open("/Users/xtakei/example/data", DefaultOption())
e = idx.Regist("test", "id")
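
Putting the pieces together, a minimal complete indexing program could look like the following sketch (the paths are placeholders, and the error handling is added here rather than taken from the package examples):

package main

import (
	"log"

	vfs "github.com/kazu/vfs-index"
)

func main() {
	// Open the data directory and keep index files under a separate root directory.
	idx, err := vfs.Open("/Users/xtakei/example/data", vfs.RootDir("/Users/xtakei/vfs-idx"))
	if err != nil {
		log.Fatal(err)
	}

	// Index the "id" and "name" columns of table "test" (test.json in the data directory).
	if err := idx.Regist("test", "id"); err != nil {
		log.Fatal(err)
	}
	if err := idx.Regist("test", "name"); err != nil {
		log.Fatal(err)
	}
}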

Searching:

func DefaultOption() vfs.Option {
	return vfs.RootDir("/Users/xtakei/vfs-idx")
}

// search number index
idx, e := vfs.Open("/Users/xtakei/example/data", DefaultOption())
sCond := idx.On("test", vfs.ReaderColumn("id"), vfs.Output(vfs.MapInfOutput))

record := sCond.Select(func(m vfs.SearchElem) bool {
    return m.Op("id", "<", 122878513)
}).First()

// search by matching substring
sCondName := idx.On("test", vfs.ReaderColumn("name"), vfs.Output(vfs.MapInfOutput))
matches2 := sCondName.Match("ロシア人").All()
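
The query-string syntax used by the CLI is also reachable from Go via SearchCond.Query. A sketch, continuing from the snippet above and assuming Query accepts the same expressions as the -q flag:

// Query with the same expression syntax as `vfs-index search -q=...`.
sCondQuery := idx.On("test", vfs.ReaderColumn("name"), vfs.Output(vfs.MapInfOutput))
finder := sCondQuery.Query(`name.search("2011_04")`)

fmt.Println("hits:", finder.Count()) // number of matching records
fmt.Println(finder.First())          // first match (map form with MapInfOutput)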

Stop index merging after 1 minute:


idx, e := vfs.Open("/Users/xtakei/example/data", DefaultOption())
sCond := idx.On("test", vfs.ReaderColumn("id"), vfs.Output(vfs.MapInfOutput))
sCond.StartMerging()
time.Sleep(1 * time.Minute)
sCond.CancelAndWait()

TODO

  • write file list
  • read file list
  • write number column index
  • read number column index
  • write tri-gram column index
  • support string search
  • support index merging
  • add comment
  • csv support
  • msgpack support
  • support compression data ( lz4(?) )

Documentation

Index

Constants

const (
	RECORD_WRITING byte = iota
	RECORD_WRITTEN
	RECORD_MERGING
	RECORD_MERGED
)
const (
	JsonOutput = iota
	MapInfOutput
)
const (
	KeyStateGot int = 1
	// KeyStateRun int = 2
	// KeyStateFlase int = 3
	KeyStateFinish int = 4
)
const (
	MAX_IDX_CACHE      = 512
	MIN_NEGATIVE_CACHE = 8
)
const (
	RECORDS_INIT = 64
)

Variables

var (
	ErrInvalidTableName = errors.New("table name is invalid")
	ErrNotFoundFile     = errors.New("file not found")
	ErrInvalidIdxName   = errors.New("idx file name is invalid")
	ErrNotHasColumn     = errors.New("record dosent have this column")
	ErrNotIndexDir      = errors.New("Indexfile must be Index top directory")
	ErrStopTraverse     = errors.New("stop traverse")

	ErrParameterInvalid = errors.New("parameter invalid")
	ErrNotSupported     = errors.New("not supported")

	ErrMustCsvHeader = errors.New("not set csv header")
)
var CsvHeader string
var DefaultDecoder []Decoder = []Decoder{
	Decoder{
		FileType: "csv",
		Encoder: func(v interface{}) ([]byte, error) {
			return json.Marshal(v)
		},
		Decoder: func(raw []byte, v interface{}) error {

			return nil
		},
		Tokenizer: func(ctx context.Context, rio io.Reader, f *File) <-chan *Record {
			ch := make(chan *Record, 5)

			go func() {
				buf, err := ioutil.ReadAll(rio)

				if err != nil {
					defer close(ch)

				}

				s := string(buf)

				lines := strings.Split(s, "\n")
				CsvHeader = lines[0]
				lines = lines[1:]
				cur := len(CsvHeader) + 1
				for _, line := range lines {
					ch <- &Record{fileID: f.id, offset: int64(cur), size: int64(len(line))}
					cur += len(line) + 1
				}
				close(ch)
			}()
			return ch
		},
	},
	Decoder{
		FileType: "json",
		Encoder: func(v interface{}) ([]byte, error) {
			b, e := json.Marshal(v)
			if e != nil {
				return b, e
			}
			var out bytes.Buffer
			json.Indent(&out, b, "", "\t")
			return out.Bytes(), e

		},
		Decoder: func(raw []byte, v interface{}) error {
			e := json.Unmarshal(raw, v)
			if e != nil {
				return e
			}
			if value, ok := v.(*(map[string]interface{})); ok {
				for key, v := range *value {
					if f64, ok := v.(float64); ok {
						(*value)[key] = uint64(f64)
					}
				}
			}
			return nil

		},
		Tokenizer: func(ctx context.Context, rio io.Reader, f *File) <-chan *Record {
			ch := make(chan *Record, 100)
			go func() {
				dec := json.NewDecoder(rio)

				var rec *Record

				nest := int(0)
				defer close(ch)
				for {
					token, err := dec.Token()
					if err == io.EOF {
						break
					}
					switch token {
					case json.Delim('{'):
						nest++
						if nest == 1 {
							rec = &Record{fileID: f.id, offset: dec.InputOffset() - 1}
						}

					case json.Delim('}'):
						nest--
						if nest == 0 {
							rec.size = dec.InputOffset() - rec.offset
							ch <- rec
						}
					}
					select {
					case <-ctx.Done():
						return
					default:
					}
				}

			}()
			return ch
		},
	},
}
var GGlobCache map[string][]string = map[string][]string{}
var LogWriter io.StringWriter = os.Stderr
var Opt optionState = optionState{
	// contains filtered or unexported fields
}
var StringOp map[string]CondOp = map[string]CondOp{
	"==": CondOpEq,
	"<=": CondOpLe,
	"<":  CondOpLt,
	">=": CondOpGe,
	">":  CondOpGe,
}
var ZERO_TIME time.Time = time.Time{}

Functions

func AddingDir

func AddingDir(s string, n int) string

func ColumnPath

func ColumnPath(tdir, col string, isNum bool) string

func ColumnPathWithStatus

func ColumnPathWithStatus(tdir, col string, isNum bool, s, e string, status byte) string

func DecodeTri added in v0.1.7

func DecodeTri(v uint64) (s string)

func EncodeTri

func EncodeTri(s string) (result []string)

func FileExist

func FileExist(filename string) bool

func FileListPath

func FileListPath(tabledir string) string

func FileListPathWithAdding

func FileListPathWithAdding(tabledir string, s, e uint64, usePid bool) string

func FileMtime

func FileMtime(filename string) time.Time

func GetInode

func GetInode(info os.FileInfo) uint64

func InitIdxCaches

func InitIdxCaches(i *IdxCaches)

func IsEqQRecord added in v0.1.7

func IsEqQRecord(src, dst *query.Record) bool

func JoinExt

func JoinExt(s ...string) string

func LessEqString

func LessEqString(s, d string) (isLess bool)

LessEqString ... compares strings; returns true if s is equal to or less than d

func Log

func Log(l LogLevel, f string, args ...interface{})

func LogIsDebug added in v0.1.7

func LogIsDebug() bool

func MesureElapsed added in v0.1.7

func MesureElapsed() func(string) string

func SafeRename

func SafeRename(src, dst string) error

func SaveCmdConfig added in v0.1.14

func SaveCmdConfig(dir string, conf *ConfigFile) error

func TriKeys

func TriKeys(s string) (result []uint64)

func TrimFilePathSuffix

func TrimFilePathSuffix(path string) string

func Untar added in v0.1.6

func Untar(tarName, xpath string) (err error)

Types

type BufWriterIO

type BufWriterIO struct {
	*os.File
	// contains filtered or unexported fields
}

func NewBufWriterIO

func NewBufWriterIO(o *os.File, n int) *BufWriterIO

func (*BufWriterIO) Flush

func (b *BufWriterIO) Flush() (e error)

func (*BufWriterIO) Write

func (b *BufWriterIO) Write(p []byte) (n int, e error)

func (*BufWriterIO) WriteAt

func (b *BufWriterIO) WriteAt(p []byte, offset int64) (n int, e error)
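
BufWriterIO wraps an *os.File behind a write buffer of n bytes before data reaches the file; a usage sketch (the file name and buffer size are illustrative, and the buffering behavior is inferred from the constructor signature):

f, err := os.Create("/tmp/out.idx")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// Buffer writes in 64 KiB chunks before they reach the underlying file.
w := vfs.NewBufWriterIO(f, 64*1024)
if _, err := w.Write([]byte("record data")); err != nil {
	log.Fatal(err)
}
if err := w.Flush(); err != nil { // flush buffered bytes to the file
	log.Fatal(err)
}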

type ColGetter added in v0.1.8

type ColGetter func() *Column

type Column

type Column struct {
	Table   string
	Name    string
	Dir     string
	Flist   *FileList
	IsNum   bool
	Dirties Records
	// contains filtered or unexported fields
}

func NewColumn

func NewColumn(flist *FileList, table, col string) *Column

func (*Column) CleanDirTest added in v0.1.14

func (c *Column) CleanDirTest(mode int)

func (*Column) CleanDirs added in v0.1.12

func (c *Column) CleanDirs() (cnt int)

func (*Column) Path

func (c *Column) Path() string

func (*Column) RecordEqInt

func (c *Column) RecordEqInt(v int) (record *Record)

func (*Column) TableDir

func (c *Column) TableDir() string

func (*Column) Update

func (c *Column) Update(d time.Duration) error

func (*Column) WriteDirties

func (c *Column) WriteDirties()

type CondFn

type CondFn func(f *IndexFile) CondType

CondFn ... function to check condition in Select()

type CondOp

type CondOp byte
const (
	CondOpEq CondOp = iota
	CondOpLe
	CondOpLt
	CondOpGe
	CondOpGt
)

type CondType

type CondType byte

CondType ... Condition Type to traverse in Select()

const (
	CondTrue CondType = iota
	CondSkip
	CondFalse
	CondLazy
)

type ConfigBase added in v0.1.14

type ConfigBase struct {
	Dir      string
	IndexDir string
	Table    string
}

type ConfigFile added in v0.1.14

type ConfigFile struct {
	Name2Index map[string]*ConfigIndex
}

func LoadCmdConfig added in v0.1.14

func LoadCmdConfig(dir string) (*ConfigFile, error)

type ConfigIndex added in v0.1.14

type ConfigIndex struct {
	Path string
	ConfigBase
}

type CountFn added in v0.1.18

type CountFn func(SkipFn) int

CountFn ... function to return count of records

type Decoder

type Decoder struct {
	FileType  string
	Decoder   func([]byte, interface{}) error
	Encoder   func(interface{}) ([]byte, error)
	Tokenizer func(context.Context, io.Reader, *File) <-chan *Record
}

func GetDecoder

func GetDecoder(fname string) (dec Decoder, e error)

GetDecoder ... returns the format Decoder/Encoder for fname (file name)
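
A sketch of selecting a Decoder by file name and round-tripping a record through its Encoder and Decoder (the file name and record values are illustrative):

dec, err := vfs.GetDecoder("data/test/test.json") // choose the decoder from the file name (FileType "json")
if err != nil {
	log.Fatal(err)
}

b, err := dec.Encoder(map[string]interface{}{"id": 130988433, "name": "2011_04_24-1.m4v"})
if err != nil {
	log.Fatal(err)
}

var rec map[string]interface{}
if err := dec.Decoder(b, &rec); err != nil {
	log.Fatal(err)
}
// Note: the default JSON Decoder converts whole-number float64 values to uint64.
fmt.Printf("%#v\n", rec)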

type File

type File struct {
	// contains filtered or unexported fields
}

func FileFromFbs

func FileFromFbs(r io.Reader) *File

func NewFile

func NewFile(id uint64, name string, index_at int64) *File

func (*File) Records

func (f *File) Records(ctx context.Context, dir string) <-chan *Record

FIXME: support other format

func (*File) ToFbs

func (f *File) ToFbs(l *FileList) []byte

func (*File) Write

func (f *File) Write(l *FileList) error

type FileList

type FileList struct {
	Dir       string
	IndexedAt time.Time
	Files     []*File
}

func CreateFileList

func CreateFileList(tdir string) (flist *FileList, err error)

func OpenFileList

func OpenFileList(tdir string) (flist *FileList)

func (*FileList) FPath

func (l *FileList) FPath(id uint64) (path string, e error)

func (*FileList) Reload

func (l *FileList) Reload() error

func (*FileList) Store

func (l *FileList) Store()

func (*FileList) Update

func (flist *FileList) Update()

type GetColumn

type GetColumn func() *Column

type GlobCache

type GlobCache struct {
	Keys  []string
	ReqCh chan GlobRequest
	// contains filtered or unexported fields
}

func (*GlobCache) Add

func (g *GlobCache) Add(key, value string)

func (*GlobCache) Finish

func (g *GlobCache) Finish(key string)

func (*GlobCache) Get

func (g *GlobCache) Get(pat string) *query.PathInfoList

func (*GlobCache) GetCh

func (g *GlobCache) GetCh(pat string) <-chan string

func (*GlobCache) PrepareRead

func (g *GlobCache) PrepareRead(pat string) *query.PathInfoList

func (GlobCache) Run

func (g GlobCache) Run(key string)

func (*GlobCache) Start

func (g *GlobCache) Start() error

type GlobRequest

type GlobRequest struct {
	// contains filtered or unexported fields
}

type IdxCache

type IdxCache struct {
	FirstEnd Range
	Pos      RecordPos
}

type IdxCaches

type IdxCaches struct {
	// contains filtered or unexported fields
}

func NewIdxCaches

func NewIdxCaches() *IdxCaches

type IdxInfo

type IdxInfo struct {
	// contains filtered or unexported fields
}

type IdxPathInfo

type IdxPathInfo string

func (IdxPathInfo) Greater

func (s IdxPathInfo) Greater(d IdxPathInfo) bool

func (IdxPathInfo) Info

func (p IdxPathInfo) Info() (col string, isNum bool, first, last, fileID uint64, offset int64)

func (IdxPathInfo) IsMerged

func (p IdxPathInfo) IsMerged() bool

func (IdxPathInfo) Less

func (s IdxPathInfo) Less(d IdxPathInfo) bool

col, isNum, first, last, fileID, offset

func (IdxPathInfo) TDir

func (p IdxPathInfo) TDir() string

func (IdxPathInfo) Table

func (p IdxPathInfo) Table() string

type IdxWriter

type IdxWriter struct {
	IsNum        bool
	ValueEncoder func(r *Record) []string
}

type IndexFile

type IndexFile struct {
	Path  string
	Ftype IndexFileType
	// contains filtered or unexported fields
}

IndexFile ... file entity of index file in index table directories

func ListMergedIndex added in v0.1.2

func ListMergedIndex(c *Column, fn CondFn, opts ...SelectOption) (result []*IndexFile)

func NewIndexFile

func NewIndexFile(c *Column, path string) *IndexFile

func OpenIndexFile

func OpenIndexFile(c *Column) (idxFile *IndexFile)

func (IndexFile) Column added in v0.1.12

func (f IndexFile) Column() *Column

func (*IndexFile) CountNearByKeyFn added in v0.1.18

func (f *IndexFile) CountNearByKeyFn(key uint64, less bool) CountFn

CountNearByKeyFn ... returns a function that counts records near-matching the key

func (*IndexFile) FindByKey

func (f *IndexFile) FindByKey(key uint64) (result []*IndexFile)

func (*IndexFile) FindNearByKey

func (f *IndexFile) FindNearByKey(key uint64, less bool) (results []*IndexFile)

func (*IndexFile) First

func (f *IndexFile) First() *IndexFile

First ... Find first IndexFile.

func (*IndexFile) FirstRecord

func (f *IndexFile) FirstRecord() *Record

func (*IndexFile) IdxInfo

func (f *IndexFile) IdxInfo() IndexPathInfo

func (*IndexFile) Init

func (f *IndexFile) Init()

func (*IndexFile) IsType

func (f *IndexFile) IsType(t IndexFileType) bool

func (*IndexFile) KeyRecord

func (f *IndexFile) KeyRecord() (result *query.InvertedMapNum)

func (*IndexFile) KeyRecords

func (f *IndexFile) KeyRecords() *query.KeyRecordList

func (*IndexFile) Last

func (f *IndexFile) Last() *IndexFile

Last ... Find last IndexFile.

func (*IndexFile) LastRecord

func (f *IndexFile) LastRecord() *Record

func (*IndexFile) RecordByKey2 added in v0.1.8

func (f *IndexFile) RecordByKey2(key uint64) RecordFn

RecordByKey2 ... returns a function getting a slice of query.Record. Deprecated: should use recordByKey.

func (*IndexFile) RecordNearByKeyFn added in v0.1.18

func (f *IndexFile) RecordNearByKeyFn(key uint64, less bool) RecordFn

RecordNearByKeyFn ... returns a function for near-key matching of query.Record

func (*IndexFile) Select

func (f *IndexFile) Select(opts ...SelectOption) (err error)

type IndexFileType

type IndexFileType int
const (
	IdxFileType_None IndexFileType = 0
	IdxFileType_Dir  IndexFileType = 1 << iota
	IdxFileType_Merge
	IdxFileType_Write
	IdxFileType_MyColum
	IdxFileType_NoComplete
)

type IndexPathInfo

type IndexPathInfo struct {
	// contains filtered or unexported fields
}

func NewIndexInfo

func NewIndexInfo(fileID uint64, offset int64, first uint64, last uint64) IndexPathInfo

type Indexer

type Indexer struct {
	Root string
	Cols map[string]*Column
	// contains filtered or unexported fields
}

func Open

func Open(dpath string, opts ...Option) (*Indexer, error)

Open ... opens the index. dpath is the data directory.

func (*Indexer) On

func (idx *Indexer) On(table string, opts ...Option) *SearchCond

On ... returns a SearchCond (search element). table is the table name; the column is set by ReaderColumn("column name").

func (*Indexer) Regist

func (idx *Indexer) Regist(table, col string) error

Regist ... indexes the specified table and col (column)

type InfoFn added in v0.1.8

type InfoFn func(RecordInfoArg)

InfoFn ... function to find record information

type KeySetter added in v0.1.8

type KeySetter func(interface{}) []uint64

type LogLevel

type LogLevel int
const (
	LOG_ERROR LogLevel = iota
	LOG_WARN
	LOG_DEBUG
)
var CurrentLogLoevel LogLevel = LOG_DEBUG
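
Log level and log destination are package-level variables; a short sketch of quieting the logger (the exported name CurrentLogLoevel is spelled as in the package):

// Only log errors, and write log output to stdout instead of the default stderr.
vfs.CurrentLogLoevel = vfs.LOG_ERROR
vfs.LogWriter = os.Stdout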

type Option

type Option func(*optionState)

type ReaderOpt map[string]string

func EnableCleanAfterMerge added in v0.1.12

func EnableCleanAfterMerge(t bool) Option

func MergeDuration

func MergeDuration(d time.Duration) Option

func MergeOnSearch

func MergeOnSearch(enable bool) Option

MergeOnSearch ... enables merging the index during search

func Output

func Output(t Outputer) Option

func ReaderColumn

func ReaderColumn(s string) Option

ReaderColumn ... config for column name for search/read

func RegitConcurrent

func RegitConcurrent(n int) Option

func RootDir

func RootDir(s string) Option

RootDir ... set index top directory
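
The option constructors above are functional options for Open and On; a sketch combining several of them at Open (whether each option takes effect at Open rather than On is an assumption, and the comments describe the presumed meaning):

idx, err := vfs.Open("/Users/xtakei/example/data",
	vfs.RootDir("/Users/xtakei/vfs-idx"), // index top directory
	vfs.RegitConcurrent(4),               // indexing concurrency (presumed)
	vfs.MergeDuration(30*time.Second),    // duration for index merging (presumed)
	vfs.MergeOnSearch(true),              // also merge the index while searching
)
if err != nil {
	log.Fatal(err)
}
_ = idx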

type Outputer

type Outputer byte

type ParentDirs added in v0.1.14

type ParentDirs struct {
	// contains filtered or unexported fields
}

func NewParentDirs added in v0.1.14

func NewParentDirs(base string, dirs []string) (pdirs ParentDirs)

func (ParentDirs) Has added in v0.1.14

func (pdir ParentDirs) Has(dir string) bool

type ProgressBar

type ProgressBar struct {
	// contains filtered or unexported fields
}

func NewProgressBar

func NewProgressBar(opts ...mpb.ContainerOption) (bar ProgressBar)

func (*ProgressBar) Add

func (p *ProgressBar) Add(name string, total int) (bar *mpb.Bar)

func (*ProgressBar) Done

func (p *ProgressBar) Done()

type Range

type Range struct {
	// contains filtered or unexported fields
}

type RangeCur

type RangeCur struct {
	Range
	// contains filtered or unexported fields
}

type Record

type Record struct {
	// contains filtered or unexported fields
}

func NewRecord

func NewRecord(id uint64, offset, size int64) *Record

func RecordFromFbs

func RecordFromFbs(r io.Reader) *Record

func (*Record) IsExist

func (r *Record) IsExist(c *Column) bool

func (*Record) Raw

func (r *Record) Raw(c *Column) (data []byte)

func (*Record) StrValue

func (r *Record) StrValue(c *Column) string

func (*Record) ToFbs

func (r *Record) ToFbs(inf interface{}) []byte

func (*Record) Uint64Value

func (r *Record) Uint64Value(c *Column) uint64

func (*Record) Write

func (r *Record) Write(c *Column) error

Write ... write column index

type RecordFn

type RecordFn func(SkipFn) []*query.Record

RecordFn ... function to return record slices

type RecordInfoArg added in v0.1.8

type RecordInfoArg struct {
	// contains filtered or unexported fields
}

RecordInfoArg ... params for InfoFn

type RecordPos

type RecordPos struct {
	// contains filtered or unexported fields
}

type Records

type Records []*Record

func NewRecords

func NewRecords(n int) Records

func (Records) Add

func (recs Records) Add(r *Record) Records

type ResultFn added in v0.1.8

type ResultFn func(SkipFn)

ResultFn ... function to find record with SkipFn

type ResultOpt

type ResultOpt func(*Column, []*query.Record) interface{}

func ResultOutput

func ResultOutput(name string) ResultOpt

type RowIndex

type RowIndex struct {
	// contains filtered or unexported fields
}

type SearchCond

type SearchCond struct {
	Err error
	// contains filtered or unexported fields
}

SearchCond ... search condition object.

func (*SearchCond) CancelAndWait

func (cond *SearchCond) CancelAndWait()

CancelAndWait ... waits for the canceled background routine (mainly index merging) to finish

func (*SearchCond) Column

func (cond *SearchCond) Column() *Column

func (*SearchCond) FindBy

func (cond *SearchCond) FindBy(col string, kInf interface{}) (sfinder *SearchFinder)

func (*SearchCond) IndexFile added in v0.1.12

func (cond *SearchCond) IndexFile() *IndexFile

func (*SearchCond) Match

func (f *SearchCond) Match(s string) *SearchFinder

func (*SearchCond) Query

func (f *SearchCond) Query(s string) (r *SearchFinder)

func (*SearchCond) ReloadFileList added in v0.1.9

func (cond *SearchCond) ReloadFileList()

func (*SearchCond) Select added in v0.1.8

func (cond *SearchCond) Select(fn func(SearchElem) bool) (sfinder *SearchFinder)

func (*SearchCond) StartMerging

func (cond *SearchCond) StartMerging()
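
SearchCond offers several entry points that all yield a SearchFinder. A sketch using FindBy and Match together with SearchFinder helpers, continuing from an *Indexer opened as in the README (whether FindBy accepts a plain int key is an assumption):

// Exact match on a numeric column.
sCondID := idx.On("test", vfs.ReaderColumn("id"), vfs.Output(vfs.MapInfOutput))
byID := sCondID.FindBy("id", 130988433)
fmt.Println("count:", byID.Count())
fmt.Println(byID.First())

// Substring match on a string column, limited to 10 results.
sCondName := idx.On("test", vfs.ReaderColumn("name"), vfs.Output(vfs.MapInfOutput))
fmt.Println(sCondName.Match("2011_04").Limit(10).All())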

type SearchElem added in v0.1.8

type SearchElem struct {
	Column ColGetter
	// contains filtered or unexported fields
}

func (SearchElem) Op added in v0.1.8

func (cond SearchElem) Op(col, op string, v interface{}) (result bool)

type SearchFinder

type SearchFinder struct {
	// contains filtered or unexported fields
}

func NewSearchFinder added in v0.1.8

func NewSearchFinder(c *Column) *SearchFinder

func (*SearchFinder) All

func (sf *SearchFinder) All(opts ...ResultOpt) interface{}

func (*SearchFinder) And

func (sf *SearchFinder) And(i int, key uint64) (result SkipFn)

func (*SearchFinder) Count added in v0.1.8

func (sf *SearchFinder) Count() (cnt int)

func (*SearchFinder) First

func (sf *SearchFinder) First(opts ...ResultOpt) interface{}

func (*SearchFinder) Last

func (sf *SearchFinder) Last(opts ...ResultOpt) interface{}

func (*SearchFinder) Limit added in v0.1.8

func (sf *SearchFinder) Limit(n int) *SearchFinder

func (*SearchFinder) MergeAsAnd added in v0.1.11

func (sf *SearchFinder) MergeAsAnd(src *SearchFinder)

func (*SearchFinder) Records

func (sf *SearchFinder) Records() (recs []*query.Record)

type SearchFn added in v0.1.18

type SearchFn struct {
	RecFn RecordFn
	CntFn CountFn
	// contains filtered or unexported fields
}

SearchFn ... aggregates RecordFn and CountFn

func (SearchFn) Do added in v0.1.18

func (fn SearchFn) Do(skipFn SkipFn) interface{}

type SearchMode

type SearchMode byte
const (
	SEARCH_INIT SearchMode = iota
	SEARCH_START
	SEARCH_ASC
	SEARCH_DESC
	SEARCH_ALL
	SEARCH_FINISH
)

type SelectOpt

type SelectOpt struct {
	// contains filtered or unexported fields
}

SelectOpt ... search options for Select().

type SelectOption

type SelectOption func(*SelectOpt)

SelectOption ... for setting option parameter in Select()

func OptAsc

func OptAsc(isAsc bool) SelectOption

OptAsc ... set option of order to search in Select()

func OptCcondFn

func OptCcondFn(c CondFn) SelectOption

OptCcondFn ... set option of CondFn in Select()

func OptRange

func OptRange(start, last uint64) SelectOption

OptRange ... set option of range in Select()

func OptTraverse

func OptTraverse(fn TraverseFn) SelectOption

OptTraverse ... set option of TraverseFn in Select()
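
These SelectOption constructors parameterize IndexFile.Select; a sketch that walks index files in ascending order over the full key range, continuing from a SearchCond as above (the condition and traversal bodies are placeholders):

idxFile := sCondID.IndexFile() // *vfs.IndexFile for the selected column

err := idxFile.Select(
	vfs.OptAsc(true),                // traverse in ascending order
	vfs.OptRange(0, math.MaxUint64), // full key range
	vfs.OptCcondFn(func(f *vfs.IndexFile) vfs.CondType {
		return vfs.CondTrue // accept every index file
	}),
	vfs.OptTraverse(func(f *vfs.IndexFile) error {
		fmt.Println(f.Path) // visit each selected index file
		return nil
	}),
)
if err != nil {
	log.Fatal(err)
}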

type SkipFn

type SkipFn func(int) SkipType

SkipFn ... function to filter record results

type SkipType added in v0.1.6

type SkipType byte
const (
	SkipFalse SkipType = iota
	SkipTrue
	SkipFinish
)

func EmptySkip

func EmptySkip(i int) SkipType

func (SkipType) String added in v0.1.7

func (i SkipType) String() string

type TraverseFn

type TraverseFn func(f *IndexFile) error

TraverseFn ... function to traverse in Select()

type ValueGetter added in v0.1.8

type ValueGetter func(string, CondOp) *SearchFinder

Directories

Path Synopsis
cmd
