Documentation ¶
Index ¶
- Constants
- Variables
- func DataToHint(path string) (err error)
- func DataToHintDir(path string, start, end int) (err error)
- func DataToHintFile(path string) (err error)
- func FreeMem()
- func GetBucketDir(numBucket, bucketID int) string
- func GetBucketPath(bucketID int) string
- func Getvhash(value []byte) uint16
- func IsValidKeyString(key string) bool
- func NeedCompress(header []byte) bool
- func NewdataStore(bucketID int, home string) *dataStore
- func ParsePathString(pathStr string, buf []int) ([]int, error)
- func ParsePathUint64(khash uint64, buf []int) []int
- func StartCpuProfile(name string) *os.File
- func StopCpuProfile(f *os.File)
- func WakeupFlush()
- func WriteHeapProfile(name string)
- type Bucket
- type BucketInfo
- type BucketStat
- type CollisionTable
- type DBLocalConfig
- type DU
- type DataConfig
- type DataStreamReader
- type DataStreamWriter
- type GCFileState
- type GCMgr
- func (mgr *GCMgr) AfterBucket(bkt *Bucket)
- func (mgr *GCMgr) BeforeBucket(bkt *Bucket, startChunkID, endChunkID int, merge bool)
- func (mgr *GCMgr) UpdateCollision(bkt *Bucket, ki *KeyInfo, oldPos, newPos Position, rec *Record)
- func (mgr *GCMgr) UpdateHtreePos(bkt *Bucket, ki *KeyInfo, oldPos, newPos Position)
- type GCState
- type HStore
- func (store *HStore) ChangeRoute(newConf config.DBRouteConfig) (loaded, unloaded []int, err error)
- func (store *HStore) Close()
- func (store *HStore) Flusher()
- func (store *HStore) GC(bucketID, beginChunkID, endChunkID, noGCDays int, merge, pretend bool) (begin, end int, err error)
- func (store *HStore) GCStat() (int, *GCState)
- func (store *HStore) Get(ki *KeyInfo, memOnly bool) (payload *Payload, pos Position, err error)
- func (store *HStore) GetBucketInfo(bucketID int) *BucketInfo
- func (store *HStore) GetCollisionsByBucket(bucketID int) (content []byte)
- func (store *HStore) GetDU() (du *DU)
- func (store *HStore) GetNumCmdByBuckets() (counts [][]int64)
- func (store *HStore) GetRecordByKeyHash(ki *KeyInfo) (*Record, bool, error)
- func (store *HStore) HintDumper(interval time.Duration)
- func (store *HStore) Incr(ki *KeyInfo, value int) int
- func (store *HStore) ListDir(ki *KeyInfo) ([]byte, error)
- func (store *HStore) ListUpper(ki *KeyInfo) ([]byte, error)
- func (store *HStore) NumKey() (n int)
- func (store *HStore) Set(ki *KeyInfo, p *Payload) error
- type HStoreConfig
- type HTree
- type HTreeConfig
- type HTreeItem
- type HTreeReq
- type HashFuncType
- type HintBuffer
- type HintConfig
- type HintID
- type HintItem
- type HintItemMeta
- type HintStatus
- type HtreeDerivedConfig
- type ItemFunc
- type KeyInfo
- type KeyPos
- type Meta
- type Node
- type NodeInfo
- type Payload
- type Position
- type Record
- type SliceHeader
- func (sh *SliceHeader) Get(req *HTreeReq) (exist bool)
- func (sh *SliceHeader) Iter(f ItemFunc, ni *NodeInfo)
- func (sh *SliceHeader) Remove(ki *KeyInfo, oldPos Position) (oldm HTreeItem, removed bool)
- func (sh *SliceHeader) Set(req *HTreeReq) (oldm HTreeItem, exist bool)
- func (sh *SliceHeader) ToBytes() (b []byte)
- type WriteRecord
Constants ¶
View Source
const ( HTREE_SUFFIX = "hash" HINT_SUFFIX = "s" MERGED_HINT_SUFFIX = "m" )
View Source
const ( BUCKET_STAT_EMPTY = iota BUCKET_STAT_NOT_EMPTY BUCKET_STAT_READY )
View Source
const ( MAX_NUM_CHUNK = 998 MAX_NUM_SPLIT = 998 )
View Source
const ( SecsBeforeDumpDefault = int64(5) HintStateIdle = 0 )
View Source
const ( HintStateDump = 1 << iota HintStateMerge HintStateGC )
View Source
const ( HINTFILE_HEAD_SIZE = 16 HINTITEM_HEAD_SIZE = 23 HINTINDEX_ROW_SIZE = 4096 )
View Source
const ( BUCKET_SIZE = 16 MAX_DEPTH = 8 ThresholdListKeyDefault = uint32(64 * 4) ThresholdBigHash = 64 * 4 )
View Source
const ( FLAG_INCR = 0x00000204 FLAG_COMPRESS = 0x00010000 FLAG_CLIENT_COMPRESS = 0x00000010 COMPRESS_RATIO_LIMIT = 0.7 TRY_COMPRESS_SIZE = 1024 * 10 PADDING = 256 HEADER_SIZE = 512 )
View Source
const LEN_USE_C_FIND = 100
View Source
const (
MAX_KEY_LEN = 250
)
View Source
const TREE_ITEM_HEAD_SIZE = 11
Variables ¶
View Source
var ( DefaultHintConfig = HintConfig{ NoMerged: false, SplitCapStr: "1M", IndexIntervalSizeStr: "4K", MergeInterval: 1, } DefaultHTreeConfig HTreeConfig = HTreeConfig{ TreeHeight: 3, TreeDump: 3, } DefaultDataConfig = DataConfig{ DataFileMaxStr: "4000M", CheckVHash: false, FlushInterval: 0, FlushWakeStr: "0", NoGCDays: 0, NotCompress: map[string]bool{ "audio/wave": true, "audio/mpeg": true, }, } DefaultDBLocalConfig = DBLocalConfig{ Home: "./testdb", } )
View Source
var ( GCWriteBufferSizeDefault = uint32(1 << 20) GCWriteBufferSize = GCWriteBufferSizeDefault )
View Source
var (
SecsBeforeDump = SecsBeforeDumpDefault // may change in test
)
Functions ¶
func DataToHint ¶
func DataToHintDir ¶
func DataToHintFile ¶
func GetBucketDir ¶
func GetBucketPath ¶
func IsValidKeyString ¶
func NeedCompress ¶
func NewdataStore ¶
func ParsePathUint64 ¶
func StartCpuProfile ¶
func StopCpuProfile ¶
func WakeupFlush ¶
func WakeupFlush()
func WriteHeapProfile ¶
func WriteHeapProfile(name string)
Types ¶
type Bucket ¶
type Bucket struct { BucketInfo GCHistory []GCState // contains filtered or unexported fields }
type BucketInfo ¶
type BucketStat ¶
type CollisionTable ¶
type DBLocalConfig ¶
type DBLocalConfig struct {
Home string `yaml:",omitempty"`
}
type DU ¶
type DataConfig ¶
type DataConfig struct { FlushWake int64 `yaml:"-"` // after set to flush buffer, wake up flush go routine if buffer size > this DataFileMax int64 `yaml:"-"` // data rotate when reach the size CheckVHash bool `yaml:"check_vhash,omitempty"` // not really set if vhash is the same FlushInterval int `yaml:"flush_interval,omitempty"` // the flush go routine run at this interval NoGCDays int `yaml:"no_gc_days,omitempty"` // do not GC data files whose mtime falls within the most recent NoGCDays days FlushWakeStr string `yaml:"flush_wake_str"` // DataFileMaxStr string `yaml:"datafile_max_str,omitempty"` NotCompress map[string]bool `yaml:"not_compress,omitempty"` // content kinds (MIME types) that should not be compressed }
type DataStreamReader ¶
type DataStreamReader struct {
// contains filtered or unexported fields
}
func (*DataStreamReader) Close ¶
func (stream *DataStreamReader) Close() error
func (*DataStreamReader) Next ¶
func (stream *DataStreamReader) Next() (res *Record, offset uint32, sizeBroken uint32, err error)
func (*DataStreamReader) Offset ¶
func (stream *DataStreamReader) Offset() uint32
type DataStreamWriter ¶
type DataStreamWriter struct {
// contains filtered or unexported fields
}
func GetStreamWriter ¶
func GetStreamWriter(path string, isappend bool) (*DataStreamWriter, error)
func (*DataStreamWriter) Append ¶
func (stream *DataStreamWriter) Append(rec *Record) (offset uint32, err error)
func (*DataStreamWriter) Close ¶
func (stream *DataStreamWriter) Close() error
func (*DataStreamWriter) Offset ¶
func (stream *DataStreamWriter) Offset() uint32
type GCFileState ¶
type GCFileState struct { NumBefore int64 NumReleased int64 NumReleasedDeleted int64 SizeBefore int64 SizeReleased int64 SizeDeleted int64 SizeBroken int64 NumNotInHtree int64 }
func (*GCFileState) String ¶
func (s *GCFileState) String() string
type GCMgr ¶
type GCMgr struct {
// contains filtered or unexported fields
}
func (*GCMgr) AfterBucket ¶
func (*GCMgr) BeforeBucket ¶
func (*GCMgr) UpdateCollision ¶
type HStore ¶
type HStore struct {
// contains filtered or unexported fields
}
func (*HStore) ChangeRoute ¶
func (store *HStore) ChangeRoute(newConf config.DBRouteConfig) (loaded, unloaded []int, err error)
func (*HStore) GetBucketInfo ¶
func (store *HStore) GetBucketInfo(bucketID int) *BucketInfo
func (*HStore) GetCollisionsByBucket ¶
func (*HStore) GetNumCmdByBuckets ¶
func (*HStore) GetRecordByKeyHash ¶
func (*HStore) HintDumper ¶
type HStoreConfig ¶
type HStoreConfig struct { config.DBRouteConfig `yaml:"-"` // from route table DBLocalConfig `yaml:"local,omitempty"` DataConfig `yaml:"data,omitempty"` HintConfig `yaml:"hint,omitempty"` HTreeConfig `yaml:"htree,omitempty"` }
var ( KHASH_LENS = [8]int{8, 8, 7, 7, 6, 6, 5, 5} Conf *HStoreConfig )
func (*HStoreConfig) InitDefault ¶
func (c *HStoreConfig) InitDefault()
func (*HStoreConfig) InitTree ¶
func (c *HStoreConfig) InitTree() error
Must be called before use. Derivation order: NumBucket => TreeDepth => (TreeKeyHashLen & TreeKeyHashMask).
type HTreeConfig ¶
type HTreeConfig struct { TreeHeight int `yaml:"tree_height,omitempty"` TreeDump int `yaml:"tree_dump,omitempty"` HtreeDerivedConfig `yaml:"-"` }
type HTreeItem ¶
type HTreeItem HintItemMeta
type HashFuncType ¶
type HintBuffer ¶
type HintBuffer struct {
// contains filtered or unexported fields
}
func NewHintBuffer ¶
func NewHintBuffer() *HintBuffer
func (*HintBuffer) Dump ¶
func (h *HintBuffer) Dump(path string) (index *hintFileIndex, err error)
func (*HintBuffer) Get ¶
func (h *HintBuffer) Get(keyhash uint64, key string) (it *HintItem, iscollision bool)
func (*HintBuffer) SetMaxOffset ¶
func (h *HintBuffer) SetMaxOffset(offset uint32)
type HintConfig ¶
type HintConfig struct { NoMerged bool `yaml:"hint_no_merged,omitempty"` // merge only used to find collision, but not dump idx.m to save disk space MergeInterval int `yaml:"hint_merge_interval,omitempty"` // merge after rotating each MergeInterval chunk IndexIntervalSize int64 `yaml:"-"` // max diff of offsets of two adjacent hint index items SplitCap int64 `yaml:"-"` // pre-allocate SplitCap slots for each split; when all slots are filled, the split is dumped SplitCapStr string `yaml:"hint_split_cap_str,omitempty"` IndexIntervalSizeStr string `yaml:"hint_index_interval_str,omitempty"` }
type HintItem ¶
type HintItem struct { HintItemMeta Key string }
type HtreeDerivedConfig ¶
type KeyInfo ¶
func NewKeyInfoFromBytes ¶
type KeyPos ¶
type KeyPos struct { KeyPathBuf [16]int KeyPath []int // need depth BucketID int KeyPathInBucket []int }
computed once, before being routed to a bucket
type Payload ¶
func GetPayloadForDelete ¶
func GetPayloadForDelete() *Payload
func (*Payload) CalcValueHash ¶
func (p *Payload) CalcValueHash()
func (*Payload) Decompress ¶
func (*Payload) DiffSizeAfterDecompressed ¶
func (*Payload) IsCompressed ¶
func (*Payload) RawValueSize ¶
type Record ¶
func (*Record) TryCompress ¶
func (rec *Record) TryCompress()
type SliceHeader ¶
func (*SliceHeader) Get ¶
func (sh *SliceHeader) Get(req *HTreeReq) (exist bool)
func (*SliceHeader) Iter ¶
func (sh *SliceHeader) Iter(f ItemFunc, ni *NodeInfo)
func (*SliceHeader) Remove ¶
func (sh *SliceHeader) Remove(ki *KeyInfo, oldPos Position) (oldm HTreeItem, removed bool)
func (*SliceHeader) ToBytes ¶
func (sh *SliceHeader) ToBytes() (b []byte)
type WriteRecord ¶
type WriteRecord struct {
// contains filtered or unexported fields
}
func (*WriteRecord) String ¶
func (wrec *WriteRecord) String() string
Click to show internal directories.
Click to hide internal directories.