Documentation ¶
Overview ¶
Package leveldb is an implementation of the LevelDB database.
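A minimal usage sketch follows. It assumes a package-level Open constructor with signature func Open(dbname string, opts *Options) (*DB, error) and the import path github.com/kezhuw/leveldb; neither appears on this page, so treat both as assumptions. Passing nil options to Put and Get is likewise assumed to select defaults. Later snippets on this page reuse the db variable opened here.

package main

import (
    "fmt"
    "log"

    "github.com/kezhuw/leveldb" // assumed import path
)

func main() {
    // Open is assumed to exist with this signature; it is not
    // listed in the index below.
    db, err := leveldb.Open("example-db", &leveldb.Options{CreateIfMissing: true})
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // nil options are assumed to mean "use defaults".
    if err := db.Put([]byte("greeting"), []byte("hello"), nil); err != nil {
        log.Fatal(err)
    }
    value, err := db.Get([]byte("greeting"), nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("greeting => %s\n", value)
}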
Index ¶
- Constants
- Variables
- func IsCorrupt(err error) bool
- type Batch
- type Comparator
- type CompressionType
- type DB
- func (db *DB) All(opts *ReadOptions) Iterator
- func (db *DB) Close() error
- func (db *DB) CompactRange(start, limit []byte) error
- func (db *DB) Delete(key []byte, opts *WriteOptions) error
- func (db *DB) Find(start []byte, opts *ReadOptions) Iterator
- func (db *DB) Get(key []byte, opts *ReadOptions) ([]byte, error)
- func (db *DB) GetSnapshot() *Snapshot (deprecated)
- func (db *DB) Prefix(prefix []byte, opts *ReadOptions) Iterator
- func (db *DB) Put(key, value []byte, opts *WriteOptions) error
- func (db *DB) Range(start, limit []byte, opts *ReadOptions) Iterator
- func (db *DB) Snapshot() *Snapshot
- func (db *DB) Write(batch Batch, opts *WriteOptions) error
- type File
- type FileSystem
- type Filter
- type Generator
- type Iterator
- type Logger
- type Options
- type ReadOptions
- type Reader
- type Snapshot
- func (ss *Snapshot) All(opts *ReadOptions) Iterator
- func (ss *Snapshot) Close() error
- func (ss *Snapshot) Dup() *Snapshot (deprecated)
- func (ss *Snapshot) Find(start []byte, opts *ReadOptions) Iterator
- func (ss *Snapshot) Get(key []byte, opts *ReadOptions) ([]byte, error)
- func (ss *Snapshot) Prefix(prefix []byte, opts *ReadOptions) Iterator
- func (ss *Snapshot) Range(start, limit []byte, opts *ReadOptions) Iterator
- func (ss *Snapshot) Snapshot() *Snapshot
- type WriteOptions
Constants ¶
const (
    // MaxCompactionConcurrency maximizes compaction concurrency as far as
    // possible. Caution: compaction is a disk-drive-sensitive task; max
    // compaction concurrency usually doesn't mean good performance.
    MaxCompactionConcurrency = compaction.MaxCompactionConcurrency
)
Variables ¶
var (
    ErrNotFound  = errors.ErrNotFound // key not found
    ErrDBExists  = errors.ErrDBExists
    ErrDBMissing = errors.ErrDBMissing
    ErrDBClosed  = errors.ErrDBClosed
)
var DiscardLogger = logger.Discard
DiscardLogger is a no-op Logger.
Functions ¶
func IsCorrupt ¶
func IsCorrupt(err error) bool
IsCorrupt reports whether err is an error caused by database corruption.
Types ¶
type Batch ¶
type Batch struct {
// contains filtered or unexported fields
}
Batch holds a collection of updates to apply atomically to a DB.
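Batch's mutation methods are filtered from this page, so the sketch below is speculative: it assumes Batch exposes Put and Delete methods mirroring the ones on DB, and applies the batch atomically via DB.Write (db as in the overview sketch).

var batch leveldb.Batch
batch.Put([]byte("a"), []byte("1")) // assumed method
batch.Put([]byte("b"), []byte("2")) // assumed method
batch.Delete([]byte("obsolete"))    // assumed method

// Write applies every update in the batch atomically: either all of
// them become visible, or none do.
if err := db.Write(batch, nil); err != nil {
    log.Fatal(err)
}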
type Comparator ¶
type Comparator interface {
    // Name returns the name of this comparator. A DB created with one comparator
    // can't be opened using a comparator with a different name.
    //
    // Clients should switch to a new name if the comparator implementation changes
    // in a way that causes the relative order of any two keys to change.
    //
    // Names starting with "leveldb." are reserved and should not be used by clients.
    Name() string

    // Compare returns a value 'less than', 'equal to' or 'greater than' 0 depending
    // on whether a is 'less than', 'equal to' or 'greater than' b.
    Compare(a, b []byte) int

    // AppendSuccessor appends a possibly shortest byte sequence in range
    // [start, limit) to dst. An empty limit acts as infinitely large. In
    // particular, if limit equals start, it returns append(dst, start...).
    AppendSuccessor(dst, start, limit []byte) []byte

    // MakePrefixSuccessor returns a byte sequence 'limit' such that all byte
    // sequences falling in [prefix, limit) have 'prefix' as prefix. A zero
    // length 'limit' acts as infinitely large.
    MakePrefixSuccessor(prefix []byte) []byte
}
Comparator defines a total order over keys in a LevelDB database. Methods of a comparator may be called by concurrent goroutines.
var BytewiseComparator Comparator = keys.BytewiseComparator
BytewiseComparator is a lexicographic ordering comparator; it has the same ordering as bytes.Compare.
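To illustrate the MakePrefixSuccessor contract under bytewise ordering: the successor can be computed by incrementing the last byte that is not 0xFF and truncating after it. This is a sketch of the idea, not this package's implementation:

// makePrefixSuccessor returns the smallest byte sequence 'limit' such
// that every sequence in [prefix, limit) has prefix as a prefix, under
// bytewise ordering. A zero length result means "infinitely large":
// the prefix is all 0xFF bytes, so no finite upper bound exists.
func makePrefixSuccessor(prefix []byte) []byte {
    for i := len(prefix) - 1; i >= 0; i-- {
        if prefix[i] != 0xFF {
            limit := make([]byte, i+1)
            copy(limit, prefix[:i+1])
            limit[i]++
            return limit
        }
    }
    return nil
}

For example, makePrefixSuccessor([]byte("ab")) returns "ac": every key with prefix "ab" falls in ["ab", "ac").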
type CompressionType ¶
type CompressionType int
CompressionType defines compression methods to compress a table block.
const (
    // DefaultCompression defaults to SnappyCompression for now.
    DefaultCompression CompressionType = iota
    // NoCompression means no compression for table blocks.
    NoCompression
    // SnappyCompression uses snappy compression to compress table blocks
    // before storing them to file.
    SnappyCompression
)
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
DB represents an opened LevelDB database.
func (*DB) All ¶
func (db *DB) All(opts *ReadOptions) Iterator
All returns an iterator catching all keys in db.
func (*DB) Close ¶
func (db *DB) Close() error
Close closes the opened LevelDB database. Make sure that there are no ongoing accesses and no outstanding iterators. All operations after this call will get the error ErrDBClosed. A judicious caller should call this function only after all ongoing accesses are done and all outstanding iterators are released, and should not call any functions of this db instance after it is closed.
func (*DB) CompactRange ¶
func (db *DB) CompactRange(start, limit []byte) error
CompactRange compacts keys in range [start, limit] to the maximum level these keys currently reside in.
Zero length start acts as infinitely small, zero length limit acts as infinitely large.
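Because zero-length bounds act as infinitely small and infinitely large, passing nil for both arguments compacts the entire key space:

// Compact everything: nil start and nil limit cover the full range.
if err := db.CompactRange(nil, nil); err != nil {
    log.Fatal(err)
}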
func (*DB) Delete ¶
func (db *DB) Delete(key []byte, opts *WriteOptions) error
Delete deletes the database entry for the given key. It is not an error if db does not contain that key.
func (*DB) Find ¶
func (db *DB) Find(start []byte, opts *ReadOptions) Iterator
Find returns an iterator catching all keys greater than or equal to start in db. Zero length start acts as infinitely small.
func (*DB) Get ¶
func (db *DB) Get(key []byte, opts *ReadOptions) ([]byte, error)
Get gets the value for the given key. It returns ErrNotFound if db does not contain that key.
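Absence of a key is reported through ErrNotFound rather than a nil value, so callers should distinguish it from real failures (nil ReadOptions assumed to mean defaults):

value, err := db.Get([]byte("maybe-missing"), nil)
switch err {
case nil:
    fmt.Printf("found: %s\n", value)
case leveldb.ErrNotFound:
    fmt.Println("no such key") // not a failure, just absence
default:
    log.Fatal(err) // an actual I/O or corruption error
}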
func (*DB) GetSnapshot ¶
func (db *DB) GetSnapshot() *Snapshot
Deprecated: use Snapshot instead.
func (*DB) Prefix ¶
func (db *DB) Prefix(prefix []byte, opts *ReadOptions) Iterator
Prefix returns an iterator catching all keys having prefix as prefix.
func (*DB) Put ¶
func (db *DB) Put(key, value []byte, opts *WriteOptions) error
Put stores a key/value pair in DB.
func (*DB) Range ¶
func (db *DB) Range(start, limit []byte, opts *ReadOptions) Iterator
Range returns an iterator catching all keys in range [start, limit). Zero length start acts as infinitely small, zero length limit acts as infinitely large.
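A typical scan over a half-open key range, using the Iterator contract described later on this page (nil ReadOptions assumed; db as in the overview sketch). ";" is the byte after ":", so ["user:", "user;") covers exactly the keys with prefix "user:".

// Iterate over all keys in ["user:", "user;"). The first Next call
// behaves as First, positioning at the start of the range.
it := db.Range([]byte("user:"), []byte("user;"), nil)
for it.Next() {
    fmt.Printf("%s => %s\n", it.Key(), it.Value())
}
// Close releases the iterator and reports any error seen during the scan.
if err := it.Close(); err != nil {
    log.Fatal(err)
}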
type File ¶
type File interface {
    io.Reader
    io.Writer
    io.Seeker
    io.Closer
    io.ReaderAt
    Truncate(size int64) error
    Sync() error
}
File defines methods on one file.
type FileSystem ¶
type FileSystem interface {
    // Open opens a file using the specified flag.
    Open(name string, flag int) (File, error)

    // Lock locks a file for exclusive usage. If the file is locked by
    // someone else, it fails with an error instead of blocking. If ok,
    // it returns an io.Closer to let the caller release the underlying lock.
    Lock(name string) (io.Closer, error)

    // Exists returns true if the named file exists.
    Exists(name string) bool

    // MkdirAll creates a directory and all necessary parents.
    MkdirAll(path string) error

    // List returns all contents of the directory.
    List(dir string) ([]string, error)

    // Remove removes the named file or directory.
    Remove(filename string) error

    // Rename renames (moves) oldpath to newpath. If newpath already exists,
    // Rename replaces it.
    Rename(oldpath, newpath string) error
}
FileSystem defines methods for hierarchical file storage.
var DefaultFileSystem FileSystem = internalFileSystem{file.DefaultFileSystem}
DefaultFileSystem is the file system provided by the os package.
type Filter ¶
type Filter interface {
    // Name returns the name of this filter.
    //
    // Note that if the encoding of filter data changes in an incompatible
    // way, the name returned by this method must be changed. Otherwise,
    // old incompatible filter data may be passed to methods of this filter.
    Name() string

    // Append appends filter data summarized from keys to buf.
    Append(buf *bytes.Buffer, keys [][]byte)

    // Contains returns a boolean indicating whether key possibly
    // falls in 'data', which is generated by Generator.
    Contains(data, key []byte) bool

    // NewGenerator creates a new generator to generate filter data
    // for a bunch of keys. This method is optional; an implementation
    // can safely return nil, in which case a fallback generator is
    // created.
    NewGenerator() Generator
}
Filter defines methods to create small summarized data from a large set of keys, and to check whether a key is possibly contained in that summarized data. Many goroutines may call methods of this filter concurrently.
func NewBloomFilter ¶
func NewBloomFilter(bitsPerKey int) Filter
NewBloomFilter creates a Bloom filter that uses approximately the specified number of bits per key. Bloom filters using different bitsPerKey values are compatible with each other, so it is OK to open a database created with a Bloom filter using a different bitsPerKey.
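For example, to enable it when opening a database (the Open constructor is assumed, as in the overview sketch):

opts := &leveldb.Options{
    CreateIfMissing: true,
    // Roughly 10 bits per key gives a Bloom filter false-positive
    // rate of about 1% in common configurations.
    Filter: leveldb.NewBloomFilter(10),
}
db, err := leveldb.Open("example-db", opts) // Open is assumed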
type Generator ¶
type Generator interface {
    // Name returns the name of the underlying filter.
    Name() string

    // Reset resets the generator to its initial state.
    Reset()

    // Empty reports whether no keys have been added to this generator.
    Empty() bool

    // Add adds key to this generator for summarization.
    Add(key []byte)

    // Append appends filter data summarized from the keys added so far.
    Append(buf *bytes.Buffer)
}
Generator is a filter data generator.
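A sketch of the intended calling sequence, assuming filter is some Filter whose NewGenerator returns a non-nil Generator:

var buf bytes.Buffer
gen := filter.NewGenerator()
for _, key := range [][]byte{[]byte("a"), []byte("b")} {
    gen.Add(key)
}
if !gen.Empty() {
    gen.Append(&buf) // buf now holds filter data for the added keys
    gen.Reset()      // ready for the next batch of keys
}
// Data written to buf can later be checked with filter.Contains.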
type Iterator ¶
type Iterator interface {
    // Next moves to the next entry. It returns whether such an entry exists.
    // If it is the first seek method called, it behaves as First.
    Next() bool

    // Prev moves to the previous entry. It returns whether such an entry exists.
    // If it is the first seek method called, it behaves as Last.
    Prev() bool

    // First moves to the first entry. It returns whether such an entry exists.
    First() bool

    // Last moves to the last entry. It returns whether such an entry exists.
    Last() bool

    // Seek moves the iterator to the first key that is equal to or greater
    // than key. It returns whether such an entry exists.
    Seek(key []byte) bool

    // Valid returns whether the iterator points to a valid entry.
    Valid() bool

    // Key returns the key of the current entry. The behaviour is undefined if
    // the current status of the iterator is invalid.
    Key() []byte

    // Value returns the value of the current entry. The behaviour is undefined
    // if the current status of the iterator is invalid.
    Value() []byte

    // Err returns the error encountered so far. Seek methods, such as First,
    // Last and Seek, may clear this error.
    Err() error

    // Close releases any resources held by this iterator, and returns any
    // error it has encountered so far. The behaviour is undefined if you
    // call any methods after this iterator has been closed.
    Close() error
}
Iterator defines methods to iterate through a set of data. The initial status of a newly created iterator is invalid; clients must call one of the seek methods before using it. Iterators returned from this package are not designed for concurrent use.
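A sketch of explicit positioning (db as in the overview sketch): a fresh iterator is invalid until a seek method runs, and Seek lands on the first key equal to or greater than its argument:

it := db.All(nil) // nil ReadOptions assumed to mean defaults
if it.Seek([]byte("m")) {
    fmt.Printf("first key >= m: %s\n", it.Key())
    // Walk backwards from the current position.
    for it.Prev() {
        fmt.Printf("previous: %s\n", it.Key())
    }
}
if err := it.Close(); err != nil {
    log.Fatal(err)
}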
type Options ¶
type Options struct {
    // Comparator defines the total order over keys in the database.
    //
    // The default comparator is BytewiseComparator, which uses the same ordering
    // as bytes.Compare.
    Comparator Comparator

    // Compression type used to compress blocks.
    //
    // The default value points to SnappyCompression.
    Compression CompressionType

    // MaxFileSize enforces a size limitation for files created by LevelDB.
    //
    // Defaults to 2MiB.
    MaxFileSize int64

    // MaxGrandparentOverlapBytes limits the number of bytes a compacted file
    // produced in a level => level+1 compaction can overlap with level+2.
    //
    // Defaults to 10 * MaxFileSize.
    MaxGrandparentOverlapBytes int64

    // MaxExpandedCompactionBytes limits the number of bytes of input files when
    // expanding input files in the compacting level.
    //
    // Defaults to 25 * MaxFileSize.
    MaxExpandedCompactionBytes int64

    // BlockSize specifies the minimum uncompressed size in bytes for a table block.
    //
    // The default value is 4KiB.
    BlockSize int

    // BlockRestartInterval specifies the number of keys between restart points
    // for delta encoding of keys in a block.
    //
    // The default value is 16.
    BlockRestartInterval int

    // BlockCompressionRatio specifies a minimal compression ratio for table
    // blocks. An uncompressed block will be written to the table file if its
    // compression ratio is below this value.
    //
    // The default value is 8.0/7.0, which means that an uncompressed block will
    // be written to the table file unless at least 1/8 of the raw data was
    // compressed out.
    BlockCompressionRatio float64

    // WriteBufferSize is the amount of data to build up in memory (backed by
    // an unsorted log on disk) before converting to a sorted on-disk file.
    //
    // Larger values increase performance, especially during bulk loads. Up to
    // two write buffers may be held in memory at the same time, so you may wish
    // to adjust this parameter to control memory usage. Also, a larger write
    // buffer will result in a longer recovery time the next time the database
    // is opened.
    //
    // The default value is 4MiB.
    WriteBufferSize int

    // MaxOpenFiles is the number of open files that can be used by this db
    // instance. You may need to increase this if your database has a large
    // number of files.
    //
    // The default value is 1000.
    MaxOpenFiles int

    // BlockCacheCapacity specifies the capacity in bytes for the block cache.
    //
    // The default value is 8MiB.
    BlockCacheCapacity int

    // CompactionConcurrency specifies the max allowed concurrent compactions.
    //
    // The default value is 1; use MaxCompactionConcurrency to maximize
    // compaction concurrency as far as possible.
    CompactionConcurrency int

    // CompactionBytesPerSeek states that one seek costs approximately the same
    // time as compacting this number of bytes.
    //
    // We decide to compact a file after a certain number of overlap seeks; this
    // way, for keys in range, we reduce potential seeks by one after compaction.
    // We use CompactionBytesPerSeek and MinimalAllowedOverlapSeeks to calculate
    // the number of allowed overlap seeks for a file.
    //
    // The default value is 16KiB, which means that one seek costs approximately
    // the same time as compacting 16KiB of data.
    CompactionBytesPerSeek int

    // MinimalAllowedOverlapSeeks specifies the minimal allowed overlap seeks
    // per table file.
    //
    // The default value is 100.
    MinimalAllowedOverlapSeeks int

    // IterationBytesPerSampleSeek specifies the average iteration bytes for one
    // sample seek to detect overlap files.
    //
    // The default value is 1MiB.
    IterationBytesPerSampleSeek int

    // Level0CompactionFiles specifies that a compaction for level-0 is
    // triggered if there are more than this number of files in level-0.
    //
    // The default value is 4.
    Level0CompactionFiles int

    // Level0SlowdownWriteFiles specifies that writes will be slowed down if
    // there are more than this number of files in level-0.
    //
    // The default value is Level0CompactionFiles + 4.
    Level0SlowdownWriteFiles int

    // Level0StopWriteFiles specifies that writes will be stopped if there are
    // more than this number of files in level-0.
    //
    // The default value is Level0SlowdownWriteFiles + 4.
    Level0StopWriteFiles int

    // Filter specifies a Filter to filter out unnecessary disk reads when
    // looking for a specific key. The filter is also used to generate filter
    // data when building table files.
    //
    // The default value is nil.
    Filter Filter

    // Logger specifies a place that all internal progress/error information
    // generated by this db instance will be written to.
    //
    // The default value is a file named "LOG" stored under the db directory.
    // You can suppress logging by using DiscardLogger.
    Logger Logger

    // FileSystem defines a hierarchical file storage interface.
    //
    // The default file system is built around the os package.
    FileSystem FileSystem

    // CreateIfMissing specifies whether to create one if the database does
    // not exist.
    //
    // The default value is false.
    CreateIfMissing bool

    // ErrorIfExists specifies whether to report an error if the database
    // already exists.
    //
    // The default value is false.
    ErrorIfExists bool
}
Options contains options controlling various parts of the db instance.
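A sketch of tuning a few of these knobs together; the field values are illustrative only, and the Open constructor is assumed as in the overview sketch:

opts := &leveldb.Options{
    CreateIfMissing:       true,
    Compression:           leveldb.SnappyCompression,
    WriteBufferSize:       8 << 20,  // 8MiB, double the default
    BlockCacheCapacity:    64 << 20, // 64MiB block cache
    MaxOpenFiles:          5000,
    CompactionConcurrency: leveldb.MaxCompactionConcurrency,
    Logger:                leveldb.DiscardLogger, // suppress the LOG file
}
db, err := leveldb.Open("example-db", opts)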
type ReadOptions ¶
type ReadOptions struct {
    // DontFillCache specifies whether data read in this operation
    // should be cached in memory. If true, data read from the underlying
    // storage will not be cached in memory for later reading, but
    // if the data is already cached in memory, it will be used by
    // this operation.
    DontFillCache bool

    // VerifyChecksums specifies whether data read from the underlying
    // storage should be verified against saved checksums. Note that
    // it never verifies data cached in memory.
    VerifyChecksums bool
}
ReadOptions contains options controlling behaviours of read operations.
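For example, a paranoid one-off read that verifies checksums and avoids displacing hot data from the block cache (db as in the overview sketch):

value, err := db.Get([]byte("key"), &leveldb.ReadOptions{
    DontFillCache:   true, // don't cache what this read pulls in
    VerifyChecksums: true, // verify data read from disk
})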
type Reader ¶
type Reader interface {
    Get(key []byte, opts *ReadOptions) ([]byte, error)
    All(opts *ReadOptions) Iterator
    Find(start []byte, opts *ReadOptions) Iterator
    Range(start, limit []byte, opts *ReadOptions) Iterator
    Prefix(prefix []byte, opts *ReadOptions) Iterator
    Snapshot() *Snapshot
}
Reader provides a unified interface to read a database and its snapshots.
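Since both *DB and *Snapshot expose these methods, a function written against Reader works on either, for example:

// dumpAll prints every key/value pair visible through r, whether r is
// a live *DB or a frozen *Snapshot.
func dumpAll(r leveldb.Reader) error {
    it := r.All(nil)
    for it.Next() {
        fmt.Printf("%s => %s\n", it.Key(), it.Value())
    }
    return it.Close()
}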
type Snapshot ¶
type Snapshot struct {
// contains filtered or unexported fields
}
Snapshot is a read-only, frozen state of LevelDB at a particular moment.
func (*Snapshot) All ¶
func (ss *Snapshot) All(opts *ReadOptions) Iterator
All returns an iterator catching all keys in this snapshot.
func (*Snapshot) Find ¶
func (ss *Snapshot) Find(start []byte, opts *ReadOptions) Iterator
Find returns an iterator catching all keys greater than or equal to start in this snapshot. Zero length start acts as infinitely small.
func (*Snapshot) Get ¶
func (ss *Snapshot) Get(key []byte, opts *ReadOptions) ([]byte, error)
Get gets the value for the given key. It returns ErrNotFound if this snapshot does not contain that key.
func (*Snapshot) Prefix ¶
func (ss *Snapshot) Prefix(prefix []byte, opts *ReadOptions) Iterator
Prefix returns an iterator catching all keys having prefix as prefix in this snapshot.
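A sketch of snapshot isolation (db as in the overview sketch): writes made after the snapshot was taken are invisible through it:

ss := db.Snapshot()
defer ss.Close() // release resources pinned by the snapshot

// This write lands in the database, but not in the snapshot.
if err := db.Put([]byte("k"), []byte("after"), nil); err != nil {
    log.Fatal(err)
}

old, err := ss.Get([]byte("k"), nil) // sees the pre-Put state, or ErrNotFound
if err != nil && err != leveldb.ErrNotFound {
    log.Fatal(err)
}
_ = old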
type WriteOptions ¶
type WriteOptions struct {
    // Sync specifies whether to synchronize the write from OS cache to
    // underlying storage before the write is considered complete.
    // Setting Sync to true may result in slower writes.
    //
    // If Sync is false and the machine crashes, some recent writes may
    // be lost. Note that if it is just the process that crashes, no
    // writes will be lost.
    //
    // In other words, a write with Sync false has crash semantics similar
    // to the "write()" system call. A write with Sync true has crash
    // semantics similar to a "write()" system call followed by "fsync()".
    Sync bool
}
WriteOptions contains options controlling write operations: Put, Delete, and Write.
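For example, a durable write that survives machine crashes at the cost of fsync-like latency (db as in the overview sketch):

err := db.Put([]byte("balance"), []byte("42"), &leveldb.WriteOptions{Sync: true})
if err != nil {
    log.Fatal(err)
}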