bitcask_go

package module
v0.0.0-...-d2c5623 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 7, 2023 License: GPL-3.0, GPL-3.0-or-later Imports: 18 Imported by: 0

README

Bitcask

This is a key-value (KV) database based on Bitcask implemented in Golang.

Introduction

Bitcask is a log-structured storage engine that writes all data to an append-only log file and uses an in-memory index for improved read performance.

The database offers the following features:

  • High Performance: With the Bitcask storage engine, it provides fast read and write operations.
  • Easy to Use: It offers a simple API that allows users to store and retrieve key-value pairs easily.
  • Persistent Storage: Data is persisted to disk to prevent data loss.
  • Concurrency Safety: It supports concurrent read and write operations and ensures data consistency using locking mechanisms.
  • Transaction Support: It provides a transaction mechanism (WriteBatch) to ensure data consistency.
  • Backup and Merge Support: It offers backup and merge mechanisms, and the merged data generates hint files for faster startup.
  • Multiple Memory Index Support: It supports multiple memory indexes, including B-tree, Adaptive Radix Tree (ART), B+ tree, etc.
  • HTTP API: It provides an HTTP API for connecting to Bitcask using an HTTP client.
  • Redis Protocol Compatibility: It supports the Redis protocol, allowing connection to Bitcask using a Redis client.

Installation

go get github.com/xiecang/bitcask

Usage Example

The following example demonstrates how to use the Bitcask storage engine.

var options = bitcask.Options{
    DirPath:     "/tmp/bitcask-go",
    MaxFileSize: 256 * 1024 * 1024,
    SyncWrites:  false,
    IndexType:   bitcask.BTree,
}
db, err := bitcask.Open(options)
if err != nil {
    panic(err)
}

var (
    key = []byte("key")
    value = []byte("bitcask-go")
)

// write data
if err = db.Put(key, value); err != nil {
    panic(err)
}

// read data
v, err := db.Get(key)
if err != nil {
    panic(err)
}

fmt.Printf("set value: %s, get value: %s\n", value, v)

// delete data
if err = db.Delete(key); err != nil {
    panic(err)
}

// read data
v, err = db.Get(key)
fmt.Printf("got error: %v\n", err)

Contribution

Contributions to this project are welcome! If you find any issues or have suggestions for improvements, please raise an issue or submit a pull request.

License

GNU General Public License v3.0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	ErrKeyIsEmpty               = errors.New("key is empty")
	ErrIndexUpdateFailed        = errors.New("failed to update index")
	ErrKeyNotFound              = errors.New("key not found")
	ErrFileNotFound             = errors.New("file not found")
	ErrDataDirectoryCorrupted   = errors.New("the database directory maybe corrupted")
	ErrExceedMaxBatchSize       = errors.New("exceed max batch size")
	ErrMergeInProgress          = errors.New("merge in progress")
	ErrDatabaseIsUsing          = errors.New("the database directory is used by another process")
	ErrMergeThresholdNotReached = errors.New("the merge threshold does not reach the option")
	ErrInsufficientDiskSpace    = errors.New("insufficient disk space")
)
View Source
var DefaultOptions = Options{
	DirPath:                filepath.Join(os.TempDir(), "bitcask-go"),
	MaxFileSize:            256 * 1024 * 1024,
	SyncWrites:             false,
	BytesPerSync:           0,
	IndexType:              BTree,
	MMapAtStartup:          true,
	DataFileMergeThreshold: 0.5,
}
View Source
var DefaultWriteBatchOptions = WriteBatchOption{
	MaxBatchSize: 10000,
	SyncWrites:   true,
}

Functions

This section is empty.

Types

type DB

type DB struct {
	// contains filtered or unexported fields
}

DB bitcask 存储引擎

func Open

func Open(options Options) (*DB, error)

Open 打开 bitcask 数据库存储引擎

func (*DB) Backup

func (db *DB) Backup(dir string) error

Backup 备份数据库, 将数据库文件拷贝到新目录

func (*DB) Close

func (db *DB) Close() error

Close 关闭数据库

func (*DB) Delete

func (db *DB) Delete(key []byte) error

Delete 根据 key 删除对应的数据

func (*DB) Fold

func (db *DB) Fold(fn func(key, value []byte) bool) error

Fold 遍历数据库中的所有 key-value, fn 返回 true 时继续遍历,返回 false 时停止遍历

func (*DB) Get

func (db *DB) Get(key []byte) ([]byte, error)

Get 根据 key 读取数据

func (*DB) ListKeys

func (db *DB) ListKeys() [][]byte

ListKeys 列出数据库中所有的 key

func (*DB) Merge

func (db *DB) Merge() error

Merge 清理无效数据,生成 Hint 文件

func (*DB) NewIterator

func (db *DB) NewIterator(opt *IteratorOption) *Iterator

func (*DB) NewWriteBatch

func (db *DB) NewWriteBatch(options WriteBatchOption) *WriteBatch

func (*DB) Put

func (db *DB) Put(key []byte, value []byte) error

Put 写入 key-value 数据,key 不能为空

func (*DB) Stat

func (db *DB) Stat() *Stat

Stat 返回数据库的统计信息

func (*DB) Sync

func (db *DB) Sync() error

Sync 持久化数据文件

type IndexType

type IndexType = int8
const (
	BTree     IndexType = iota + 1 // B 树索引
	ART                            // Adaptive Radix Tree 自适应基数树索引
	BPlusTree                      // B+ 树索引, 将索引数据存储在磁盘当中
)

type Iterator

type Iterator struct {
	// contains filtered or unexported fields
}

Iterator 迭代器

func (*Iterator) Close

func (i *Iterator) Close()

func (*Iterator) Key

func (i *Iterator) Key() []byte

func (*Iterator) Next

func (i *Iterator) Next()

func (*Iterator) Rewind

func (i *Iterator) Rewind()

func (*Iterator) Seek

func (i *Iterator) Seek(key []byte)

func (*Iterator) Valid

func (i *Iterator) Valid() bool

func (*Iterator) Value

func (i *Iterator) Value() ([]byte, error)

type IteratorOption

type IteratorOption struct {
	Prefix  []byte // 遍历前缀为指定 key 的值,默认为空,即遍历所有 key
	Reverse bool   // 是否反向迭代
}

type Options

type Options struct {
	DirPath string // 数据库数据目录

	MaxFileSize int64 // 数据文件大小

	SyncWrites bool // 是否同步写入,true 时每次写入都会持久化到磁盘当中

	BytesPerSync uint // 每写入指定字节数后同步到磁盘

	IndexType IndexType // 索引类型

	MMapAtStartup bool // 是否在启动时将索引文件映射到内存当中

	DataFileMergeThreshold float32 // 数据文件合并阈值, 无效数据文件占总数据文件大小的比例超过该阈值时触发合并
}

type Stat

type Stat struct {
	KeyNum          uint  // key 的总数量
	DataFileNum     uint  // 数据文件的数量
	ReclaimableSize int64 // 可以进行 merge 回收的数据量,单位 byte
	DiskSize        int64 // 数据目录占用的磁盘空间,单位 byte
}

Stat 存储引擎的统计信息

type WriteBatch

type WriteBatch struct {
	// contains filtered or unexported fields
}

WriteBatch 原子批量写

func (*WriteBatch) Commit

func (w *WriteBatch) Commit() error

Commit 提交事务,将暂存数据写入数据文件,并更新内存索引

func (*WriteBatch) Delete

func (w *WriteBatch) Delete(key []byte) error

Delete 添加待批量删除的数据

func (*WriteBatch) Put

func (w *WriteBatch) Put(key, value []byte) error

Put 添加待批量写入的数据

type WriteBatchOption

type WriteBatchOption struct {
	MaxBatchSize uint // 最大批量写入大小

	SyncWrites bool // 是否同步写入,true 时每次写入都会持久化到磁盘当中
}

WriteBatchOption 批量写入配置项

Directories

Path Synopsis
cmd

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL