package y

v1.0.4 · Published: Jul 27, 2024 · License: Apache-2.0

Documentation


Constants

const (
	BADGER_METRIC_PREFIX = "badger_"
)

Variables

var (
	// ErrEOF indicates an end of file when trying to read from a memory mapped file
	// and encountering the end of slice.
	ErrEOF = errors.New("ErrEOF: End of file")

	// ErrCommitAfterFinish indicates that write batch commit was called after
	// finish
	ErrCommitAfterFinish = errors.New("Batch commit not permitted after finish")
)

var (

	// CastagnoliCrcTable is a CRC32 polynomial table
	CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
)

var ErrChecksumMismatch = errors.New("checksum mismatch")

ErrChecksumMismatch is returned on a checksum mismatch.

var (
	NoEventLog trace.EventLog = nilEventLog{}
)

Functions

func AssertTrue

func AssertTrue(b bool)

AssertTrue asserts that b is true. Otherwise, it logs a fatal error.

func AssertTruef

func AssertTruef(b bool, format string, args ...interface{})

AssertTruef is AssertTrue with extra info.

func BloomBitsPerKey

func BloomBitsPerKey(numEntries int, fp float64) int

BloomBitsPerKey returns the bits per key required by bloomfilter based on the false positive rate.

func BytesToU16

func BytesToU16(b []byte) uint16

BytesToU16 converts the given byte slice to uint16

func BytesToU32

func BytesToU32(b []byte) uint32

BytesToU32 converts the given byte slice to uint32

func BytesToU32Slice

func BytesToU32Slice(b []byte) []uint32

BytesToU32Slice converts the given byte slice to uint32 slice

func BytesToU64

func BytesToU64(b []byte) uint64

BytesToU64 converts the given byte slice to uint64

func BytesToU64Slice

func BytesToU64Slice(b []byte) []uint64

BytesToU64Slice converts the given byte slice to uint64 slice

func CalculateChecksum

func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64

CalculateChecksum calculates checksum for data using ct checksum type.

func Check

func Check(err error)

Check logs fatal if err != nil.

func Check2

func Check2(_ interface{}, err error)

Check2 acts as a convenience wrapper around Check, using the 2nd argument as the error.

func CombineErrors

func CombineErrors(one, other error) error

func CompareKeys

func CompareKeys(key1, key2 []byte) int

CompareKeys compares the keys without their timestamps and compares the timestamps only when the key portions are equal. With a plain bytes.Compare, a<timestamp> would sort higher than aa<timestamp>, which is why this function is needed. All keys should have a timestamp.

func Copy

func Copy(a []byte) []byte

Copy copies a byte slice and returns the copied slice.

func CreateSyncedFile

func CreateSyncedFile(filename string, sync bool) (*os.File, error)

CreateSyncedFile creates a new file (using O_EXCL) and returns an error if it already exists.

func FixedDuration

func FixedDuration(d time.Duration) string

FixedDuration returns a string representation of the given duration in hours, minutes, and seconds.

func GenerateIV

func GenerateIV() ([]byte, error)

GenerateIV generates an IV (initialization vector).

func Hash

func Hash(b []byte) uint32

Hash implements a hashing algorithm similar to the Murmur hash.

func IBytesToString

func IBytesToString(size uint64, precision int) string

IBytesToString converts a size in bytes to a human-readable format. The code is adapted from the humanize library and changed to provide the value up to a custom decimal precision. IBytesToString(12312412, 1) -> 11.7 MiB

func KeyWithTs

func KeyWithTs(key []byte, ts uint64) []byte

KeyWithTs generates a new key by appending ts to key.
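
Example (illustrative sketch, not from the package docs) showing how KeyWithTs, ParseKey, ParseTs, and CompareKeys fit together; the import path is assumed and may differ for your module:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	// Append a version timestamp to a user key.
	internal := y.KeyWithTs([]byte("user-key"), 42)

	// Recover the original key and its timestamp (see ParseKey/ParseTs below).
	fmt.Printf("key=%q ts=%d\n", y.ParseKey(internal), y.ParseTs(internal))

	// CompareKeys orders by the key portion first, then by timestamp.
	other := y.KeyWithTs([]byte("user-key"), 7)
	fmt.Println(y.CompareKeys(internal, other))
}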

func LSMSizeGet

func LSMSizeGet(enabled bool, key string) expvar.Var

func LSMSizeSet

func LSMSizeSet(enabled bool, key string, val expvar.Var)

func NewKV

func NewKV(alloc *z.Allocator) *pb.KV

func NumBytesCompactionWrittenAdd

func NumBytesCompactionWrittenAdd(enabled bool, key string, val int64)

func NumBytesReadsLSMAdd

func NumBytesReadsLSMAdd(enabled bool, val int64)

func NumBytesReadsVlogAdd

func NumBytesReadsVlogAdd(enabled bool, val int64)

func NumBytesWrittenToL0Add

func NumBytesWrittenToL0Add(enabled bool, val int64)

func NumBytesWrittenUserAdd

func NumBytesWrittenUserAdd(enabled bool, val int64)

func NumBytesWrittenVlogAdd

func NumBytesWrittenVlogAdd(enabled bool, val int64)

func NumCompactionTablesAdd

func NumCompactionTablesAdd(enabled bool, val int64)

func NumGetsAdd

func NumGetsAdd(enabled bool, val int64)

func NumGetsWithResultsAdd

func NumGetsWithResultsAdd(enabled bool, val int64)

func NumIteratorsCreatedAdd

func NumIteratorsCreatedAdd(enabled bool, val int64)

func NumLSMBloomHitsAdd

func NumLSMBloomHitsAdd(enabled bool, key string, val int64)

func NumLSMGetsAdd

func NumLSMGetsAdd(enabled bool, key string, val int64)

func NumMemtableGetsAdd

func NumMemtableGetsAdd(enabled bool, val int64)

func NumPutsAdd

func NumPutsAdd(enabled bool, val int64)

func NumReadsVlogAdd

func NumReadsVlogAdd(enabled bool, val int64)

func NumWritesVlogAdd

func NumWritesVlogAdd(enabled bool, val int64)

func OpenExistingFile

func OpenExistingFile(filename string, flags Flags) (*os.File, error)

OpenExistingFile opens an existing file, errors if it doesn't exist.

func OpenSyncedFile

func OpenSyncedFile(filename string, sync bool) (*os.File, error)

OpenSyncedFile creates the file if one doesn't exist.

func OpenTruncFile

func OpenTruncFile(filename string, sync bool) (*os.File, error)

OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC

func ParseKey

func ParseKey(key []byte) []byte

ParseKey parses the actual key from the key bytes.

func ParseTs

func ParseTs(key []byte) uint64

ParseTs parses the timestamp from the key bytes.

func PendingWritesSet

func PendingWritesSet(enabled bool, key string, val expvar.Var)

func SafeCopy

func SafeCopy(a, src []byte) []byte

SafeCopy does append(a[:0], src...).

func SameKey

func SameKey(src, dst []byte) bool

SameKey checks for key equality ignoring the version timestamp suffix.

func U16ToBytes

func U16ToBytes(v uint16) []byte

U16ToBytes converts the given Uint16 to bytes

func U32SliceToBytes

func U32SliceToBytes(u32s []uint32) []byte

U32SliceToBytes converts the given Uint32 slice to byte slice

func U32ToBytes

func U32ToBytes(v uint32) []byte

U32ToBytes converts the given Uint32 to bytes

func U64SliceToBytes

func U64SliceToBytes(u64s []uint64) []byte

U64SliceToBytes converts the given Uint64 slice to byte slice

func U64ToBytes

func U64ToBytes(v uint64) []byte

U64ToBytes converts the given Uint64 to bytes
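
Example round trip for the fixed-width conversions above (illustrative sketch; the import path is assumed):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	// Scalar round trips.
	fmt.Println(y.BytesToU32(y.U32ToBytes(1234)))   // 1234
	fmt.Println(y.BytesToU64(y.U64ToBytes(567890))) // 567890

	// Slice variants convert the whole slice at once.
	b := y.U64SliceToBytes([]uint64{1, 2, 3})
	fmt.Println(y.BytesToU64Slice(b)) // [1 2 3]
}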

func VerifyChecksum

func VerifyChecksum(data []byte, expected *pb.Checksum) error

VerifyChecksum validates the checksum for the data against the given expected checksum.
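
Example (illustrative sketch): computing and then verifying a checksum. The pb.Checksum field names (Algo, Sum), the Checksum_CRC32C constant, and both import paths are assumptions and may differ in your version:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/pb" // assumed import path
	"github.com/dgraph-io/badger/y"  // assumed import path
)

func main() {
	data := []byte("hello")

	// Compute a checksum using the CRC32C algorithm (constant name assumed).
	sum := y.CalculateChecksum(data, pb.Checksum_CRC32C)

	// Verify the data against the expected checksum (field names assumed).
	expected := &pb.Checksum{Algo: pb.Checksum_CRC32C, Sum: sum}
	if err := y.VerifyChecksum(data, expected); err != nil {
		fmt.Println("checksum mismatch:", err)
		return
	}
	fmt.Println("checksum ok")
}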

func VlogSizeGet

func VlogSizeGet(enabled bool, key string) expvar.Var

func VlogSizeSet

func VlogSizeSet(enabled bool, key string, val expvar.Var)

func Wrap

func Wrap(err error, msg string) error

Wrap wraps errors from external libraries.

func Wrapf

func Wrapf(err error, format string, args ...interface{}) error

Wrapf is Wrap with extra info.

func XORBlock

func XORBlock(dst, src, key, iv []byte) error

XORBlock encrypts the given data with AES, XORing the data with the key stream derived from key and IV. The same operation works for both encryption and decryption. The IV must be of AES block size.

func XORBlockAllocate

func XORBlockAllocate(src, key, iv []byte) ([]byte, error)
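
Example (illustrative sketch): an encrypt/decrypt round trip with XORBlockAllocate. The 16-byte (AES-128) key length, the assumption that GenerateIV returns an IV of AES block size, and the import path are all assumptions:

package main

import (
	"bytes"
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	key := make([]byte, 16) // AES-128 key; all zeroes only for illustration
	iv, err := y.GenerateIV()
	if err != nil {
		panic(err)
	}

	plain := []byte("secret payload")

	// Encrypt: XOR the data with the AES-derived key stream.
	enc, err := y.XORBlockAllocate(plain, key, iv)
	if err != nil {
		panic(err)
	}

	// Decrypt: per the XORBlock doc, the same operation reverses itself.
	dec, err := y.XORBlockAllocate(enc, key, iv)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(plain, dec)) // true
}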

func XORBlockStream

func XORBlockStream(w io.Writer, src, key, iv []byte) error

func ZSTDCompress

func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error)

ZSTDCompress compresses a block using ZSTD algorithm.

func ZSTDCompressBound

func ZSTDCompressBound(srcSize int) int

ZSTDCompressBound returns the worst-case size needed for a destination buffer. The klauspost ZSTD library does not provide an API for the compression bound, so this calculation is based on the DataDog ZSTD library. See https://pkg.go.dev/github.com/DataDog/zstd#CompressBound

func ZSTDDecompress

func ZSTDDecompress(dst, src []byte) ([]byte, error)

ZSTDDecompress decompresses a block using ZSTD algorithm.
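
Example (illustrative sketch): a compress/decompress round trip. Sizing the destination with ZSTDCompressBound and passing nil to let ZSTDDecompress allocate are assumptions about how these helpers are meant to be used:

package main

import (
	"bytes"
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	src := bytes.Repeat([]byte("badger "), 128)

	// Allocate a worst-case destination buffer and compress at level 3.
	dst := make([]byte, y.ZSTDCompressBound(len(src)))
	compressed, err := y.ZSTDCompress(dst, src, 3)
	if err != nil {
		panic(err)
	}

	decompressed, err := y.ZSTDDecompress(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, decompressed)) // true
}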

Types

type Filter

type Filter []byte

Filter is an encoded set of []byte keys.

func NewFilter

func NewFilter(keys []uint32, bitsPerKey int) Filter

NewFilter returns a new Bloom filter that encodes a set of []byte keys with the given number of bits per key, approximately.

A good bitsPerKey value is 10, which yields a filter with ~ 1% false positive rate.

func (Filter) MayContain

func (f Filter) MayContain(h uint32) bool

MayContain returns whether the filter may contain the key with the given hash. False positives are possible: it may return true for keys not in the original set.

func (Filter) MayContainKey

func (f Filter) MayContainKey(k []byte) bool
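
Example (illustrative sketch) tying together BloomBitsPerKey, Hash, and Filter; the 1% false-positive target and the import path are assumptions:

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	keys := [][]byte{[]byte("alpha"), []byte("beta"), []byte("gamma")}

	// NewFilter works on 32-bit hashes, so hash every key first.
	hashes := make([]uint32, 0, len(keys))
	for _, k := range keys {
		hashes = append(hashes, y.Hash(k))
	}

	// Pick bits-per-key for a ~1% false positive rate and build the filter.
	bitsPerKey := y.BloomBitsPerKey(len(keys), 0.01)
	filter := y.NewFilter(hashes, bitsPerKey)

	fmt.Println(filter.MayContain(y.Hash([]byte("alpha")))) // true
	fmt.Println(filter.MayContainKey([]byte("delta")))      // usually false; false positives possible
}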

type Flags

type Flags int
const (
	// Sync indicates that O_DSYNC should be set on the underlying file,
	// ensuring that data writes do not return until the data is flushed
	// to disk.
	Sync Flags = 1 << iota
	// ReadOnly opens the underlying file on a read-only basis.
	ReadOnly
)
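
Example (illustrative sketch): opening an existing file read-only with these flags. The path is hypothetical and the import path is assumed:

package main

import (
	"log"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	// Flags are bit values, so they can be combined, e.g. y.Sync|y.ReadOnly.
	f, err := y.OpenExistingFile("/tmp/example.sst", y.ReadOnly) // hypothetical path
	if err != nil {
		log.Fatal(err) // OpenExistingFile errors if the file does not exist
	}
	defer f.Close()
}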

type Iterator

type Iterator interface {
	Next()
	Rewind()
	Seek(key []byte)
	Key() []byte
	Value() ValueStruct
	Valid() bool

	// All iterators should be closed so that file garbage collection works.
	Close() error
}

Iterator is an interface for a basic iterator.

type PageBuffer

type PageBuffer struct {
	// contains filtered or unexported fields
}

PageBuffer consists of many pages; a page is a wrapper over []byte. PageBuffer can act as a replacement for bytes.Buffer. Instead of a single underlying buffer, it has multiple underlying buffers, so it avoids the copying during reallocation that happens in bytes.Buffer. PageBuffer allocates memory in pages: once a page is full, it allocates a new page with double the size of the previous one. Its functions are not thread-safe.

func NewPageBuffer

func NewPageBuffer(pageSize int) *PageBuffer

NewPageBuffer returns a new PageBuffer with first page having size pageSize.

func (*PageBuffer) Bytes

func (b *PageBuffer) Bytes() []byte

Bytes returns the whole buffer's data as a single []byte.

func (*PageBuffer) Len

func (b *PageBuffer) Len() int

Len returns length of PageBuffer.

func (*PageBuffer) NewReaderAt

func (b *PageBuffer) NewReaderAt(offset int) *PageBufferReader

NewReaderAt returns a reader that starts reading from the given offset in the page buffer.

func (*PageBuffer) Truncate

func (b *PageBuffer) Truncate(n int)

Truncate truncates PageBuffer to length n.

func (*PageBuffer) Write

func (b *PageBuffer) Write(data []byte) (int, error)

Write writes data to PageBuffer b. It returns the number of bytes written and any error encountered.

func (*PageBuffer) WriteByte

func (b *PageBuffer) WriteByte(data byte) error

WriteByte writes a single byte to the PageBuffer and returns any error encountered.

func (*PageBuffer) WriteTo

func (b *PageBuffer) WriteTo(w io.Writer) (int64, error)

WriteTo writes the whole buffer to w. It returns the number of bytes written and any error encountered.

type PageBufferReader

type PageBufferReader struct {
	// contains filtered or unexported fields
}

PageBufferReader is a reader for PageBuffer.

func (*PageBufferReader) Read

func (r *PageBufferReader) Read(p []byte) (int, error)

Read reads up to len(p) bytes. It returns the number of bytes read and any error encountered.
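
Example (illustrative sketch): writing into a PageBuffer and reading it back through a PageBufferReader (import path assumed):

package main

import (
	"fmt"
	"io"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	// Start with a 4 KiB first page; later pages double in size as needed.
	buf := y.NewPageBuffer(4096)
	if _, err := buf.Write([]byte("hello, page buffer")); err != nil {
		panic(err)
	}

	// Read everything back starting at offset 0.
	r := buf.NewReaderAt(0)
	out := make([]byte, buf.Len())
	if _, err := io.ReadFull(r, out); err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}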

type RateMonitor

type RateMonitor struct {
	// contains filtered or unexported fields
}

func NewRateMonitor

func NewRateMonitor(numSamples int) *RateMonitor

func (*RateMonitor) Capture

func (rm *RateMonitor) Capture(sent uint64)

Capture captures the current number of sent bytes. This number should be monotonically increasing.

func (*RateMonitor) Rate

func (rm *RateMonitor) Rate() uint64

Rate returns the average rate of transmission smoothed out by the number of samples.
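
Example (illustrative sketch): feeding a RateMonitor with a monotonically increasing byte count; the sample count and sleep interval are arbitrary choices, and the import path is assumed:

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	rm := y.NewRateMonitor(10) // smooth over the last 10 samples

	var sent uint64
	for i := 0; i < 5; i++ {
		sent += 1 << 20 // pretend 1 MiB was sent since the last capture
		rm.Capture(sent)
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println(rm.Rate()) // smoothed average transmission rate
}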

type Slice

type Slice struct {
	// contains filtered or unexported fields
}

Slice holds a reusable buffer and reallocates if you request a larger size than ever before. One caveat: with n distinct sizes requested in random order, it will reallocate roughly log(n) times.

func (*Slice) Resize

func (s *Slice) Resize(sz int) []byte

Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of length sz.
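
Example (illustrative sketch) of buffer reuse with Slice (import path assumed):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	var s y.Slice
	a := s.Resize(64) // allocates a 64-byte backing buffer
	b := s.Resize(32) // reuses the same backing buffer, since 32 <= 64
	fmt.Println(len(a), len(b)) // 64 32
}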

type Throttle

type Throttle struct {
	// contains filtered or unexported fields
}

Throttle allows a limited number of workers to run at a time. It also provides a mechanism to check for errors encountered by workers and wait for them to finish.

func NewThrottle

func NewThrottle(max int) *Throttle

NewThrottle creates a new throttle with a max number of workers.

func (*Throttle) Do

func (t *Throttle) Do() error

Do should be called by workers before they start working. It blocks if the maximum number of workers are already working. If it detects an error from previously finished workers, it returns that error.

func (*Throttle) Done

func (t *Throttle) Done(err error)

Done should be called by workers when they finish working. They can also pass in the error status of the work they did.

func (*Throttle) Finish

func (t *Throttle) Finish() error

Finish waits until all workers have finished working. It returns any error passed by Done. If Finish is called multiple times, it waits for workers only on the first call; subsequent calls return the same error found on the first call.
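
Example (illustrative sketch) of the Do/Done/Finish pattern with a handful of workers (import path assumed):

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	t := y.NewThrottle(3) // at most 3 workers at a time

	for i := 0; i < 10; i++ {
		if err := t.Do(); err != nil { // blocks while 3 workers are busy
			fmt.Println("stopping early:", err)
			break
		}
		go func() {
			defer t.Done(nil) // report success; pass an error here on failure
			time.Sleep(10 * time.Millisecond)
		}()
	}

	// Wait for all workers and surface the first error, if any.
	if err := t.Finish(); err != nil {
		fmt.Println("worker error:", err)
	}
}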

type ValueStruct

type ValueStruct struct {
	Meta      byte
	UserMeta  byte
	ExpiresAt uint64
	Value     []byte

	Version uint64 // This field is not serialized. Only for internal usage.
}

ValueStruct represents the value info that can be associated with a key, as well as the internal Meta field.

func (*ValueStruct) Decode

func (v *ValueStruct) Decode(b []byte)

Decode uses the length of the slice to infer the length of the Value field.

func (*ValueStruct) Encode

func (v *ValueStruct) Encode(b []byte) uint32

Encode expects a slice of length at least v.EncodedSize().

func (*ValueStruct) EncodeTo

func (v *ValueStruct) EncodeTo(buf *bytes.Buffer)

EncodeTo should be kept in sync with the Encode function above. The reason this function exists is to avoid creating byte arrays per key-value pair in table/builder.go.

func (*ValueStruct) EncodedSize

func (v *ValueStruct) EncodedSize() uint32

EncodedSize is the size of the ValueStruct when encoded
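
Example (illustrative sketch): an encode/decode round trip for ValueStruct (import path assumed):

package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y" // assumed import path
)

func main() {
	v := y.ValueStruct{
		Meta:      1,
		UserMeta:  2,
		ExpiresAt: 0,
		Value:     []byte("payload"),
	}

	// Encode into a buffer of at least EncodedSize() bytes.
	buf := make([]byte, v.EncodedSize())
	n := v.Encode(buf)

	// Decode infers the Value length from the slice length.
	var out y.ValueStruct
	out.Decode(buf[:n])
	fmt.Printf("%q meta=%d\n", out.Value, out.Meta)
}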

type WaterMark

type WaterMark struct {
	Name string
	// contains filtered or unexported fields
}

WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes finished or "done" according to a WaterMark once Done(k) has been called

  1. as many times as Begin(k) has, AND
  2. a positive number of times.

An index may also become "done" by calling SetDoneUntil at a time such that it is not inter-mingled with Begin/Done calls.

Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they are 64-bit aligned by putting them at the beginning of the structure.

func (*WaterMark) Begin

func (w *WaterMark) Begin(index uint64)

Begin sets the last index to the given value.

func (*WaterMark) BeginMany

func (w *WaterMark) BeginMany(indices []uint64)

BeginMany works like Begin but accepts multiple indices.

func (*WaterMark) Done

func (w *WaterMark) Done(index uint64)

Done sets a single index as done.

func (*WaterMark) DoneMany

func (w *WaterMark) DoneMany(indices []uint64)

DoneMany works like Done but accepts multiple indices.

func (*WaterMark) DoneUntil

func (w *WaterMark) DoneUntil() uint64

DoneUntil returns the maximum index that has the property that all indices less than or equal to it are done.

func (*WaterMark) Init

func (w *WaterMark) Init(closer *z.Closer)

Init initializes a WaterMark struct. MUST be called before using it.

func (*WaterMark) LastIndex

func (w *WaterMark) LastIndex() uint64

LastIndex returns the last index for which Begin has been called.

func (*WaterMark) SetDoneUntil

func (w *WaterMark) SetDoneUntil(val uint64)

SetDoneUntil sets the maximum index that has the property that all indices less than or equal to it are done.

func (*WaterMark) WaitForMark

func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error

WaitForMark waits until the given index is marked as done.
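
Example (illustrative sketch) of the Begin/Done/DoneUntil flow. The z.NewCloser constructor, its argument, and both import paths are assumptions about the companion z package:

package main

import (
	"context"
	"fmt"

	"github.com/dgraph-io/badger/y"    // assumed import path
	"github.com/dgraph-io/ristretto/z" // assumed import path for the z package
)

func main() {
	w := &y.WaterMark{Name: "example"}
	w.Init(z.NewCloser(1)) // Init MUST be called before use

	w.Begin(1)
	w.Begin(2)
	w.Done(1)
	w.Done(2)

	// WaitForMark blocks until index 2 is marked done.
	if err := w.WaitForMark(context.Background(), 2); err != nil {
		panic(err)
	}
	fmt.Println(w.DoneUntil() >= 2) // true
}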
