y

package
v1.5.1-0...-38533b6 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Oct 15, 2024 License: Apache-2.0 Imports: 14 Imported by: 73

Documentation

Index

Constants

View Source
const (
	// File-open flag bits; presumably passed as the flags argument to
	// OpenExistingFile — TODO confirm against callers.

	// Sync indicates that O_DSYNC should be set on the underlying file,
	// ensuring that data writes do not return until the data is flushed
	// to disk.
	Sync = 1 << iota
	// ReadOnly opens the underlying file on a read-only basis.
	ReadOnly
)

Variables

View Source
var (
	// LSMSize is the size of the LSM tree in bytes, labeled by path.
	LSMSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: namespace,
		Name:      "lsm_size",
	}, []string{labelPath})
	// VlogSize is the size of the value log in bytes, labeled by path.
	VlogSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: namespace,
		Name:      "vlog_size",
	}, []string{labelPath})

	// NumReads is the cumulative number of reads.
	NumReads = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_reads",
	}, []string{labelPath})
	// NumWrites is the cumulative number of writes.
	NumWrites = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_writes",
	}, []string{labelPath})
	// NumBytesRead is the cumulative number of bytes read.
	NumBytesRead = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_bytes_read",
	}, []string{labelPath})
	// NumVLogBytesWritten is the cumulative number of bytes written to the value log.
	NumVLogBytesWritten = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_bytes_written",
	}, []string{labelPath})
	// NumGets is the number of gets.
	NumGets = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_gets",
	}, []string{labelPath})
	// NumPuts is the number of puts.
	NumPuts = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_puts",
	}, []string{labelPath})
	// NumMemtableGets is the number of memtable gets.
	NumMemtableGets = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_memtable_gets",
	}, []string{labelPath})

	// The compaction and LSM counters below are labeled by both path and level.

	// NumCompactionBytesWrite is the cumulative size of keys written during compaction.
	NumCompactionBytesWrite = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_bytes_write",
	}, []string{labelPath, labelLevel})
	// NumCompactionBytesRead is the cumulative size of keys read during compaction.
	NumCompactionBytesRead = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_bytes_read",
	}, []string{labelPath, labelLevel})
	// NumCompactionBytesDiscard is the cumulative size of keys discarded after compaction.
	NumCompactionBytesDiscard = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_bytes_discard",
	}, []string{labelPath, labelLevel})
	// NumCompactionKeysWrite is the cumulative count of keys written during compaction.
	NumCompactionKeysWrite = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_keys_write",
	}, []string{labelPath, labelLevel})
	// NumCompactionKeysRead is the cumulative count of keys read during compaction.
	NumCompactionKeysRead = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_keys_read",
	}, []string{labelPath, labelLevel})
	// NumCompactionKeysDiscard is the cumulative count of keys discarded after compaction.
	NumCompactionKeysDiscard = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_compaction_keys_discard",
	}, []string{labelPath, labelLevel})
	// NumLSMGets is the number of LSM gets.
	NumLSMGets = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_lsm_gets",
	}, []string{labelPath, labelLevel})
	// NumLSMBloomFalsePositive is the number of LSM bloom-filter false positives.
	NumLSMBloomFalsePositive = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "num_lsm_bloom_false_positive",
	}, []string{labelPath, labelLevel})

	// VlogSyncDuration is a histogram of value log sync durations.
	// Bucket unit is not evident here — presumably seconds; confirm at
	// the observation sites.
	VlogSyncDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Name:      "vlog_sync_duration",
		Buckets:   prometheus.ExponentialBuckets(0.001, 1.5, 20),
	}, []string{labelPath})

	// WriteLSMDuration is a histogram of LSM write durations.
	WriteLSMDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Name:      "write_lsm_duration",
		Buckets:   prometheus.ExponentialBuckets(0.0003, 1.5, 20),
	}, []string{labelPath})

	// LSMGetDuration is a histogram of LSM get durations.
	LSMGetDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Name:      "lsm_get_duration",
		Buckets:   prometheus.ExponentialBuckets(0.0003, 1.5, 20),
	}, []string{labelPath})

	// LSMMultiGetDuration is a histogram of LSM multi-get durations.
	LSMMultiGetDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: namespace,
		Name:      "lsm_multi_get_duration",
		Buckets:   prometheus.ExponentialBuckets(0.0003, 1.5, 20),
	}, []string{labelPath})
)
View Source
var (

	// CastagnoliCrcTable is a CRC32 polynomial table built from the
	// Castagnoli polynomial (crc32.Castagnoli).
	CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli)
)
View Source
// ErrEOF indicates an end of file when trying to read from a memory
// mapped file and encountering the end of slice.
var ErrEOF = errors.New("End of mapped region")

ErrEOF indicates an end of file when trying to read from a memory mapped file and encountering the end of slice.

Functions

func Assert

func Assert(b bool)

Assert asserts that b is true. Otherwise, it logs fatal.

func AssertTruef

func AssertTruef(b bool, format string, args ...interface{})

AssertTruef is Assert with extra info.

func Check

func Check(err error)

Check logs fatal if err != nil.

func Check2

func Check2(_ interface{}, err error)

Check2 acts as convenience wrapper around Check, using the 2nd argument as error.

func Copy added in v0.9.0

func Copy(a []byte) []byte

Copy copies a byte slice and returns the copied slice.

func CreateSyncedFile

func CreateSyncedFile(filename string, sync bool) (*os.File, error)

CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed.

func Madvise

func Madvise(b []byte, readahead bool) error

Madvise uses the madvise system call to give advise about the use of memory when using a slice that is memory-mapped to a file. Set the readahead flag to false if page references are expected in random order.

func Mmap

func Mmap(fd *os.File, writable bool, size int64) ([]byte, error)

Mmap uses the mmap system call to memory-map a file. If writable is true, memory protection of the pages is set so that they may be written to as well.

func Munmap

func Munmap(b []byte) error

Munmap unmaps a previously mapped slice.

func NextAllVersion

func NextAllVersion(it Iterator)

func OpenExistingFile added in v1.4.0

func OpenExistingFile(filename string, flags uint32) (*os.File, error)

OpenExistingFile opens an existing file, errors if it doesn't exist.

func OpenSyncedFile

func OpenSyncedFile(filename string, sync bool) (*os.File, error)

OpenSyncedFile creates the file if one doesn't exist.

func OpenTruncFile

func OpenTruncFile(filename string, sync bool) (*os.File, error)

OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC

func SafeCopy added in v1.1.0

func SafeCopy(a []byte, src []byte) []byte

SafeCopy does append(a[:0], src...).

func SeekToVersion

func SeekToVersion(it Iterator, version uint64) bool

SeekToVersion seeks a valid Iterator to the latest version that is <= the given version.

func Wrap

func Wrap(err error) error

Wrap wraps errors from external lib.

func Wrapf

func Wrapf(err error, format string, args ...interface{}) error

Wrapf is Wrap with extra info.

Types

type Closer

// Closer holds the two things we need to close a goroutine and wait for
// it to finish: a chan to tell the goroutine to shut down, and a
// WaitGroup with which to wait for it to finish shutting down.
type Closer struct {
	// contains filtered or unexported fields
}

Closer holds the two things we need to close a goroutine and wait for it to finish: a chan to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting down.

func NewCloser

func NewCloser(initial int) *Closer

NewCloser constructs a new Closer, with an initial count on the WaitGroup.

func (*Closer) AddRunning

func (lc *Closer) AddRunning(delta int)

AddRunning Add()'s delta to the WaitGroup.

func (*Closer) Done

func (lc *Closer) Done()

Done calls Done() on the WaitGroup.

func (*Closer) HasBeenClosed

func (lc *Closer) HasBeenClosed() <-chan struct{}

HasBeenClosed gets signaled when Signal() is called.

func (*Closer) Signal

func (lc *Closer) Signal()

Signal signals the HasBeenClosed signal.

func (*Closer) SignalAndWait

func (lc *Closer) SignalAndWait()

SignalAndWait calls Signal(), then Wait().

func (*Closer) Wait

func (lc *Closer) Wait()

Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done calls to balance out.)

type CompactionStats

// CompactionStats accumulates per-compaction counters; they are published
// to the per-level Prometheus counters by
// (*LevelMetricsSet).UpdateCompactionStats.
type CompactionStats struct {
	KeysRead     int // count of keys read during compaction
	BytesRead    int // total size of keys read during compaction
	KeysWrite    int // count of keys written during compaction
	BytesWrite   int // total size of keys written during compaction
	KeysDiscard  int // count of keys discarded after compaction
	BytesDiscard int // total size of keys discarded after compaction
}

type Iterator

// Iterator is an interface for a basic iterator over versioned key/value
// entries. See also the free functions SeekToVersion and NextAllVersion,
// which operate on any Iterator.
type Iterator interface {
	// Next returns the next entry with different key on the latest version.
	// If old version is needed, call NextVersion.
	Next()
	// NextVersion set the current entry to an older version.
	// The iterator must be valid to call this method.
	// It returns true if there is an older version, returns false if there is no older version.
	// The iterator is still valid and on the same key.
	NextVersion() bool
	// Rewind presumably repositions the iterator at its first entry — TODO confirm.
	Rewind()
	// Seek presumably positions the iterator at the given key (or the
	// nearest one, depending on iteration order) — TODO confirm.
	Seek(key []byte)
	// Key returns the current entry's key (user key plus version).
	Key() Key
	// Value returns the current entry's value info.
	Value() ValueStruct
	// FillValue fills vs with the current entry's value info.
	FillValue(vs *ValueStruct)
	// Valid reports whether the iterator is positioned on an entry.
	Valid() bool
	Close() error
}

Iterator is an interface for a basic iterator.

type Key

// Key is the struct for user key with version.
type Key struct {
	UserKey []byte // the user-visible key bytes
	Version uint64 // version for the key; see KeyWithTs, which sets it from a ts
}

Key is the struct for user key with version.

func KeyWithTs added in v0.9.0

func KeyWithTs(key []byte, ts uint64) Key

KeyWithTs generates a new key by appending ts to key.

func ParseKey added in v0.9.0

func ParseKey(keyBytes []byte) Key

ParseKey parses the actual key from the key bytes.

func (Key) AppendTo

func (k Key) AppendTo(buf []byte) []byte

func (Key) Compare

func (k Key) Compare(k2 Key) int

func (*Key) Copy

func (k *Key) Copy(k2 Key)

func (Key) Equal

func (k Key) Equal(k2 Key) bool

func (Key) IsEmpty

func (k Key) IsEmpty() bool

func (Key) Len

func (k Key) Len() int

func (*Key) Reset

func (k *Key) Reset()

func (Key) SameUserKey

func (k Key) SameUserKey(k2 Key) bool

func (Key) String

func (k Key) String() string

func (Key) WriteTo

func (k Key) WriteTo(w io.Writer) error

type LevelMetricsSet

// LevelMetricsSet extends a MetricsSet with per-level compaction and LSM
// counters. Construct one via (*MetricsSet).NewLevelMetricsSet.
type LevelMetricsSet struct {
	*MetricsSet
	NumCompactionKeysRead     prometheus.Counter
	NumCompactionBytesRead    prometheus.Counter
	NumCompactionKeysWrite    prometheus.Counter
	NumCompactionBytesWrite   prometheus.Counter
	NumCompactionKeysDiscard  prometheus.Counter
	NumCompactionBytesDiscard prometheus.Counter
	NumLSMGets                prometheus.Counter
	NumLSMBloomFalsePositive  prometheus.Counter
}

func (*LevelMetricsSet) UpdateCompactionStats

func (m *LevelMetricsSet) UpdateCompactionStats(stats *CompactionStats)

type MetricsSet

// MetricsSet bundles the path-labeled metrics for one store path.
// Construct one via NewMetricSet(path).
type MetricsSet struct {
	LSMSize             prometheus.Gauge
	VlogSize            prometheus.Gauge
	NumReads            prometheus.Counter
	NumWrites           prometheus.Counter
	NumBytesRead        prometheus.Counter
	NumVLogBytesWritten prometheus.Counter
	NumGets             prometheus.Counter
	NumPuts             prometheus.Counter
	NumMemtableGets     prometheus.Counter
	VlogSyncDuration    prometheus.Observer
	WriteLSMDuration    prometheus.Observer
	LSMGetDuration      prometheus.Observer
	LSMMultiGetDuration prometheus.Observer
	// contains filtered or unexported fields
}

func NewMetricSet

func NewMetricSet(path string) *MetricsSet

func (*MetricsSet) NewLevelMetricsSet

func (m *MetricsSet) NewLevelMetricsSet(levelLabel string) *LevelMetricsSet

type Slice

// Slice holds a reusable buf, will reallocate if you request a larger
// size than ever before. One problem is with n distinct sizes in random
// order it'll reallocate log(n) times.
type Slice struct {
	// contains filtered or unexported fields
}

Slice holds a reusable buf, will reallocate if you request a larger size than ever before. One problem is with n distinct sizes in random order it'll reallocate log(n) times.

func (*Slice) Resize

func (s *Slice) Resize(sz int) []byte

Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of length sz.

type ValueStruct

// ValueStruct represents the value info that can be associated with a
// key, but also the internal Meta field. Use Encode/EncodeTo/Decode for
// serialization; Version is deliberately excluded from the encoding.
type ValueStruct struct {
	Meta     byte   // internal meta byte; semantics defined by callers — not evident here
	UserMeta []byte // caller-supplied metadata bytes
	Value    []byte // the value payload

	Version uint64 // This field is not serialized. Only for internal usage.
}

ValueStruct represents the value info that can be associated with a key, but also the internal Meta field.

func (*ValueStruct) Decode added in v0.9.0

func (v *ValueStruct) Decode(b []byte)

Decode uses the length of the slice to infer the length of the Value field.

func (*ValueStruct) Encode

func (v *ValueStruct) Encode(b []byte)

Encode expects a slice of length at least v.EncodedSize().

func (*ValueStruct) EncodeTo added in v1.0.0

func (v *ValueStruct) EncodeTo(buf []byte) []byte

EncodeTo should be kept in sync with the Encode function above. The reason this function exists is to avoid creating byte arrays per key-value pair in table/builder.go.

func (*ValueStruct) EncodedSize

func (v *ValueStruct) EncodedSize() uint32

EncodedSize is the size of the ValueStruct when encoded

func (*ValueStruct) Valid

func (v *ValueStruct) Valid() bool

Valid checks if the ValueStruct is valid.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL