Documentation ¶
Index ¶
- Constants
- func AllPostingsKey() (name, value string)
- func ExpandPostings(p Postings) (res []storage.SeriesRef, err error)
- func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) error) error
- type ByteSlice
- type ChunkMeta
- type ChunkMetas
- type Decoder
- func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error)
- func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error)
- func (dec *Decoder) Postings(b []byte) (int, Postings, error)
- func (dec *Decoder) Series(b []byte, lbls *phlaremodel.Labels, chks *[]ChunkMeta) (uint64, error)
- type FileWriter
- func (fw *FileWriter) AddPadding(size int) error
- func (fw *FileWriter) Close() error
- func (fw *FileWriter) Flush() error
- func (fw *FileWriter) Pos() uint64
- func (fw *FileWriter) Remove() error
- func (fw *FileWriter) Write(bufs ...[]byte) error
- func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error
- type FingerprintOffsets
- type MemPostings
- func (p *MemPostings) Add(id storage.SeriesRef, lset phlaremodel.Labels)
- func (p *MemPostings) All() Postings
- func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{})
- func (p *MemPostings) EnsureOrder()
- func (p *MemPostings) Get(name, value string) Postings
- func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error
- func (p *MemPostings) LabelNames() []string
- func (p *MemPostings) LabelValues(name string) []string
- func (p *MemPostings) SortedKeys() []labels.Label
- func (p *MemPostings) Stats(label string) *PostingsStats
- func (p *MemPostings) Symbols() iter.Iterator[string]
- type Metadata
- type PoolChunkMetas
- type Postings
- type PostingsStats
- type Range
- type Reader
- func (r *Reader) Bounds() (int64, int64)
- func (r *Reader) Checksum() uint32
- func (r *Reader) Close() error
- func (r *Reader) FileInfo() block.File
- func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error)
- func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error)
- func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error)
- func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error)
- func (r *Reader) Postings(name string, shard *ShardAnnotation, values ...string) (Postings, error)
- func (r *Reader) PostingsRanges() (map[labels.Label]Range, error)
- func (r *Reader) Series(id storage.SeriesRef, lbls *phlaremodel.Labels, chks *[]ChunkMeta) (uint64, error)
- func (r *Reader) Size() int64
- func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error)
- func (r *Reader) SymbolTableSize() uint64
- func (r *Reader) Symbols() StringIter
- func (r *Reader) Version() int
- type RealByteSlice
- type ShardAnnotation
- type ShardedPostings
- type Stat
- type StringIter
- type Symbols
- type TOC
- type Writer
Constants ¶
const (
	// MagicIndex 4 bytes at the head of an index file.
	MagicIndex = 0xBAAAD700
	// HeaderLen represents the number of bytes reserved in the index for the header.
	HeaderLen = 5

	// FormatV1 represents version 1 of the index.
	FormatV1 = 1
	// FormatV2 represents version 2 of the index.
	FormatV2 = 2

	IndexFilename = "index"
)
const (
	// ShardLabel is a reserved label referencing a cortex shard
	ShardLabel = "__tsdb_shard__"
	// ShardLabelFmt is the fmt of the ShardLabel key.
	ShardLabelFmt = "%d_of_%d"
)
Variables ¶
This section is empty.
Functions ¶
func AllPostingsKey ¶
func AllPostingsKey() (name, value string)
AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
func ExpandPostings ¶
ExpandPostings returns the postings expanded as a slice.
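A minimal sketch of how these functions combine with a Reader to list every series in an index file. The import path and the file name are assumptions; only functions listed in this documentation are used.

package main

import (
	"fmt"
	"log"

	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	// Open an existing index file (the file name is illustrative).
	r, err := index.NewFileReader("./index")
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// The reserved label pair under which the postings list of all series is stored.
	name, value := index.AllPostingsKey()

	// A nil shard annotation disables shard filtering.
	p, err := r.Postings(name, nil, value)
	if err != nil {
		log.Fatal(err)
	}

	// Materialize the postings iterator into a slice of series references.
	refs, err := index.ExpandPostings(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("index contains %d series\n", len(refs))
}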
Types ¶
type ChunkMeta ¶
type ChunkMeta struct {
	Checksum uint32

	MinTime, MaxTime int64

	// Bytes stored, rounded to nearest KB
	KB uint32

	SeriesIndex uint32
}
ChunkMeta holds information about a chunk of data.
type ChunkMetas ¶
type ChunkMetas []ChunkMeta
func (ChunkMetas) Bounds ¶
func (c ChunkMetas) Bounds() (mint, maxt model.Time)
func (ChunkMetas) Finalize ¶
func (c ChunkMetas) Finalize() ChunkMetas
Finalize sorts and dedupes. TODO(owen-d): can we remove the need for this by ensuring we only push in order and without duplicates?
func (ChunkMetas) Len ¶
func (c ChunkMetas) Len() int
func (ChunkMetas) Less ¶
func (c ChunkMetas) Less(i, j int) bool
Less sorts by (MinTime, MaxTime, Checksum).
func (ChunkMetas) Swap ¶
func (c ChunkMetas) Swap(i, j int)
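A small, hedged sketch of the ChunkMetas helpers above: Finalize is called after appending metas out of order, and Bounds then reports the overall time range. The import path and all literal values are illustrative.

package main

import (
	"fmt"

	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	// Metas appended out of order, including one duplicate; values are illustrative.
	metas := index.ChunkMetas{
		{MinTime: 200, MaxTime: 300, Checksum: 2, KB: 4},
		{MinTime: 100, MaxTime: 250, Checksum: 1, KB: 8},
		{MinTime: 100, MaxTime: 250, Checksum: 1, KB: 8},
	}

	// Finalize sorts by (MinTime, MaxTime, Checksum) and drops the duplicate.
	metas = metas.Finalize()

	// Bounds reports the overall (mint, maxt) covered by the remaining metas.
	mint, maxt := metas.Bounds()
	fmt.Println(len(metas), mint, maxt)
}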
type Decoder ¶
Decoder provides decoding methods for the v1 and v2 index file format.
It currently does not contain decoding methods for all entry types but can be extended if there's demand.
func (*Decoder) LabelNamesOffsetsFor ¶
LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series. They are returned in the same order they're stored, which should be sorted lexicographically.
func (*Decoder) LabelValueFor ¶
LabelValueFor decodes a label for a given series.
type FileWriter ¶
type FileWriter struct {
// contains filtered or unexported fields
}
func NewFileWriter ¶
func NewFileWriter(name string) (*FileWriter, error)
func (*FileWriter) AddPadding ¶
func (fw *FileWriter) AddPadding(size int) error
AddPadding adds zero byte padding until the file size is a multiple of size.
func (*FileWriter) Close ¶
func (fw *FileWriter) Close() error
func (*FileWriter) Flush ¶
func (fw *FileWriter) Flush() error
func (*FileWriter) Pos ¶
func (fw *FileWriter) Pos() uint64
func (*FileWriter) Remove ¶
func (fw *FileWriter) Remove() error
func (*FileWriter) Write ¶
func (fw *FileWriter) Write(bufs ...[]byte) error
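A hedged sketch of a typical FileWriter lifecycle (write, pad, close), using only the methods listed above. The import path and the output path are assumptions.

package main

import (
	"log"

	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	fw, err := index.NewFileWriter("/tmp/example-index") // output path is illustrative
	if err != nil {
		log.Fatal(err)
	}

	// Write accepts one or more byte slices; Pos reflects bytes written so far.
	if err := fw.Write([]byte("header"), []byte("payload")); err != nil {
		log.Fatal(err)
	}

	// Pad the file out to the next 16-byte boundary.
	if err := fw.AddPadding(16); err != nil {
		log.Fatal(err)
	}
	log.Printf("position after padding: %d", fw.Pos())

	// Close the writer (Flush is also available separately).
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
}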
type FingerprintOffsets ¶
type FingerprintOffsets [][2]uint64
FingerprintOffsets is a list of (SeriesRef, Fingerprint) tuples.
func (FingerprintOffsets) Range ¶
func (xs FingerprintOffsets) Range(shard ShardAnnotation) (minOffset, maxOffset uint64)
type MemPostings ¶
type MemPostings struct {
// contains filtered or unexported fields
}
MemPostings holds postings list for series ID per label pair. They may be written to out of order. EnsureOrder() must be called once before any reads are done. This allows for quick unordered batch fills on startup.
func NewMemPostings ¶
func NewMemPostings() *MemPostings
NewMemPostings returns a memPostings that's ready for reads and writes.
func NewUnorderedMemPostings ¶
func NewUnorderedMemPostings() *MemPostings
NewUnorderedMemPostings returns a memPostings that is not safe to be read from until EnsureOrder() has been called once.
func (*MemPostings) Add ¶
func (p *MemPostings) Add(id storage.SeriesRef, lset phlaremodel.Labels)
Add a label set to the postings index.
func (*MemPostings) All ¶
func (p *MemPostings) All() Postings
All returns a postings list over all documents ever added.
func (*MemPostings) Delete ¶
func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{})
Delete removes all ids in the given map from the postings lists.
func (*MemPostings) EnsureOrder ¶
func (p *MemPostings) EnsureOrder()
EnsureOrder ensures that all postings lists are sorted. After it returns, all further calls to add and addFor will insert new IDs in a sorted manner.
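A minimal sketch of the unordered-fill pattern described above: batch Add calls out of order, one EnsureOrder, then reads. The import paths and the LabelsFromStrings helper are assumptions.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"

	// Import paths are assumptions based on the phlare/pyroscope repository layout;
	// LabelsFromStrings is assumed to exist, mirroring Prometheus' labels.FromStrings.
	phlaremodel "github.com/grafana/pyroscope/pkg/model"
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	// Unordered postings allow quick out-of-order batch fills, e.g. on startup.
	p := index.NewUnorderedMemPostings()
	p.Add(storage.SeriesRef(2), phlaremodel.LabelsFromStrings("job", "api"))
	p.Add(storage.SeriesRef(1), phlaremodel.LabelsFromStrings("job", "api"))

	// Sort all postings lists once before any reads.
	p.EnsureOrder()

	it := p.Get("job", "api")
	for it.Next() {
		fmt.Println(it.At()) // series refs in sorted order
	}
	if err := it.Err(); err != nil {
		fmt.Println("iteration error:", err)
	}
}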
func (*MemPostings) Get ¶
func (p *MemPostings) Get(name, value string) Postings
Get returns a postings list for the given label pair.
func (*MemPostings) Iter ¶
Iter calls f for each postings list. It aborts if f returns an error and returns it.
func (*MemPostings) LabelNames ¶
func (p *MemPostings) LabelNames() []string
LabelNames returns all the unique label names.
func (*MemPostings) LabelValues ¶
func (p *MemPostings) LabelValues(name string) []string
LabelValues returns label values for the given name.
func (*MemPostings) SortedKeys ¶
func (p *MemPostings) SortedKeys() []labels.Label
SortedKeys returns a list of sorted label keys of the postings.
func (*MemPostings) Stats ¶
func (p *MemPostings) Stats(label string) *PostingsStats
Stats calculates the cardinality statistics from postings.
type PoolChunkMetas ¶
type PoolChunkMetas struct {
// contains filtered or unexported fields
}
var ChunkMetasPool PoolChunkMetas
func (*PoolChunkMetas) Get ¶
func (p *PoolChunkMetas) Get() []ChunkMeta
func (*PoolChunkMetas) Put ¶
func (p *PoolChunkMetas) Put(xs []ChunkMeta)
type Postings ¶
Postings provides iterative access over a postings list.
func EmptyPostings ¶
func EmptyPostings() Postings
EmptyPostings returns a postings list that's always empty. NOTE: Returning EmptyPostings sentinel when Postings struct has no postings is recommended. It triggers optimized flow in other functions like Intersect, Without etc.
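A hypothetical helper illustrating the sentinel convention; the function and its lookup logic are not part of this package.

package example

import (
	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

// postingsForValue is a hypothetical helper illustrating the convention: return
// EmptyPostings() rather than a nil Postings when there is nothing to iterate,
// so downstream consumers can take the optimized path.
func postingsForValue(m *index.MemPostings, name, value string) index.Postings {
	for _, v := range m.LabelValues(name) {
		if v == value {
			return m.Get(name, value)
		}
	}
	return index.EmptyPostings()
}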
func ErrPostings ¶
ErrPostings returns new postings that immediately error.
type PostingsStats ¶
type PostingsStats struct {
	CardinalityMetricsStats []Stat
	CardinalityLabelStats   []Stat
	LabelValueStats         []Stat
	LabelValuePairsStats    []Stat
	NumLabelPairs           int
}
PostingsStats contains cardinality based statistics for postings.
type Reader ¶
type Reader struct {
// contains filtered or unexported fields
}
func NewFileReader ¶
NewFileReader returns a new index reader against the given index file.
func NewReader ¶
NewReader returns a new index reader on the given byte slice. It automatically handles different format versions.
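A hedged sketch of reading series back out of an index file. The import paths, the file name, and the label pair are assumptions; only methods listed in this documentation are used.

package main

import (
	"fmt"
	"log"

	// Import paths are assumptions based on the phlare/pyroscope repository layout.
	phlaremodel "github.com/grafana/pyroscope/pkg/model"
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	r, err := index.NewFileReader("./index") // file name is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Look up all series with job="api"; a nil shard disables shard filtering.
	p, err := r.Postings("job", nil, "api")
	if err != nil {
		log.Fatal(err)
	}

	var (
		lbls phlaremodel.Labels
		chks []index.ChunkMeta
	)
	for p.Next() {
		// Series decodes the labels and chunk metas for the series reference.
		v, err := r.Series(p.At(), &lbls, &chks)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v, lbls, len(chks))
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}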
func (*Reader) LabelNames ¶
LabelNames returns all the unique label names present in the index. TODO(twilkie) implement support for matchers
func (*Reader) LabelNamesFor ¶
LabelNamesFor returns all the label names for the series referred to by IDs. The names returned are sorted.
func (*Reader) LabelValueFor ¶
LabelValueFor returns label value for the given label name in the series referred to by ID.
func (*Reader) LabelValues ¶
LabelValues returns value tuples that exist for the given label name. It is not safe to use the return value beyond the lifetime of the byte slice passed into the Reader. TODO(replay): Support filtering by matchers
func (*Reader) PostingsRanges ¶
PostingsRanges returns a new map of byte ranges in the underlying index file for all postings lists.
func (*Reader) Series ¶
func (r *Reader) Series(id storage.SeriesRef, lbls *phlaremodel.Labels, chks *[]ChunkMeta) (uint64, error)
Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
func (*Reader) SortedLabelValues ¶
SortedLabelValues returns value tuples that exist for the given label name. It is not safe to use the return value beyond the lifetime of the byte slice passed into the Reader.
func (*Reader) SymbolTableSize ¶
SymbolTableSize returns the symbol table size in bytes.
func (*Reader) Symbols ¶
func (r *Reader) Symbols() StringIter
Symbols returns an iterator over the symbols that exist within the index.
type RealByteSlice ¶
type RealByteSlice []byte
func (RealByteSlice) Len ¶
func (b RealByteSlice) Len() int
func (RealByteSlice) Range ¶
func (b RealByteSlice) Range(start, end int) []byte
func (RealByteSlice) Sub ¶
func (b RealByteSlice) Sub(start, end int) ByteSlice
type ShardAnnotation ¶
ShardAnnotation is a convenience struct which holds data from a parsed shard label. Of MUST be a power of 2 to ensure sharding logic works correctly.
func NewShard ¶
func NewShard(x, of uint32) ShardAnnotation
func (ShardAnnotation) Bounds ¶
func (shard ShardAnnotation) Bounds() (model.Fingerprint, model.Fingerprint)
Bounds shows the [minimum, maximum) fingerprints. If there is no maximum fingerprint (for example, the last shard), the maximum fingerprint value is used.
func (ShardAnnotation) Match ¶
func (shard ShardAnnotation) Match(fp model.Fingerprint) bool
Match returns whether a fingerprint belongs to a certain shard. The Shard must be a power of 2. Inclusion in a shard is calculated by determining the arbitrary bit prefix for a shard, then ensuring the fingerprint has the same prefix.
func (ShardAnnotation) RequiredBits ¶
func (shard ShardAnnotation) RequiredBits() uint64
func (ShardAnnotation) String ¶
func (shard ShardAnnotation) String() string
String encodes a ShardAnnotation into a label value.
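A small sketch of working with a ShardAnnotation; the import paths are assumptions. A *ShardAnnotation can also be passed to Reader.Postings to restrict results to a shard.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	// Shard 0 of 4; the "of" count must be a power of 2.
	shard := index.NewShard(0, 4)

	fmt.Println(shard.String())       // label-value encoding of the shard
	fmt.Println(shard.RequiredBits()) // prefix bits needed to distinguish 4 shards

	// Bounds reports the [minimum, maximum) fingerprint range of the shard.
	minFP, maxFP := shard.Bounds()
	fmt.Println(minFP, maxFP)

	// Match reports whether a fingerprint falls into this shard.
	fmt.Println(shard.Match(model.Fingerprint(123456789)))
}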
type ShardedPostings ¶
type ShardedPostings struct {
// contains filtered or unexported fields
}
func NewShardedPostings ¶
func NewShardedPostings(p Postings, shard ShardAnnotation, offsets FingerprintOffsets) *ShardedPostings
Note: shardedPostings can technically return more series than just those that correspond to the requested shard. This is because we do fingerprint/offset sampling in TSDB so we won't know exactly which offsets to start/end at, but will likely buffer a little on each end, so they still need to be checked for shard inclusiveness. For example (below), given a shard, we'll likely return a slight superset of offsets surrounding the shard.

---[shard0]--- # Shard membership
-[--shard0--]- # Series returned by shardedPostings
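A hedged sketch of the re-check described above. It assumes the caller already holds the index's FingerprintOffsets, and that the uint64 returned by Reader.Series is the series fingerprint (suggested, but not stated here, by AddSeries taking an explicit fingerprint); the import paths are also assumptions.

package example

import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/storage"

	// Import paths are assumptions based on the phlare/pyroscope repository layout.
	phlaremodel "github.com/grafana/pyroscope/pkg/model"
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

// seriesInShard filters the slight superset returned by ShardedPostings down to
// the series that truly belong to the shard, as described in the note above.
func seriesInShard(r *index.Reader, p index.Postings, shard index.ShardAnnotation, offsets index.FingerprintOffsets) ([]storage.SeriesRef, error) {
	sp := index.NewShardedPostings(p, shard, offsets)

	var (
		refs []storage.SeriesRef
		lbls phlaremodel.Labels
		chks []index.ChunkMeta
	)
	for sp.Next() {
		// Assumption: the returned uint64 is the series fingerprint.
		fp, err := r.Series(sp.At(), &lbls, &chks)
		if err != nil {
			return nil, err
		}
		// Re-check membership: ShardedPostings may buffer a little on each end.
		if shard.Match(model.Fingerprint(fp)) {
			refs = append(refs, sp.At())
		}
	}
	return refs, sp.Err()
}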
func (*ShardedPostings) At ¶
func (sp *ShardedPostings) At() storage.SeriesRef
At returns the value at the current iterator position.
func (*ShardedPostings) Close ¶
func (sp *ShardedPostings) Close() error
func (*ShardedPostings) Err ¶
func (sp *ShardedPostings) Err() (err error)
Err returns the last error of the iterator.
func (*ShardedPostings) Next ¶
func (sp *ShardedPostings) Next() bool
Next advances the iterator and returns true if another value was found.
type StringIter ¶
type StringIter interface {
	// Next advances the iterator and returns true if another value was found.
	Next() bool

	// At returns the value at the current iterator position.
	At() string

	// Err returns the last error of the iterator.
	Err() error
}
StringIter iterates over a sorted list of strings.
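A minimal sketch of draining a StringIter, here the sorted symbol table of a Reader; the import path and file name are assumptions.

package main

import (
	"fmt"
	"log"

	// Import path is an assumption based on the phlare/pyroscope repository layout.
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

func main() {
	r, err := index.NewFileReader("./index") // file name is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Drain the sorted symbol table using the StringIter contract: Next, At, Err.
	it := r.Symbols()
	for it.Next() {
		fmt.Println(it.At())
	}
	if err := it.Err(); err != nil {
		log.Fatal(err)
	}
}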
type Symbols ¶
type Symbols struct {
// contains filtered or unexported fields
}
func NewSymbols ¶
NewSymbols returns a Symbols object for symbol lookups.
func (Symbols) Iter ¶
func (s Symbols) Iter() StringIter
type TOC ¶
type TOC struct {
	Symbols            uint64
	Series             uint64
	LabelIndices       uint64
	LabelIndicesTable  uint64
	Postings           uint64
	PostingsTable      uint64
	FingerprintOffsets uint64
	Metadata           Metadata
}
TOC represents index Table Of Content that states where each section of index starts.
func NewTOCFromByteSlice ¶
NewTOCFromByteSlice returns parsed TOC from given index byte slice.
type Writer ¶
type Writer struct {
	Version int
	// contains filtered or unexported fields
}
Writer implements the IndexWriter interface for the standard serialization format.
func NewWriter ¶
NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
func (*Writer) AddSeries ¶
func (w *Writer) AddSeries(ref storage.SeriesRef, lset phlaremodel.Labels, fp model.Fingerprint, chunks ...ChunkMeta) error
AddSeries adds the series one at a time along with its chunks. Requires a specific fingerprint to be passed in the case where the "desired" fingerprint differs from what labels.Hash() produces. For example, multitenant TSDBs embed a tenant label, but the actual series has no such label and so the derived fingerprint differs.
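A hedged sketch of a single AddSeries call. The import paths, the LabelsFromStrings and Hash helpers, and all literal values are assumptions; in this sketch the desired fingerprint is simply the labels hash.

package example

import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/storage"

	// Import paths are assumptions based on the phlare/pyroscope repository layout;
	// LabelsFromStrings and Hash are assumed to exist in the phlare model package.
	phlaremodel "github.com/grafana/pyroscope/pkg/model"
	"github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index"
)

// writeOneSeries adds a single series with its chunk metadata. The caller passes
// an explicit fingerprint, which may differ from the hash of the stored labels
// (e.g. when a tenant label is embedded elsewhere); here it is just the hash.
func writeOneSeries(w *index.Writer, ref storage.SeriesRef) error {
	lbls := phlaremodel.LabelsFromStrings("job", "api") // label pair is illustrative
	fp := model.Fingerprint(lbls.Hash())

	return w.AddSeries(ref, lbls, fp,
		index.ChunkMeta{MinTime: 1000, MaxTime: 2000, KB: 16, Checksum: 1},
	)
}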