Documentation ¶
Index ¶
- Constants
- Variables
- func CompareIterators[A, B any](t *testing.T, f func(t *testing.T, a A, b B), a iter.Iterator[A], ...)
- func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual iter.Iterator[T])
- func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, ...) (*Block, []SeriesWithBlooms, [][][]byte)
- func PointerSlice[T any](xs []T) []*T
- func Tar(dst io.Writer, reader BlockReader) error
- func TarCompress(enc compression.Codec, dst io.Writer, reader BlockReader) error
- func UnTar(dst string, r io.Reader) error
- func UnTarCompress(enc compression.Codec, dst string, r io.Reader) error
- type AndLabelMatcher
- type Block
- type BlockBuilder
- type BlockIndex
- type BlockMetadata
- type BlockOptions
- type BlockQuerier
- func (bq *BlockQuerier) Err() error
- func (bq *BlockQuerier) Fuse(inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier
- func (bq *BlockQuerier) Iter() *BlockQuerierIter
- func (bq *BlockQuerier) Metadata() (BlockMetadata, error)
- func (bq *BlockQuerier) Reset() error
- func (bq *BlockQuerier) Schema() (Schema, error)
- type BlockQuerierIter
- type BlockReader
- type BlockWriter
- type Bloom
- type BloomBlock
- type BloomBlockBuilder
- type BloomCreation
- type BloomOffset
- type BloomPageDecoder
- func LazyDecodeBloomPage(r io.Reader, alloc mempool.Allocator, pool compression.ReaderPool, ...) (*BloomPageDecoder, error)
- func LazyDecodeBloomPageNoCompression(r io.Reader, alloc mempool.Allocator, page BloomPageHeader) (*BloomPageDecoder, error)
- func NewBloomPageDecoder(data []byte) *BloomPageDecoder
- type BloomPageHeader
- type BloomPopulatorFunc
- type BloomQuerier
- type BloomRecorder
- type BloomTest
- type BloomTests
- type BloomTokenizer
- type BoundedIter
- type BoundsCheck
- type ByteReader
- type ChecksumPool
- type ChunkRef
- type ChunkRefWithIter
- type ChunkRefs
- func (refs ChunkRefs) Compare(others ChunkRefs, populateInclusive bool) (exclusive ChunkRefs, inclusive ChunkRefs)
- func (refs ChunkRefs) Intersect(others ChunkRefs) ChunkRefs
- func (refs ChunkRefs) Len() int
- func (refs ChunkRefs) Less(i, j int) bool
- func (refs ChunkRefs) Swap(i, j int)
- func (refs ChunkRefs) Union(others ChunkRefs) ChunkRefs
- func (refs ChunkRefs) Unless(others []ChunkRef) ChunkRefs
- type DirectoryBlockReader
- type DirectoryBlockWriter
- func (b *DirectoryBlockWriter) Blooms() (io.WriteCloser, error)
- func (b *DirectoryBlockWriter) Cleanup() error
- func (b *DirectoryBlockWriter) Full(maxSize uint64) (full bool, size int, err error)
- func (b *DirectoryBlockWriter) Index() (io.WriteCloser, error)
- func (b *DirectoryBlockWriter) Init() error
- func (b *DirectoryBlockWriter) Size() (int, error)
- type Field
- type FingerprintBounds
- func (b FingerprintBounds) Bounds() (model.Fingerprint, model.Fingerprint)
- func (b FingerprintBounds) Cmp(fp model.Fingerprint) BoundsCheck
- func (b FingerprintBounds) Equal(target FingerprintBounds) bool
- func (b FingerprintBounds) GetFromThrough() (model.Fingerprint, model.Fingerprint)
- func (b FingerprintBounds) Hash(h hash.Hash32) error
- func (b FingerprintBounds) Intersection(target FingerprintBounds) *FingerprintBounds
- func (b FingerprintBounds) Less(other FingerprintBounds) bool
- func (b FingerprintBounds) Match(fp model.Fingerprint) bool
- func (b FingerprintBounds) Overlaps(target FingerprintBounds) bool
- func (b FingerprintBounds) Range() uint64
- func (b FingerprintBounds) Slice(min, max model.Fingerprint) *FingerprintBounds
- func (b FingerprintBounds) String() string
- func (b FingerprintBounds) Union(target FingerprintBounds) (res []FingerprintBounds)
- func (b FingerprintBounds) Unless(target FingerprintBounds) (res []FingerprintBounds)
- func (b FingerprintBounds) Within(target FingerprintBounds) bool
- type FusedQuerier
- type HeapIterator
- type IndexBuilder
- type LabelMatcher
- type LazyBloomIter
- type LazySeriesIter
- type MemoryBlockWriter
- type MergeBuilder
- type Meta
- type Metrics
- type NoopCloser
- type OrLabelMatcher
- type Output
- type PageWriter
- type PlainLabelMatcher
- type Request
- type Schema
- func (s Schema) Compatible(other Schema) bool
- func (s *Schema) CompressorPool() compression.WriterPool
- func (s *Schema) Decode(dec *encoding.Decbuf) error
- func (s *Schema) DecodeFrom(r io.ReadSeeker) error
- func (s *Schema) DecompressorPool() compression.ReaderPool
- func (s *Schema) Encode(enc *encoding.Encbuf)
- func (s Schema) Len() int
- func (s Schema) String() string
- func (s Schema) Version() Version
- type Series
- type SeriesHeader
- type SeriesPageDecoder
- type SeriesPageHeaderWithOffset
- type SeriesWithBlooms
- type SeriesWithMeta
- type Set
- type StructuredMetadataTokenizer
- type TarEntry
- type UnencodedBlockOptions
- type UnsupportedLabelMatcher
- type V3Builder
- type Version
Constants ¶
const (
	BloomFileName  = "bloom"
	SeriesFileName = "series"
)
const (
ExtTar = ".tar"
)
Variables ¶
var (
	SupportedVersions           = []Version{V3}
	ErrUnsupportedSchemaVersion = errors.New("unsupported schema version")
)
var (
	// Pool of crc32 hash
	Crc32HashPool = ChecksumPool{
		Pool: sync.Pool{
			New: func() interface{} {
				return crc32.New(castagnoliTable)
			},
		},
	}

	// buffer pool for series pages
	// 1KB 2KB 4KB 8KB 16KB 32KB 64KB 128KB
	SeriesPagePool = mempool.NewBytePoolAllocator(1<<10, 128<<10, 2)
)
var DefaultMaxPageSize = 64 << 20 // 64MB
NB(chaudum): Some block pages are way bigger than others (400MiB and bigger), and loading multiple pages into memory in parallel can cause the gateways to OOM. Figure out a decent default maximum page size that we can process.
var (
	// FullBounds is the bounds that covers the entire fingerprint space
	FullBounds = NewBounds(0, model.Fingerprint(math.MaxUint64))
)
var MatchAll = matchAllTest{}
Functions ¶
func CompareIterators ¶ added in v3.1.0
func CompareIterators[A, B any](
	t *testing.T,
	f func(t *testing.T, a A, b B),
	a iter.Iterator[A],
	b iter.Iterator[B],
)
CompareIterators is a testing utility for comparing iterators of different types. It accepts a callback which can be used to assert characteristics of the corresponding elements of the two iterators. It also ensures that the lengths are the same and there are no errors from either iterator.
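A hedged sketch of CompareIterators in a test. The import paths and the slice-backed iterator constructor (iter.NewSliceIter) are assumptions about the surrounding packages; adjust them to the actual layout.

package v1_test

import (
	"strconv"
	"testing"

	iter "github.com/grafana/loki/v3/pkg/iter/v2"        // assumed import path
	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
)

func TestComparePairs(t *testing.T) {
	ints := iter.NewSliceIter([]int{1, 2, 3}) // assumed slice-iterator constructor
	strs := iter.NewSliceIter([]string{"1", "2", "3"})

	// The callback asserts a property of corresponding elements; CompareIterators
	// additionally checks that both iterators have equal length and report no errors.
	v1.CompareIterators(t, func(t *testing.T, a int, b string) {
		if strconv.Itoa(a) != b {
			t.Fatalf("mismatch: %d vs %s", a, b)
		}
	}, ints, strs)
}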
func EqualIterators ¶
func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual iter.Iterator[T])
func MakeBlock ¶
func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*Block, []SeriesWithBlooms, [][][]byte)
func PointerSlice ¶
func PointerSlice[T any](xs []T) []*T
func TarCompress ¶ added in v3.3.0
func TarCompress(enc compression.Codec, dst io.Writer, reader BlockReader) error
func UnTarCompress ¶ added in v3.3.0
func UnTarCompress(enc compression.Codec, dst string, r io.Reader) error
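A hedged sketch of archiving a block directory with Tar (its signature is listed in the index above); the import path and the block directory path are assumptions.

package main

import (
	"log"
	"os"

	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
)

func main() {
	// DirectoryBlockReader reads the "bloom" and "series" files of a block directory.
	reader := v1.NewDirectoryBlockReader("/path/to/block") // assumed block location
	if err := reader.Init(); err != nil {
		log.Fatal(err)
	}

	f, err := os.Create("block" + v1.ExtTar)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Tar writes the block's index and bloom files into a single tar archive;
	// TarCompress does the same but wraps dst with the given compression codec.
	if err := v1.Tar(f, reader); err != nil {
		log.Fatal(err)
	}
}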
Types ¶
type AndLabelMatcher ¶ added in v3.3.0
type AndLabelMatcher struct{ Left, Right LabelMatcher }
AndLabelMatcher represents a logical AND test. Bloom tests must only pass if both of the Left and Right label matcher bloom tests pass.
type Block ¶
type Block struct {
// contains filtered or unexported fields
}
func NewBlock ¶
func NewBlock(reader BlockReader, metrics *Metrics) *Block
func (*Block) LoadHeaders ¶
func (*Block) Metadata ¶
func (b *Block) Metadata() (BlockMetadata, error)
func (*Block) Reader ¶
func (b *Block) Reader() BlockReader
type BlockBuilder ¶
type BlockBuilder = V3Builder
Convenience alias for the most current version.
func NewBlockBuilder ¶
func NewBlockBuilder(opts BlockOptions, writer BlockWriter) (*BlockBuilder, error)
Convenience constructor targeting the most current version.
type BlockIndex ¶
type BlockIndex struct {
// contains filtered or unexported fields
}
Block index is a set of series pages along with the headers for each page
func (*BlockIndex) DecodeHeaders ¶
func (b *BlockIndex) DecodeHeaders(r io.ReadSeeker) (uint32, error)
func (*BlockIndex) NewSeriesPageDecoder ¶
func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHeaderWithOffset, metrics *Metrics) (res *SeriesPageDecoder, err error)
decompress page and return an iterator over the bytes
type BlockMetadata ¶
type BlockMetadata struct {
	Options  BlockOptions
	Series   SeriesHeader
	Checksum uint32
}
type BlockOptions ¶
type BlockOptions struct {
	// Schema determines the Schema of the block and cannot be changed
	// without recreating the block from underlying data
	Schema Schema

	// target size in bytes (decompressed)
	// of each page type
	SeriesPageSize, BloomPageSize, BlockSize uint64

	// UnencodedBlockOptions are not encoded into the block's binary format,
	// but are a helpful way to pass additional options to the block builder.
	// Thus, they're used during construction but not on reads.
	UnencodedBlockOptions UnencodedBlockOptions
}
func NewBlockOptions ¶
func NewBlockOptions(enc compression.Codec, maxBlockSizeBytes, maxBloomSizeBytes uint64) BlockOptions
func NewBlockOptionsFromSchema ¶
func NewBlockOptionsFromSchema(s Schema, maxBloomSizeBytes uint64) BlockOptions
func (*BlockOptions) DecodeFrom ¶
func (b *BlockOptions) DecodeFrom(r io.ReadSeeker) error
func (BlockOptions) Encode ¶
func (b BlockOptions) Encode(enc *encoding.Encbuf)
func (BlockOptions) Len ¶
func (b BlockOptions) Len() int
type BlockQuerier ¶
type BlockQuerier struct {
	*LazySeriesIter
	// contains filtered or unexported fields
}
func NewBlockQuerier ¶
func NewBlockQuerier(b *Block, alloc mempool.Allocator, maxPageSize int) *BlockQuerier
NewBlockQuerier returns a new BlockQuerier for the given block. WARNING: The Allocator implementation you pass determines whether the underlying byte slice of each bloom page is returned to a pool for reuse. Returning pages to a pool is only safe when the bloom bytes don't escape the decoder, i.e. when loading blooms for querying (bloom-gateway), but not for writing (bloom-builder). Therefore, when calling NewBlockQuerier on the write path, always pass the SimpleHeapAllocator implementation of the Allocator interface.
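A hedged read-path sketch combining NewBlockQuerier with Iter (documented below); the import paths and block location are assumptions, and the allocator mirrors the NewBytePoolAllocator call used for SeriesPagePool above.

package main

import (
	"log"

	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
	"github.com/grafana/loki/v3/pkg/util/mempool"        // assumed import path
)

func main() {
	reader := v1.NewDirectoryBlockReader("/path/to/block") // assumed block location
	block := v1.NewBlock(reader, v1.NewMetrics(nil))

	// On the read path a pooled allocator lets bloom pages be relinquished back
	// to the pool; on the write path the doc above calls for SimpleHeapAllocator.
	alloc := mempool.NewBytePoolAllocator(1<<10, 128<<10, 2)

	bq := v1.NewBlockQuerier(block, alloc, v1.DefaultMaxPageSize)
	itr := bq.Iter()
	for itr.Next() {
		swb := itr.At() // *SeriesWithBlooms: the series plus an iterator over its blooms
		_ = swb
	}
	if err := itr.Err(); err != nil {
		log.Fatal(err)
	}
}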
func (*BlockQuerier) Err ¶
func (bq *BlockQuerier) Err() error
func (*BlockQuerier) Fuse ¶
func (bq *BlockQuerier) Fuse(inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier
Fuse combines multiple requests into a single loop iteration over the data set and returns the corresponding outputs. TODO(owen-d): better async control
func (*BlockQuerier) Iter ¶ added in v3.1.0
func (bq *BlockQuerier) Iter() *BlockQuerierIter
Iter returns a new BlockQuerierIter, which changes the iteration type to SeriesWithBlooms, automatically loading the blooms for each series rather than requiring the caller to turn the offset into a `Bloom` via `LoadOffset`.
func (*BlockQuerier) Metadata ¶
func (bq *BlockQuerier) Metadata() (BlockMetadata, error)
func (*BlockQuerier) Reset ¶
func (bq *BlockQuerier) Reset() error
func (*BlockQuerier) Schema ¶
func (bq *BlockQuerier) Schema() (Schema, error)
type BlockQuerierIter ¶ added in v3.1.0
type BlockQuerierIter struct {
*BlockQuerier
}
func (*BlockQuerierIter) At ¶ added in v3.1.0
func (b *BlockQuerierIter) At() *SeriesWithBlooms
func (*BlockQuerierIter) Next ¶ added in v3.1.0
func (b *BlockQuerierIter) Next() bool
type BlockReader ¶
type BlockWriter ¶
type Bloom ¶
type Bloom struct {
filter.ScalableBloomFilter
}
type BloomBlock ¶
type BloomBlock struct {
// contains filtered or unexported fields
}
func (*BloomBlock) BloomPageDecoder ¶
func (b *BloomBlock) BloomPageDecoder(r io.ReadSeeker, alloc mempool.Allocator, pageIdx int, maxPageSize int, metrics *Metrics) (res *BloomPageDecoder, skip bool, err error)
BloomPageDecoder returns a decoder for the given page index. It may skip the page if it's too large. NB(owen-d): if `skip` is true, err _must_ be nil.
func (*BloomBlock) DecodeHeaders ¶
func (b *BloomBlock) DecodeHeaders(r io.ReadSeeker) (uint32, error)
type BloomBlockBuilder ¶
type BloomBlockBuilder struct {
// contains filtered or unexported fields
}
func NewBloomBlockBuilder ¶
func NewBloomBlockBuilder(opts BlockOptions, writer io.WriteCloser) *BloomBlockBuilder
func (*BloomBlockBuilder) Append ¶
func (b *BloomBlockBuilder) Append(bloom *Bloom) (BloomOffset, error)
func (*BloomBlockBuilder) Close ¶
func (b *BloomBlockBuilder) Close() (uint32, error)
func (*BloomBlockBuilder) UnflushedSize ¶ added in v3.3.0
func (b *BloomBlockBuilder) UnflushedSize() int
type BloomCreation ¶ added in v3.1.0
type BloomOffset ¶
type BloomOffset struct {
	Page       int // page number in bloom block
	ByteOffset int // offset to beginning of bloom within page
}
func (*BloomOffset) Decode ¶
func (o *BloomOffset) Decode(dec *encoding.Decbuf, _ Version, previousOffset BloomOffset) error
func (*BloomOffset) Encode ¶
func (o *BloomOffset) Encode(enc *encoding.Encbuf, _ Version, previousOffset BloomOffset)
type BloomPageDecoder ¶
type BloomPageDecoder struct {
// contains filtered or unexported fields
}
Decoder is a seekable, resettable iterator. TODO(owen-d): use buffer pools. The reason we don't currently do this is that the `data` slice currently escapes the decoder via the returned bloom, so we can't know when it's safe to return it to the pool. This happens via `data ([]byte) -> dec (*encoding.Decbuf) -> bloom (Bloom)`, where the final Bloom holds a reference to the data slice. We could optimize this by encoding the mode (read, write) into our structs and doing copy-on-write shenanigans, but I'm avoiding this for now.
func LazyDecodeBloomPage ¶
func LazyDecodeBloomPage(r io.Reader, alloc mempool.Allocator, pool compression.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error)
func LazyDecodeBloomPageNoCompression ¶
func LazyDecodeBloomPageNoCompression(r io.Reader, alloc mempool.Allocator, page BloomPageHeader) (*BloomPageDecoder, error)
shortcut to skip allocations when we know the page is not compressed
func NewBloomPageDecoder ¶
func NewBloomPageDecoder(data []byte) *BloomPageDecoder
func (*BloomPageDecoder) At ¶
func (d *BloomPageDecoder) At() *Bloom
func (*BloomPageDecoder) Err ¶
func (d *BloomPageDecoder) Err() error
func (*BloomPageDecoder) Next ¶
func (d *BloomPageDecoder) Next() bool
func (*BloomPageDecoder) Relinquish ¶
func (d *BloomPageDecoder) Relinquish(alloc mempool.Allocator)
Relinquish returns the underlying byte slice to the pool for efficiency. It's intended to be used as a perf optimization. This can only safely be used when the underlying bloom bytes don't escape the decoder: on reads in the bloom-gw but not in the bloom-builder
func (*BloomPageDecoder) Reset ¶
func (d *BloomPageDecoder) Reset()
func (*BloomPageDecoder) Seek ¶
func (d *BloomPageDecoder) Seek(offset int)
type BloomPageHeader ¶
type BloomPageHeader struct {
N, Offset, Len, DecompressedLen int
}
func (*BloomPageHeader) Encode ¶
func (h *BloomPageHeader) Encode(enc *encoding.Encbuf)
type BloomPopulatorFunc ¶ added in v3.1.0
type BloomPopulatorFunc func(series *Series, preExistingBlooms iter.SizedIterator[*Bloom], chunksToAdd ChunkRefs, ch chan *BloomCreation)
type BloomQuerier ¶
type BloomQuerier interface {
Seek(BloomOffset) (*Bloom, error)
}
type BloomRecorder ¶ added in v3.1.0
type BloomRecorder struct {
// contains filtered or unexported fields
}
func NewBloomRecorder ¶ added in v3.1.0
func NewBloomRecorder(ctx context.Context, id string) *BloomRecorder
BloomRecorder records the results of a bloom search
func (*BloomRecorder) Merge ¶ added in v3.1.0
func (r *BloomRecorder) Merge(other *BloomRecorder)
type BloomTest ¶
type BloomTest interface {
	Matches(series labels.Labels, bloom filter.Checker) bool
	MatchesWithPrefixBuf(series labels.Labels, bloom filter.Checker, buf []byte, prefixLen int) bool
}
func LabelMatchersToBloomTest ¶ added in v3.3.0
func LabelMatchersToBloomTest(matchers ...LabelMatcher) BloomTest
type BloomTests ¶
type BloomTests []BloomTest
func (BloomTests) MatchesWithPrefixBuf ¶
type BloomTokenizer ¶
type BloomTokenizer struct {
// contains filtered or unexported fields
}
BloomTokenizer is a utility that converts either Loki chunks or individual lines into tokens. These tokens are n-grams, representing adjacent letters, that are used to populate a bloom filter (https://en.wikipedia.org/wiki/Bloom_filter). Bloom filters are used for faster lookups of log lines.
func NewBloomTokenizer ¶
func NewBloomTokenizer(maxBloomSize int, metrics *Metrics, logger log.Logger) *BloomTokenizer
NewBloomTokenizer returns a new instance of the Bloom Tokenizer. Warning: the returned tokens reuse the same byte slice to reduce allocations. This has three consequences: 1) the token slices generated must not be mutated externally; 2) the token slice must not be used after the next call to `Tokens()`, as it will be repopulated; 3) this is not thread safe.
func (*BloomTokenizer) Populate ¶
func (bt *BloomTokenizer) Populate(blooms v2iter.SizedIterator[*Bloom], chks v2iter.Iterator[ChunkRefWithIter], ch chan *BloomCreation)
Populate fills one or more bloom filters with the tokens from the given chunks. It is called once per series.
type BoundedIter ¶
unused, but illustrative
func NewBoundedIter ¶
func NewBoundedIter[V any](itr iter.Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V]
func (*BoundedIter[V]) Next ¶
func (bi *BoundedIter[V]) Next() bool
type ByteReader ¶
type ByteReader struct {
// contains filtered or unexported fields
}
In memory reader
func NewByteReader ¶
func NewByteReader(index, blooms *bytes.Buffer) *ByteReader
func (*ByteReader) Blooms ¶
func (r *ByteReader) Blooms() (io.ReadSeeker, error)
func (*ByteReader) Cleanup ¶ added in v3.2.0
func (r *ByteReader) Cleanup() error
func (*ByteReader) Index ¶
func (r *ByteReader) Index() (io.ReadSeeker, error)
func (*ByteReader) TarEntries ¶
func (r *ByteReader) TarEntries() (iter.Iterator[TarEntry], error)
type ChecksumPool ¶
func (*ChecksumPool) Get ¶
func (p *ChecksumPool) Get() hash.Hash32
func (*ChecksumPool) Put ¶
func (p *ChecksumPool) Put(h hash.Hash32)
type ChunkRefWithIter ¶
type ChunkRefWithIter struct {
	Ref ChunkRef
	Itr iter.EntryIterator
}
ChunkRefWithIter is a wrapper around a ChunkRef and an EntryIterator.
type ChunkRefs ¶
type ChunkRefs []ChunkRef
func (ChunkRefs) Compare ¶
func (refs ChunkRefs) Compare(others ChunkRefs, populateInclusive bool) (exclusive ChunkRefs, inclusive ChunkRefs)
Compare returns two sets of chunk refs (both inputs must be sorted): 1) the chunk refs which are in the original set but not in the other set, and 2) the chunk refs which are in both sets. The `populateInclusive` argument allows skipping population of the inclusive set when it is not needed. TODO(owen-d): can be improved to use binary search when one list is significantly larger than the other.
type DirectoryBlockReader ¶
type DirectoryBlockReader struct {
// contains filtered or unexported fields
}
File reader
func NewDirectoryBlockReader ¶
func NewDirectoryBlockReader(dir string) *DirectoryBlockReader
func (*DirectoryBlockReader) Blooms ¶
func (r *DirectoryBlockReader) Blooms() (io.ReadSeeker, error)
func (*DirectoryBlockReader) Cleanup ¶ added in v3.2.0
func (r *DirectoryBlockReader) Cleanup() error
func (*DirectoryBlockReader) Index ¶
func (r *DirectoryBlockReader) Index() (io.ReadSeeker, error)
func (*DirectoryBlockReader) Init ¶
func (r *DirectoryBlockReader) Init() error
func (*DirectoryBlockReader) TarEntries ¶
func (r *DirectoryBlockReader) TarEntries() (iter.Iterator[TarEntry], error)
type DirectoryBlockWriter ¶
type DirectoryBlockWriter struct {
// contains filtered or unexported fields
}
Directory based impl
func NewDirectoryBlockWriter ¶
func NewDirectoryBlockWriter(dir string) *DirectoryBlockWriter
func (*DirectoryBlockWriter) Blooms ¶
func (b *DirectoryBlockWriter) Blooms() (io.WriteCloser, error)
func (*DirectoryBlockWriter) Cleanup ¶ added in v3.2.0
func (b *DirectoryBlockWriter) Cleanup() error
func (*DirectoryBlockWriter) Full ¶ added in v3.1.0
func (b *DirectoryBlockWriter) Full(maxSize uint64) (full bool, size int, err error)
func (*DirectoryBlockWriter) Index ¶
func (b *DirectoryBlockWriter) Index() (io.WriteCloser, error)
func (*DirectoryBlockWriter) Init ¶
func (b *DirectoryBlockWriter) Init() error
func (*DirectoryBlockWriter) Size ¶
func (b *DirectoryBlockWriter) Size() (int, error)
type FingerprintBounds ¶
type FingerprintBounds struct {
Min, Max model.Fingerprint
}
func BoundsFromProto ¶
func BoundsFromProto(pb logproto.FPBounds) FingerprintBounds
func NewBounds ¶
func NewBounds(min, max model.Fingerprint) FingerprintBounds
func ParseBoundsFromAddr ¶
func ParseBoundsFromAddr(s string) (FingerprintBounds, error)
ParseBoundsFromAddr parses a fingerprint bounds from a string
func ParseBoundsFromParts ¶
func ParseBoundsFromParts(a, b string) (FingerprintBounds, error)
ParseBoundsFromParts parses fingerprint bounds from already-separated strings
func (FingerprintBounds) Bounds ¶
func (b FingerprintBounds) Bounds() (model.Fingerprint, model.Fingerprint)
Bounds returns the inclusive bounds [from,through]
func (FingerprintBounds) Cmp ¶
func (b FingerprintBounds) Cmp(fp model.Fingerprint) BoundsCheck
Cmp returns the fingerprint's position relative to the bounds
func (FingerprintBounds) Equal ¶
func (b FingerprintBounds) Equal(target FingerprintBounds) bool
Returns whether the fingerprint bounds is equal to the target bounds
func (FingerprintBounds) GetFromThrough ¶
func (b FingerprintBounds) GetFromThrough() (model.Fingerprint, model.Fingerprint)
GetFromThrough implements TSDB's FingerprintFilter interface. NB(owen-d): it adjusts the result to return `[from,through)` instead of the `[from,through]` range that the fingerprint bounds struct tracks.
func (FingerprintBounds) Intersection ¶
func (b FingerprintBounds) Intersection(target FingerprintBounds) *FingerprintBounds
Intersection returns the intersection of the two bounds
func (FingerprintBounds) Less ¶
func (b FingerprintBounds) Less(other FingerprintBounds) bool
func (FingerprintBounds) Match ¶
func (b FingerprintBounds) Match(fp model.Fingerprint) bool
Match implements TSDB's FingerprintFilter interface
func (FingerprintBounds) Overlaps ¶
func (b FingerprintBounds) Overlaps(target FingerprintBounds) bool
Overlaps returns whether the bounds (partially) overlap with the target bounds
func (FingerprintBounds) Range ¶
func (b FingerprintBounds) Range() uint64
Range returns the number of fingerprints in the bounds
func (FingerprintBounds) Slice ¶
func (b FingerprintBounds) Slice(min, max model.Fingerprint) *FingerprintBounds
Slice returns a new fingerprint bounds clipped to the target bounds or nil if there is no overlap
func (FingerprintBounds) String ¶
func (b FingerprintBounds) String() string
String returns the string representation of the fingerprint bounds for use in content addressable storage. TODO(owen-d): incorporate this into the schema so we can change it, similar to `{,Parse}ExternalKey`
func (FingerprintBounds) Union ¶
func (b FingerprintBounds) Union(target FingerprintBounds) (res []FingerprintBounds)
Union returns the union of the two bounds
func (FingerprintBounds) Unless ¶
func (b FingerprintBounds) Unless(target FingerprintBounds) (res []FingerprintBounds)
Unless returns the subspace of itself which does not intersect with the target bounds
func (FingerprintBounds) Within ¶
func (b FingerprintBounds) Within(target FingerprintBounds) bool
Within returns whether the bounds are fully contained within the target bounds
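A small sketch combining the bounds operations above; model.Fingerprint comes from github.com/prometheus/common/model and the v1 import path is an assumption.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
)

func main() {
	a := v1.NewBounds(0, 0x0fff)
	b := v1.NewBounds(0x0800, 0xffff)

	fmt.Println(a.Overlaps(b))     // true: the ranges share [0x0800, 0x0fff]
	fmt.Println(a.Intersection(b)) // the shared sub-range (nil if there were no overlap)
	fmt.Println(a.Union(b))        // one contiguous range covering both inputs
	fmt.Println(a.Unless(b))       // the part of a not covered by b

	fmt.Println(a.Cmp(model.Fingerprint(0x2000))) // the fingerprint's position relative to a
}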
type FusedQuerier ¶
type FusedQuerier struct {
// contains filtered or unexported fields
}
func NewFusedQuerier ¶
func NewFusedQuerier(bq *BlockQuerier, inputs []iter.PeekIterator[Request], logger log.Logger) *FusedQuerier
func (*FusedQuerier) Run ¶
func (fq *FusedQuerier) Run() error
type HeapIterator ¶
type HeapIterator[T any] struct {
	// contains filtered or unexported fields
}
HeapIterator is a heap implementation of BlockQuerier backed by multiple blocks. It is used to merge multiple blocks into a single ordered querier. NB(owen-d): it uses a custom heap implementation because Pop() only returns a single value of the top-most iterator, rather than the iterator itself.
func NewHeapIterForSeriesWithBloom ¶
func NewHeapIterForSeriesWithBloom(queriers ...iter.PeekIterator[*SeriesWithBlooms]) *HeapIterator[*SeriesWithBlooms]
func NewHeapIterator ¶
func NewHeapIterator[T any](less func(T, T) bool, itrs ...iter.PeekIterator[T]) *HeapIterator[T]
func (*HeapIterator[T]) At ¶
func (mbq *HeapIterator[T]) At() T
func (*HeapIterator[T]) Err ¶
func (mbq *HeapIterator[T]) Err() error
TODO(owen-d): don't swallow this error
func (HeapIterator[T]) Len ¶
func (mbq HeapIterator[T]) Len() int
func (*HeapIterator[T]) Less ¶
func (mbq *HeapIterator[T]) Less(i, j int) bool
func (*HeapIterator[T]) Next ¶
func (mbq *HeapIterator[T]) Next() (ok bool)
func (*HeapIterator[T]) Swap ¶
func (mbq *HeapIterator[T]) Swap(a, b int)
type IndexBuilder ¶
type IndexBuilder struct {
// contains filtered or unexported fields
}
func NewIndexBuilder ¶
func NewIndexBuilder(opts BlockOptions, writer io.WriteCloser) *IndexBuilder
func (*IndexBuilder) Append ¶
func (b *IndexBuilder) Append(series SeriesWithMeta) error
func (*IndexBuilder) Close ¶
func (b *IndexBuilder) Close() (uint32, error)
func (*IndexBuilder) UnflushedSize ¶ added in v3.3.0
func (b *IndexBuilder) UnflushedSize() int
func (*IndexBuilder) WriteOpts ¶
func (b *IndexBuilder) WriteOpts() error
type LabelMatcher ¶ added in v3.3.0
type LabelMatcher interface {
// contains filtered or unexported methods
}
LabelMatcher represents bloom tests for key-value pairs, mapped from LabelFilterExprs from the AST.
func ExtractTestableLabelMatchers ¶ added in v3.3.0
func ExtractTestableLabelMatchers(expr syntax.Expr) []LabelMatcher
ExtractTestableLabelMatchers extracts label matchers from the label filters in an expression. The resulting label matchers can then be used for testing against bloom filters. Only label matchers before the first parse stage are included.
Unsupported LabelFilterExprs map to an UnsupportedLabelMatcher, for which bloom tests should always pass.
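A hedged sketch of turning a LogQL query into a bloom test; the logql/syntax and v1 import paths, and the use of syntax.ParseExpr, are assumptions.

package main

import (
	"log"

	"github.com/grafana/loki/v3/pkg/logql/syntax"        // assumed import path
	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
)

func main() {
	// Only label filters before the first parse stage are testable against blooms,
	// so `detected="true"` qualifies here while `level="error"` (after | logfmt) does not.
	expr, err := syntax.ParseExpr(`{app="foo"} | detected="true" | logfmt | level="error"`)
	if err != nil {
		log.Fatal(err)
	}

	matchers := v1.ExtractTestableLabelMatchers(expr)
	test := v1.LabelMatchersToBloomTest(matchers...)
	_ = test // e.g. assign to Request.Search, or evaluate directly via test.Matches(...)
}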
type LazyBloomIter ¶
type LazyBloomIter struct {
// contains filtered or unexported fields
}
func NewLazyBloomIter ¶
func NewLazyBloomIter(b *Block, alloc mempool.Allocator, maxSize int) *LazyBloomIter
NewLazyBloomIter returns a new lazy bloom iterator. The provided Allocator determines whether the underlying byte slice of each bloom page is returned to a pool for efficiency; this is only safe when the bloom bytes don't escape the decoder.
func (*LazyBloomIter) At ¶
func (it *LazyBloomIter) At() *Bloom
func (*LazyBloomIter) Err ¶
func (it *LazyBloomIter) Err() error
func (*LazyBloomIter) LoadOffset ¶ added in v3.1.0
func (it *LazyBloomIter) LoadOffset(offset BloomOffset) (skip bool)
LoadOffset returns whether the bloom page at the given offset should be skipped (due to being too large) _and_ there's no error
func (*LazyBloomIter) Next ¶
func (it *LazyBloomIter) Next() bool
func (*LazyBloomIter) Reset ¶ added in v3.2.0
func (it *LazyBloomIter) Reset()
type LazySeriesIter ¶
type LazySeriesIter struct {
// contains filtered or unexported fields
}
func NewLazySeriesIter ¶
func NewLazySeriesIter(b *Block) *LazySeriesIter
Decodes series pages one at a time and iterates through them
func (*LazySeriesIter) At ¶
func (it *LazySeriesIter) At() *SeriesWithMeta
func (*LazySeriesIter) Err ¶
func (it *LazySeriesIter) Err() error
func (*LazySeriesIter) Next ¶
func (it *LazySeriesIter) Next() bool
func (*LazySeriesIter) Seek ¶
func (it *LazySeriesIter) Seek(fp model.Fingerprint) error
Seek returns an iterator over the pages where the first fingerprint is >= fp
type MemoryBlockWriter ¶
type MemoryBlockWriter struct {
// contains filtered or unexported fields
}
in memory impl
func NewMemoryBlockWriter ¶
func NewMemoryBlockWriter(index, blooms *bytes.Buffer) MemoryBlockWriter
func (MemoryBlockWriter) Blooms ¶
func (b MemoryBlockWriter) Blooms() (io.WriteCloser, error)
func (MemoryBlockWriter) Cleanup ¶ added in v3.2.0
func (b MemoryBlockWriter) Cleanup() error
func (MemoryBlockWriter) Full ¶ added in v3.1.0
func (b MemoryBlockWriter) Full(maxSize uint64) (full bool, size int, err error)
func (MemoryBlockWriter) Index ¶
func (b MemoryBlockWriter) Index() (io.WriteCloser, error)
func (MemoryBlockWriter) Size ¶
func (b MemoryBlockWriter) Size() (int, error)
type MergeBuilder ¶
type MergeBuilder struct {
// contains filtered or unexported fields
}
Simplistic implementation of a merge builder that builds a single block from a list of blocks and a store of series.
func NewMergeBuilder ¶
func NewMergeBuilder(
	blocks iter.Iterator[*SeriesWithBlooms],
	store iter.Iterator[*Series],
	populate BloomPopulatorFunc,
	metrics *Metrics,
	logger log.Logger,
) *MergeBuilder
NewMergeBuilder is a specific builder which does the following:
- merges multiple blocks into a single ordered querier; when two blocks contain the same series, it prefers the one with the most chunks already indexed
- iterates through the store, adding chunks to the relevant blooms via the `populate` argument
func (*MergeBuilder) Build ¶
func (mb *MergeBuilder) Build(builder *BlockBuilder) (checksum uint32, totalBytes int, err error)
type Meta ¶ added in v3.3.0
type Meta struct {
	Fields  Set[Field]
	Offsets []BloomOffset
}
type Metrics ¶
type Metrics struct {
// contains filtered or unexported fields
}
func NewMetrics ¶
func NewMetrics(r prometheus.Registerer) *Metrics
type NoopCloser ¶
func NewNoopCloser ¶
func NewNoopCloser(w io.Writer) NoopCloser
func (NoopCloser) Close ¶
func (n NoopCloser) Close() error
type OrLabelMatcher ¶ added in v3.3.0
type OrLabelMatcher struct{ Left, Right LabelMatcher }
OrLabelMatcher represents a logical OR test. Bloom tests must only pass if one of the Left or Right label matcher bloom tests pass.
type Output ¶
type Output struct {
	Fp       model.Fingerprint
	Removals ChunkRefs
}
Output represents a chunk that failed to pass all searches and must be downloaded
type PageWriter ¶
type PageWriter struct {
// contains filtered or unexported fields
}
func NewPageWriter ¶
func NewPageWriter(targetSize int) PageWriter
func (*PageWriter) Add ¶
func (w *PageWriter) Add(item []byte) (offset int)
func (*PageWriter) Count ¶
func (w *PageWriter) Count() int
func (*PageWriter) Reset ¶
func (w *PageWriter) Reset()
func (*PageWriter) SpaceFor ¶
func (w *PageWriter) SpaceFor(numBytes int) bool
func (*PageWriter) UnflushedSize ¶ added in v3.3.0
func (w *PageWriter) UnflushedSize() int
type PlainLabelMatcher ¶ added in v3.3.0
type PlainLabelMatcher struct{ Key, Value string }
PlainLabelMatcher represents a direct key-value matcher. Bloom tests must only pass if the key-value pair exists in the bloom.
type Request ¶
type Request struct {
	Fp       model.Fingerprint
	Labels   labels.Labels
	Chks     ChunkRefs
	Search   BloomTest
	Response chan<- Output
	Recorder *BloomRecorder
}
type Schema ¶
type Schema struct {
// contains filtered or unexported fields
}
func (Schema) Compatible ¶
func (s Schema) Compatible(other Schema) bool
func (*Schema) CompressorPool ¶
func (s *Schema) CompressorPool() compression.WriterPool
func (*Schema) DecodeFrom ¶
func (s *Schema) DecodeFrom(r io.ReadSeeker) error
func (*Schema) DecompressorPool ¶
func (s *Schema) DecompressorPool() compression.ReaderPool
type Series ¶
type Series struct {
	Fingerprint model.Fingerprint
	Chunks      ChunkRefs
}
type SeriesHeader ¶
type SeriesHeader struct {
	NumSeries         int
	Bounds            FingerprintBounds
	FromTs, ThroughTs model.Time
}
func (*SeriesHeader) Encode ¶
func (h *SeriesHeader) Encode(enc *encoding.Encbuf)
type SeriesPageDecoder ¶
type SeriesPageDecoder struct {
// contains filtered or unexported fields
}
can decode a series page one item at a time, useful when we don't need to iterate an entire page
func (*SeriesPageDecoder) At ¶
func (d *SeriesPageDecoder) At() (res *SeriesWithMeta)
func (*SeriesPageDecoder) Err ¶
func (d *SeriesPageDecoder) Err() error
func (*SeriesPageDecoder) Next ¶
func (d *SeriesPageDecoder) Next() bool
func (*SeriesPageDecoder) Reset ¶
func (d *SeriesPageDecoder) Reset()
func (*SeriesPageDecoder) Seek ¶
func (d *SeriesPageDecoder) Seek(fp model.Fingerprint)
type SeriesPageHeaderWithOffset ¶
type SeriesPageHeaderWithOffset struct {
Offset, Len, DecompressedLen int
SeriesHeader
}
Header for a series page
func (*SeriesPageHeaderWithOffset) Decode ¶
func (h *SeriesPageHeaderWithOffset) Decode(dec *encoding.Decbuf) error
func (*SeriesPageHeaderWithOffset) Encode ¶
func (h *SeriesPageHeaderWithOffset) Encode(enc *encoding.Encbuf)
type SeriesWithBlooms ¶ added in v3.1.0
type SeriesWithBlooms struct {
	Series *SeriesWithMeta
	Blooms iter.SizedIterator[*Bloom]
}
func MkBasicSeriesWithBlooms ¶
func MkBasicSeriesWithBlooms(nSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) ([]SeriesWithBlooms, [][][]byte)
type SeriesWithMeta ¶ added in v3.3.0
func (*SeriesWithMeta) Decode ¶ added in v3.3.0
func (s *SeriesWithMeta) Decode(
	dec *encoding.Decbuf,
	version Version,
	previousFp model.Fingerprint,
	previousOffset BloomOffset,
) (model.Fingerprint, BloomOffset, error)
func (*SeriesWithMeta) Encode ¶ added in v3.3.0
func (s *SeriesWithMeta) Encode(
	enc *encoding.Encbuf,
	version Version,
	previousFp model.Fingerprint,
	previousOffset BloomOffset,
) BloomOffset
type Set ¶ added in v3.3.0
type Set[V comparable] struct {
	// contains filtered or unexported fields
}
func NewSet ¶ added in v3.3.0
func NewSet[V comparable](size int) Set[V]
func NewSetFromLiteral ¶ added in v3.3.0
func NewSetFromLiteral[V comparable](v ...V) Set[V]
type StructuredMetadataTokenizer ¶ added in v3.3.0
type StructuredMetadataTokenizer struct {
// contains filtered or unexported fields
}
func NewStructuredMetadataTokenizer ¶ added in v3.3.0
func NewStructuredMetadataTokenizer(prefix string) *StructuredMetadataTokenizer
func (*StructuredMetadataTokenizer) Tokens ¶ added in v3.3.0
func (t *StructuredMetadataTokenizer) Tokens(kv push.LabelAdapter) iter.Iterator[string]
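A hedged sketch of tokenizing a structured-metadata key-value pair; the push and v1 import paths, the prefix value, and the LabelAdapter literal are assumptions used only for illustration.

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/push"                   // assumed import path
	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
)

func main() {
	// The prefix is typically derived from the chunk the metadata belongs to.
	tok := v1.NewStructuredMetadataTokenizer("chunk-prefix") // illustrative prefix

	itr := tok.Tokens(push.LabelAdapter{Name: "traceID", Value: "abc123"})
	for itr.Next() {
		fmt.Println(itr.At()) // each token produced for this key-value pair
	}
	if err := itr.Err(); err != nil {
		fmt.Println("error:", err)
	}
}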
type UnencodedBlockOptions ¶ added in v3.1.0
type UnencodedBlockOptions struct {
MaxBloomSizeBytes uint64
}
Options for the block which are not encoded into the block itself.
type UnsupportedLabelMatcher ¶ added in v3.3.0
type UnsupportedLabelMatcher struct{}
UnsupportedLabelMatcher represents a label matcher which could not be mapped. Bloom tests for UnsupportedLabelMatchers must always pass.
type V3Builder ¶ added in v3.3.0
type V3Builder struct {
// contains filtered or unexported fields
}
func NewBlockBuilderV3 ¶ added in v3.3.0
func NewBlockBuilderV3(opts BlockOptions, writer BlockWriter) (*V3Builder, error)
func (*V3Builder) AddBloom ¶ added in v3.3.0
func (b *V3Builder) AddBloom(bloom *Bloom) (BloomOffset, error)
func (*V3Builder) AddSeries ¶ added in v3.3.0
func (b *V3Builder) AddSeries(series Series, offsets []BloomOffset, fields Set[Field]) (bool, error)
AddSeries adds a series to the block. It returns true if, after adding the series, the block is full.
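A hedged sketch of driving a V3Builder directly: append a series' blooms, then the series entry pointing at them, and check whether the block is full. The compression codec constant, import paths, and the placeholder Bloom/Series values are assumptions; real blooms come from the BloomTokenizer.

package main

import (
	"bytes"
	"log"

	"github.com/grafana/loki/v3/pkg/compression"         // assumed import path
	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" // assumed import path
	"github.com/prometheus/common/model"
)

func main() {
	var index, blooms bytes.Buffer
	writer := v1.NewMemoryBlockWriter(&index, &blooms)

	// 64MB max block size, 16MB max bloom size; compression.None is an assumed codec constant.
	opts := v1.NewBlockOptions(compression.None, 64<<20, 16<<20)

	builder, err := v1.NewBlockBuilderV3(opts, writer)
	if err != nil {
		log.Fatal(err)
	}

	offset, err := builder.AddBloom(&v1.Bloom{}) // placeholder bloom for illustration
	if err != nil {
		log.Fatal(err)
	}

	series := v1.Series{Fingerprint: model.Fingerprint(1)} // chunk refs omitted for brevity
	full, err := builder.AddSeries(series, []v1.BloomOffset{offset}, v1.NewSet[v1.Field](0))
	if err != nil {
		log.Fatal(err)
	}
	_ = full // when true, finish this block and start a new one
}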