Documentation ¶
Overview ¶
Copyright 2021 The Prometheus Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func NewIndexCompactor() compactor.IndexCompactor
- func NewStore(name, prefix string, indexShipperCfg indexshipper.Config, ...) (index.ReaderWriter, func(), error)
- func NewTSDBIndexFromFile(location string) (*TSDBIndex, GetRawFileReaderFunc, error)
- func OpenShippableTSDB(p string) (shipperindex.Index, error)
- func Overlap(a, b Bounded) bool
- func PostingsForMatchers(ix IndexReader, fpFilter index.FingerprintFilter, ms ...*labels.Matcher) (index.Postings, error)
- func RebuildWithVersion(ctx context.Context, path string, desiredVer int) (shipperindex.Index, error)
- type Bounded
- type Builder
- func (b *Builder) AddSeries(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta)
- func (b *Builder) Build(ctx context.Context, scratchDir string, ...) (id Identifier, err error)
- func (b *Builder) DropChunk(streamID string, chk index.ChunkMeta) (bool, error)
- func (b *Builder) FinalizeChunks()
- func (b *Builder) InsertChunk(streamID string, chk index.ChunkMeta) error
- type ChunkMetasRecord
- type ChunkRef
- type GetRawFileReaderFunc
- type Head
- type HeadManager
- type Identifier
- type Index
- type IndexClient
- func (c *IndexClient) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, ...) ([]logproto.ChunkRef, error)
- func (c *IndexClient) GetSeries(ctx context.Context, userID string, from, through model.Time, ...) ([]labels.Labels, error)
- func (c *IndexClient) GetShards(ctx context.Context, userID string, from, through model.Time, ...) (*logproto.ShardsResponse, error)
- func (c *IndexClient) HasForSeries(_, _ model.Time) (sharding.ForSeries, bool)
- func (c *IndexClient) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, _ string, ...) ([]string, error)
- func (c *IndexClient) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, _ string, ...) ([]string, error)
- func (c *IndexClient) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
- func (c *IndexClient) Stats(ctx context.Context, userID string, from, through model.Time, ...) (*stats.Stats, error)
- func (c *IndexClient) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, ...) (*logproto.VolumeResponse, error)
- type IndexClientOptions
- type IndexIter
- type IndexReader
- type IndexSlice
- type IndexStatsAccumulator
- type IndexWriter
- type LazyIndex
- func (f LazyIndex) Bounds() (model.Time, model.Time)
- func (f LazyIndex) Close() error
- func (f LazyIndex) ForSeries(ctx context.Context, userID string, fpFilter index.FingerprintFilter, ...) error
- func (f LazyIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, ...) ([]ChunkRef, error)
- func (f LazyIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, ...) ([]string, error)
- func (f LazyIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, ...) ([]string, error)
- func (f LazyIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, ...) ([]Series, error)
- func (f LazyIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
- func (f LazyIndex) Stats(ctx context.Context, userID string, from, through model.Time, ...) error
- func (f LazyIndex) Volume(ctx context.Context, userID string, from, through model.Time, ...) error
- type Limits
- type Metrics
- type MultiIndex
- func (i *MultiIndex) Bounds() (model.Time, model.Time)
- func (i *MultiIndex) Close() error
- func (i MultiIndex) ForSeries(ctx context.Context, userID string, fpFilter index.FingerprintFilter, ...) error
- func (i *MultiIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, ...) ([]ChunkRef, error)
- func (i *MultiIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, ...) ([]string, error)
- func (i *MultiIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, ...) ([]string, error)
- func (i *MultiIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, ...) ([]Series, error)
- func (i *MultiIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
- func (i *MultiIndex) Stats(ctx context.Context, userID string, from, through model.Time, ...) error
- func (i *MultiIndex) Volume(ctx context.Context, userID string, from, through model.Time, ...) error
- type MultiTenantIndex
- func (m *MultiTenantIndex) Bounds() (model.Time, model.Time)
- func (m *MultiTenantIndex) Close() error
- func (m *MultiTenantIndex) ForSeries(ctx context.Context, userID string, fpFilter index.FingerprintFilter, ...) error
- func (m *MultiTenantIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, ...) ([]ChunkRef, error)
- func (m *MultiTenantIndex) LabelNames(ctx context.Context, userID string, from, through model.Time, ...) ([]string, error)
- func (m *MultiTenantIndex) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, ...) ([]string, error)
- func (m *MultiTenantIndex) Series(ctx context.Context, userID string, from, through model.Time, res []Series, ...) ([]Series, error)
- func (m *MultiTenantIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
- func (m *MultiTenantIndex) Stats(ctx context.Context, userID string, from, through model.Time, ...) error
- func (m *MultiTenantIndex) Volume(ctx context.Context, userID string, from, through model.Time, ...) error
- type MultitenantTSDBIdentifier
- type NoopIndex
- func (NoopIndex) Bounds() (_, through model.Time)
- func (NoopIndex) Close() error
- func (NoopIndex) ForSeries(_ context.Context, _ string, _ index.FingerprintFilter, _ model.Time, ...) error
- func (NoopIndex) GetChunkRefs(_ context.Context, _ string, _, _ model.Time, _ []ChunkRef, ...) ([]ChunkRef, error)
- func (NoopIndex) LabelNames(_ context.Context, _ string, _, _ model.Time, _ ...*labels.Matcher) ([]string, error)
- func (NoopIndex) LabelValues(_ context.Context, _ string, _, _ model.Time, _ string, _ ...*labels.Matcher) ([]string, error)
- func (NoopIndex) Series(_ context.Context, _ string, _, _ model.Time, _ []Series, ...) ([]Series, error)
- func (NoopIndex) SetChunkFilterer(_ chunk.RequestChunkFilterer)
- func (NoopIndex) Stats(_ context.Context, _ string, _, _ model.Time, _ IndexStatsAccumulator, ...) error
- func (NoopIndex) Volume(_ context.Context, _ string, _, _ model.Time, _ VolumeAccumulator, ...) error
- type PoolChunkRefs
- type PoolSeries
- type RecordType
- type Series
- type SingleTenantTSDBIdentifier
- type TSDBFile
- type TSDBIndex
- func (i *TSDBIndex) Bounds() (model.Time, model.Time)
- func (i *TSDBIndex) Checksum() uint32
- func (i *TSDBIndex) Close() error
- func (i *TSDBIndex) ForSeries(ctx context.Context, _ string, fpFilter index.FingerprintFilter, ...) error
- func (i *TSDBIndex) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, ...) ([]ChunkRef, error)
- func (i *TSDBIndex) Identifier(string) SingleTenantTSDBIdentifier
- func (i *TSDBIndex) LabelNames(_ context.Context, _ string, _, _ model.Time, matchers ...*labels.Matcher) ([]string, error)
- func (i *TSDBIndex) LabelValues(_ context.Context, _ string, _, _ model.Time, name string, ...) ([]string, error)
- func (i *TSDBIndex) Series(ctx context.Context, _ string, from, through model.Time, res []Series, ...) ([]Series, error)
- func (i *TSDBIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
- func (i *TSDBIndex) Stats(ctx context.Context, _ string, from, through model.Time, ...) error
- func (i *TSDBIndex) Volume(ctx context.Context, _ string, from, through model.Time, acc VolumeAccumulator, ...) error
- type TSDBManager
- type VolumeAccumulator
- type WAL
- type WALIdentifier
- type WALRecord
- type WalGroup
Constants ¶
const TenantLabel = "__loki_tenant__"
TenantLabel is part of the reserved label namespace (__ prefix). It's used to create multi-tenant TSDBs (which do not have a tenancy concept). These labels are stripped out during compaction to single-tenant TSDBs.
Variables ¶
var ( ChunkMetasPool = &index.ChunkMetasPool // re-exporting SeriesPool PoolSeries ChunkRefsPool PoolChunkRefs )
var ErrAlreadyOnDesiredVersion = errors.New("tsdb file already on desired version")
var ErrEmptyAccumulator = errors.New("no items in result accumulator")
Functions ¶
func NewIndexCompactor ¶
func NewIndexCompactor() compactor.IndexCompactor
func NewStore ¶
func NewStore( name, prefix string, indexShipperCfg indexshipper.Config, schemaCfg config.SchemaConfig, _ *fetcher.Fetcher, objectClient client.ObjectClient, limits downloads.Limits, tableRange config.TableRange, reg prometheus.Registerer, logger log.Logger, ) ( index.ReaderWriter, func(), error, )
NewStore creates a new tsdb index ReaderWriter.
func NewTSDBIndexFromFile ¶
func NewTSDBIndexFromFile(location string) (*TSDBIndex, GetRawFileReaderFunc, error)
Return the index as well as the underlying raw file reader, which isn't exposed as an index method but is helpful for building an io.Reader for the index shipper.
func OpenShippableTSDB ¶
func OpenShippableTSDB(p string) (shipperindex.Index, error)
func PostingsForMatchers ¶
func PostingsForMatchers(ix IndexReader, fpFilter index.FingerprintFilter, ms ...*labels.Matcher) (index.Postings, error)
PostingsForMatchers assembles a single postings iterator against the index reader based on the given matchers. The resulting postings are not ordered by series.
func RebuildWithVersion ¶
func RebuildWithVersion(ctx context.Context, path string, desiredVer int) (shipperindex.Index, error)
Types ¶
type Builder ¶
type Builder struct {
// contains filtered or unexported fields
}
Builder is a helper used to create tsdb indices. It can accept streams in any order and will create the tsdb index appropriately via `Build()`. It can even receive multiple writes for the same stream, with the caveat that chunks must be added in order and not duplicated.
func NewBuilder ¶
func (*Builder) Build ¶
func (b *Builder) Build( ctx context.Context, scratchDir string, createFn func(from, through model.Time, checksum uint32) Identifier, ) (id Identifier, err error)
func (*Builder) FinalizeChunks ¶
func (b *Builder) FinalizeChunks()
type ChunkMetasRecord ¶
type ChunkMetasRecord struct { Chks index.ChunkMetas Ref uint64 }
type ChunkRef ¶
type GetRawFileReaderFunc ¶
type GetRawFileReaderFunc func() (io.ReadSeeker, error)
GetRawFileReaderFunc returns an io.ReadSeeker for reading raw tsdb file from disk
type Head ¶
type Head struct {
// contains filtered or unexported fields
}
func (*Head) Append ¶
func (h *Head) Append(ls labels.Labels, fprint uint64, chks index.ChunkMetas) (created bool, refID uint64)
Note: chks must not be nil or zero-length
func (*Head) Index ¶
func (h *Head) Index() IndexReader
Index returns an IndexReader against the block.
type HeadManager ¶
type HeadManager struct { Index // contains filtered or unexported fields }
func NewHeadManager ¶
func NewHeadManager(name string, logger log.Logger, dir string, metrics *Metrics, tsdbManager TSDBManager) *HeadManager
func (*HeadManager) Append ¶
func (m *HeadManager) Append(userID string, ls labels.Labels, fprint uint64, chks index.ChunkMetas) error
func (*HeadManager) Start ¶
func (m *HeadManager) Start() error
func (*HeadManager) Stop ¶
func (m *HeadManager) Stop() error
type Identifier ¶
Identifier can resolve an index to a name (in object storage) and a path (on disk)
func NewPrefixedIdentifier ¶
func NewPrefixedIdentifier(id Identifier, path, name string) Identifier
type Index ¶
type Index interface { Bounded SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) Close() error sharding.ForSeries // GetChunkRefs accepts an optional []ChunkRef argument. // If not nil, it will use that slice to build the result, // allowing us to avoid unnecessary allocations at the caller's discretion. // If nil, the underlying index implementation is required // to build the resulting slice nonetheless (it should not panic), // ideally by requesting a slice from the pool. // Shard is also optional. If not nil, TSDB will limit the result to // the requested shard. If it is nil, TSDB will return all results, // regardless of shard. // Note: any shard used must be a valid factor of two, meaning `0_of_2` and `3_of_4` are fine, but `0_of_3` is not. GetChunkRefs(ctx context.Context, userID string, from, through model.Time, res []ChunkRef, fpFilter index.FingerprintFilter, matchers ...*labels.Matcher) ([]ChunkRef, error) // Series follows the same semantics regarding the passed slice and shard as GetChunkRefs. Series(ctx context.Context, userID string, from, through model.Time, res []Series, fpFilter index.FingerprintFilter, matchers ...*labels.Matcher) ([]Series, error) LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error) LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error) Stats(ctx context.Context, userID string, from, through model.Time, acc IndexStatsAccumulator, fpFilter index.FingerprintFilter, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error Volume(ctx context.Context, userID string, from, through model.Time, acc VolumeAccumulator, fpFilter index.FingerprintFilter, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) error }
type IndexClient ¶
type IndexClient struct {
// contains filtered or unexported fields
}
implements stores.Index
func NewIndexClient ¶
func NewIndexClient(idx Index, opts IndexClientOptions, l Limits) *IndexClient
func (*IndexClient) GetChunkRefs ¶
func (c *IndexClient) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, predicate chunk.Predicate) ([]logproto.ChunkRef, error)
TODO(owen-d): synchronize logproto.ChunkRef and tsdb.ChunkRef so we don't have to convert. They share almost the same fields, so we can add the missing `KB` field to the proto and then use that within the tsdb package.
func (*IndexClient) HasForSeries ¶
func (*IndexClient) LabelNamesForMetricName ¶
func (c *IndexClient) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, _ string, matchers ...*labels.Matcher) ([]string, error)
tsdb no longer uses the __metric_name__="logs" hack, so we can ignore metric names!
func (*IndexClient) LabelValuesForMetricName ¶
func (c *IndexClient) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, _ string, labelName string, matchers ...*labels.Matcher) ([]string, error)
tsdb no longer uses the __metric_name__="logs" hack, so we can ignore metric names!
func (*IndexClient) SetChunkFilterer ¶
func (c *IndexClient) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
SetChunkFilterer sets a chunk filter to be used when retrieving chunks. This is only used for the GetSeries implementation. TODO: we might want to pass it as a parameter to GetSeries instead.
type IndexClientOptions ¶
type IndexClientOptions struct { // Whether using bloom filters in the Stats() method // should be skipped. This helps probabilistically detect // duplicates when chunks are written to multiple // index buckets, which is of use in the (index-gateway|querier) // but not worth the memory costs in the ingesters. // NB(owen-d): This is NOT the bloom-filter feature developed late 2023 onwards, // but a smaller bloom filter used internally for probabalistic deduping of series counts // in the index stats() method across index buckets (which can have the same series) UseBloomFilters bool }
func DefaultIndexClientOptions ¶
func DefaultIndexClientOptions() IndexClientOptions
type IndexIter ¶
type IndexIter interface { // For may be executed concurrently, // but all work must complete before // it returns. // TODO(owen-d|sandeepsukhani): // Lazy iteration may touch different index files within the same index query. // `For` e.g, Bounds and GetChunkRefs might go through different index files // if a sync happened between the calls. // The second parameter sets a limit on the number of indexes iterated concurrently. For(context.Context, int, func(context.Context, Index) error) error }
type IndexReader ¶
type IndexReader interface { // Bounds returns the earliest and latest samples in the index Bounds() (int64, int64) Checksum() uint32 // Symbols return an iterator over sorted string symbols that may occur in // series' labels and indices. It is not safe to use the returned strings // beyond the lifetime of the index reader. Symbols() index.StringIter // SortedLabelValues returns sorted possible label values. SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) // LabelValues returns possible label values which may not be sorted. LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) // Postings returns the postings list iterator for the label pairs. // The Postings here contain the offsets to the series inside the index. // Found IDs are not strictly required to point to a valid Series, e.g. // during background garbage collections. Input values must be sorted. Postings(name string, fpFilter index.FingerprintFilter, values ...string) (index.Postings, error) // Series populates the given labels and chunk metas for the series identified // by the reference. // Returns storage.ErrNotFound if the ref does not resolve to a known series. Series(ref storage.SeriesRef, from int64, through int64, lset *labels.Labels, chks *[]index.ChunkMeta) (uint64, error) // ChunkStats returns the stats for the chunks in the given series. ChunkStats(ref storage.SeriesRef, from, through int64, lset *labels.Labels) (uint64, index.ChunkStats, error) // LabelNames returns all the unique label names present in the index in sorted order. LabelNames(matchers ...*labels.Matcher) ([]string, error) // LabelValueFor returns label value for the given label name in the series referred to by ID. // If the series couldn't be found or the series doesn't have the requested label a // storage.ErrNotFound is returned as error. 
LabelValueFor(id storage.SeriesRef, label string) (string, error) // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) // Close releases the underlying resources of the reader. Close() error }
IndexReader provides reading access of serialized index data.
type IndexSlice ¶
type IndexSlice []Index
type IndexStatsAccumulator ¶
type IndexStatsAccumulator interface { AddStream(fp model.Fingerprint) AddChunkStats(s index.ChunkStats) Stats() stats.Stats }
type IndexWriter ¶
type LazyIndex ¶
Index adapter for a function which returns an index when queried.
func (LazyIndex) GetChunkRefs ¶
func (LazyIndex) LabelNames ¶
func (LazyIndex) LabelValues ¶
func (LazyIndex) SetChunkFilterer ¶
func (f LazyIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
type Metrics ¶
type Metrics struct {
// contains filtered or unexported fields
}
TODO(owen-d)
func NewMetrics ¶
func NewMetrics(r prometheus.Registerer) *Metrics
type MultiIndex ¶
type MultiIndex struct {
// contains filtered or unexported fields
}
func NewMultiIndex ¶
func NewMultiIndex(i IndexIter) *MultiIndex
func (*MultiIndex) Close ¶
func (i *MultiIndex) Close() error
func (*MultiIndex) GetChunkRefs ¶
func (*MultiIndex) LabelNames ¶
func (*MultiIndex) LabelValues ¶
func (*MultiIndex) SetChunkFilterer ¶
func (i *MultiIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
func (*MultiIndex) Stats ¶
func (i *MultiIndex) Stats(ctx context.Context, userID string, from, through model.Time, acc IndexStatsAccumulator, fpFilter index.FingerprintFilter, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error
type MultiTenantIndex ¶
type MultiTenantIndex struct {
// contains filtered or unexported fields
}
MultiTenantIndex will inject a tenant label into its queries. This works with pre-compacted TSDBs which aren't yet per tenant.
func NewMultiTenantIndex ¶
func NewMultiTenantIndex(idx Index) *MultiTenantIndex
func (*MultiTenantIndex) Close ¶
func (m *MultiTenantIndex) Close() error
func (*MultiTenantIndex) GetChunkRefs ¶
func (*MultiTenantIndex) LabelNames ¶
func (*MultiTenantIndex) LabelValues ¶
func (*MultiTenantIndex) SetChunkFilterer ¶
func (m *MultiTenantIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
func (*MultiTenantIndex) Stats ¶
func (m *MultiTenantIndex) Stats(ctx context.Context, userID string, from, through model.Time, acc IndexStatsAccumulator, fpFilter index.FingerprintFilter, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error
func (*MultiTenantIndex) Volume ¶
func (m *MultiTenantIndex) Volume(ctx context.Context, userID string, from, through model.Time, acc VolumeAccumulator, fpFilter index.FingerprintFilter, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) error
type MultitenantTSDBIdentifier ¶
type MultitenantTSDBIdentifier struct {
// contains filtered or unexported fields
}
func (MultitenantTSDBIdentifier) Name ¶
func (id MultitenantTSDBIdentifier) Name() string
Name builds the filename with format `<file-creation-ts>-<nodeName>`
func (MultitenantTSDBIdentifier) Path ¶
func (id MultitenantTSDBIdentifier) Path() string
type NoopIndex ¶
type NoopIndex struct{}
func (NoopIndex) GetChunkRefs ¶
func (NoopIndex) LabelNames ¶
func (NoopIndex) LabelValues ¶
func (NoopIndex) Series ¶
func (NoopIndex) Series(_ context.Context, _ string, _, _ model.Time, _ []Series, _ index.FingerprintFilter, _ ...*labels.Matcher) ([]Series, error)
Series follows the same semantics regarding the passed slice and shard as GetChunkRefs.
func (NoopIndex) SetChunkFilterer ¶
func (NoopIndex) SetChunkFilterer(_ chunk.RequestChunkFilterer)
type PoolChunkRefs ¶
type PoolChunkRefs struct {
// contains filtered or unexported fields
}
func (*PoolChunkRefs) Get ¶
func (p *PoolChunkRefs) Get() []ChunkRef
func (*PoolChunkRefs) Put ¶
func (p *PoolChunkRefs) Put(xs []ChunkRef)
type PoolSeries ¶
type PoolSeries struct {
// contains filtered or unexported fields
}
func (*PoolSeries) Get ¶
func (p *PoolSeries) Get() []Series
func (*PoolSeries) Put ¶
func (p *PoolSeries) Put(xs []Series)
type RecordType ¶
type RecordType byte
const ( // FirstWrite is a special record type written once // at the beginning of every WAL. It records the system time // when the WAL was created. This is used to determine when to rotate // WALs and persists across restarts. WalRecordSeries RecordType = iota WalRecordChunks WalRecordSeriesWithFingerprint )
By prefixing records with versions, we can easily update our wal schema
type SingleTenantTSDBIdentifier ¶
type SingleTenantTSDBIdentifier struct { TS time.Time From, Through model.Time Checksum uint32 // contains filtered or unexported fields }
Identifier has all the information needed to resolve a TSDB index Notably this abstracts away OS path separators, etc.
func ParseSingleTenantTSDBPath ¶
func ParseSingleTenantTSDBPath(p string) (id SingleTenantTSDBIdentifier, ok bool)
func (SingleTenantTSDBIdentifier) Hash ¶
func (i SingleTenantTSDBIdentifier) Hash(h hash.Hash32) (err error)
implement Hash
func (SingleTenantTSDBIdentifier) Name ¶
func (i SingleTenantTSDBIdentifier) Name() string
func (SingleTenantTSDBIdentifier) Path ¶
func (i SingleTenantTSDBIdentifier) Path() string
type TSDBFile ¶
type TSDBFile struct { // reuse Identifier for resolving locations Identifier // reuse TSDBIndex for reading Index // contains filtered or unexported fields }
nolint TSDBFile is backed by an actual file and implements the indexshipper/index.Index interface
func NewShippableTSDBFile ¶
func NewShippableTSDBFile(id Identifier) (*TSDBFile, error)
type TSDBIndex ¶
type TSDBIndex struct {
// contains filtered or unexported fields
}
nolint TSDBIndex is backed by an IndexReader and translates the IndexReader to an Index implementation It loads the file into memory and doesn't keep a file descriptor open
func NewTSDBIndex ¶
func NewTSDBIndex(reader IndexReader) *TSDBIndex
func (*TSDBIndex) ForSeries ¶
func (i *TSDBIndex) ForSeries(ctx context.Context, _ string, fpFilter index.FingerprintFilter, from model.Time, through model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) (stop bool), matchers ...*labels.Matcher) error
fn must NOT capture its arguments. They're reused across series iterations and returned to a pool after completion. Iteration will stop if the callback returns true. Accepts a userID argument in order to implement the `Index` interface, but since this is a single-tenant index, it is ignored (it's enforced elsewhere in index selection).
func (*TSDBIndex) GetChunkRefs ¶
func (*TSDBIndex) Identifier ¶
func (i *TSDBIndex) Identifier(string) SingleTenantTSDBIdentifier
func (*TSDBIndex) LabelNames ¶
func (*TSDBIndex) LabelValues ¶
func (*TSDBIndex) SetChunkFilterer ¶
func (i *TSDBIndex) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
func (*TSDBIndex) Volume ¶
func (i *TSDBIndex) Volume( ctx context.Context, _ string, from, through model.Time, acc VolumeAccumulator, fpFilter index.FingerprintFilter, _ shouldIncludeChunk, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher, ) error
Volume returns the volumes of the series described by the passed matchers by the names of the passed matchers. All non-requested labels are aggregated into the requested series.
ex: Imagine we have two labels: 'foo' and 'fizz', each with two values 'a' and 'b'. Called with the matcher `{foo="a"}`, Volume returns the aggregated size of the series `{foo="a"}`. If Volume is called with `{foo=~".+", fizz=~".+"}`, it returns the volumes aggregated as follows:
{foo="a", fizz="a"} {foo="a", fizz="b"} {foo="b", fizz="a"} {foo="b", fizz="b"}
Volume optionally accepts a slice of target labels. If provided, volumes are aggregated into those labels only. For example, given the matcher {fizz=~".+"} and target labels of []string{"foo"}, volumes would be aggregated as follows:
{foo="a"} which would be the sum of {foo="a", fizz="a"} and {foo="a", fizz="b"} {foo="b"} which would be the sum of {foo="b", fizz="a"} and {foo="b", fizz="b"}
type TSDBManager ¶
type TSDBManager interface { Start() error // Builds a new TSDB file from a set of WALs BuildFromWALs(time.Time, []WALIdentifier, bool) error // Builds a new TSDB file from tenantHeads BuildFromHead(*tenantHeads) error }
nolint:revive TSDBManager wraps the index shipper and writes/manages TSDB files on disk
func NewTSDBManager ¶
func NewTSDBManager( name, nodeName, dir string, indexShipper indexshipper.IndexShipper, tableRange config.TableRange, schemaCfg config.SchemaConfig, logger log.Logger, metrics *Metrics, ) TSDBManager
type VolumeAccumulator ¶
type VolumeAccumulator interface { AddVolume(string, uint64) error Volumes() *logproto.VolumeResponse }
type WALIdentifier ¶
type WALIdentifier struct {
// contains filtered or unexported fields
}
func (WALIdentifier) String ¶
func (w WALIdentifier) String() string