block

package
v0.0.0-...-b350be6 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 24, 2024 License: AGPL-3.0 Imports: 48 Imported by: 1

Documentation

Index

Constants

View Source
const (
	// MetaFilename is the known JSON filename for meta information.
	MetaFilename = "meta.json"
	// IndexFilename is the known index file for block index.
	IndexFilename = "index"
	// IndexHeaderFilename is the canonical name for binary index header file that stores essential information.
	IndexHeaderFilename = "index-header"
	// SparseIndexHeaderFilename is the canonical name for sparse index header file that stores abbreviated slices of index-header.
	SparseIndexHeaderFilename = "sparse-index-header"
	// ChunksDirname is the known dir name for chunks with compressed samples.
	ChunksDirname = "chunks"

	// DebugMetas is a directory for debug meta files that happen in the past. Useful for debugging.
	DebugMetas = "debug/metas"
)
View Source
const (
	CorruptedMeta = "corrupted-meta-json"
	NoMeta        = "no-meta-json"
	LoadedMeta    = "loaded"
	FailedMeta    = "failed"

	// Blocks that are marked for deletion can be loaded as well. This is done to make sure that we load blocks that are meant to be deleted,
	// but don't have a replacement block yet.
	MarkedForDeletionMeta = "marked-for-deletion"

	// MarkedForNoCompactionMeta is a label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric.
	MarkedForNoCompactionMeta = "marked-for-no-compact"
)
View Source
const (
	// DeletionMarkFilename is the known json filename for optional file storing details about when block is marked for deletion.
	// If such file is present in block dir, it means the block is meant to be deleted after certain delay.
	DeletionMarkFilename = "deletion-mark.json"
	// NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction.
	// If such file is present in block dir, it means the block has to be excluded from compaction (both vertical and horizontal) or rewrite (e.g deletions).
	NoCompactMarkFilename = "no-compact-mark.json"

	// DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos.
	DeletionMarkVersion1 = 1
	// NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos.
	NoCompactMarkVersion1 = 1
)
View Source
const (
	// ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason.
	ManualNoCompactReason NoCompactReason = "manual"
	// IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424)
	// This reason can be ignored when vertical block sharding will be implemented.
	IndexSizeExceedingNoCompactReason = "index-size-exceeding"
	// OutOfOrderChunksNoCompactReason is a reason to not compact a block whose index contains an out-of-order chunk, so that compaction is not blocked.
	OutOfOrderChunksNoCompactReason = "block-index-out-of-order-chunk"
	// CriticalNoCompactReason is a reason to not compact a block that has some critical issue (e.g. corrupted index).
	CriticalNoCompactReason = "critical"
)
View Source
const (
	// TSDBVersion1 is an enumeration of TSDB meta versions supported by Thanos.
	TSDBVersion1 = 1
	// ThanosVersion1 is an enumeration of Thanos section of TSDB meta supported by Thanos.
	ThanosVersion1 = 1
)
View Source
const BlockIDLabel = "__block_id"

BlockIDLabel is a special label that will have a ULID of the meta.json being referenced to.

View Source
const (
	MarkersPathname = "markers"
)

Variables

View Source
var (
	ErrorSyncMetaNotFound  = errors.New("meta.json not found")
	ErrorSyncMetaCorrupted = errors.New("meta.json corrupted")
)
View Source
var (
	// ErrorMarkerNotFound is the error when marker file is not found.
	ErrorMarkerNotFound = errors.New("marker not found")
	// ErrorUnmarshalMarker is the error when unmarshalling marker JSON file.
	// This error can occur because marker has been partially uploaded to block storage
	// or the marker file is not a valid json file.
	ErrorUnmarshalMarker = errors.New("unmarshal marker JSON")
)

Functions

func BucketWithGlobalMarkers

func BucketWithGlobalMarkers(b objstore.Bucket) objstore.Bucket

BucketWithGlobalMarkers wraps the input bucket into a bucket which also keeps track of markers in the global markers location.

func CreateBlock

func CreateBlock(
	ctx context.Context,
	dir string,
	series []labels.Labels,
	numSamples int,
	mint, maxt int64,
	extLset labels.Labels,
) (id ulid.ULID, err error)

CreateBlock writes a block with the given series and numSamples samples each. Timeseries i%3==0 will contain floats, i%3==1 will contain histograms and i%3==2 will contain float histograms Samples will be in the time range [mint, maxt).

func Delete

func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error

Delete removes directory that is meant to be block directory. NOTE: Always prefer this method for deleting blocks.

  • We have to delete block's files in the certain order (meta.json first and deletion-mark.json last) to ensure we don't end up with malformed partial blocks. Thanos system handles well partial blocks only if they don't have meta.json. If meta.json is present Thanos assumes valid block.
  • This avoids deleting empty dir (whole bucket) by mistake.

func DeleteNoCompactMarker

func DeleteNoCompactMarker(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error

func DeletionMarkFilepath

func DeletionMarkFilepath(blockID ulid.ULID) string

DeletionMarkFilepath returns the path, relative to the tenant's bucket location, of a block deletion mark in the bucket markers location.

func Download

func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id ulid.ULID, dst string, options ...objstore.DownloadOption) error

Download downloads a directory meant to be a block directory. If any one of the files has a hash calculated in the meta file and it matches with what is in the destination path then we do not download it. We always re-download the meta file.

func GetMetaAttributes

func GetMetaAttributes(ctx context.Context, meta *Meta, bucketReader objstore.BucketReader) (objstore.ObjectAttributes, error)

GetMetaAttributes returns the attributes for the block associated with the meta, using the userBucket to read the attributes.

func GetSegmentFiles

func GetSegmentFiles(blockDir string) []string

GetSegmentFiles returns list of segment files for given block. Paths are relative to the chunks directory. In case of errors, nil is returned.

func IgnoreCompleteOutsideChunk

func IgnoreCompleteOutsideChunk(mint, maxt int64, _, curr *chunks.Meta) (bool, error)

func IgnoreDuplicateOutsideChunk

func IgnoreDuplicateOutsideChunk(_, _ int64, last, curr *chunks.Meta) (bool, error)

func IgnoreIssue347OutsideChunk

func IgnoreIssue347OutsideChunk(_, maxt int64, _, curr *chunks.Meta) (bool, error)

func IsBlockDir

func IsBlockDir(path string) (id ulid.ULID, ok bool)

func IsDeletionMarkFilename

func IsDeletionMarkFilename(name string) (ulid.ULID, bool)

IsDeletionMarkFilename returns whether the input filename matches the expected pattern of block deletion markers stored in the markers location.

func IsNoCompactMarkFilename

func IsNoCompactMarkFilename(name string) (ulid.ULID, bool)

IsNoCompactMarkFilename returns true if input filename matches the expected pattern of block marker stored in the markers location.

func ListBlockDeletionMarks

func ListBlockDeletionMarks(ctx context.Context, bkt objstore.BucketReader) (map[ulid.ULID]struct{}, error)

ListBlockDeletionMarks looks for block deletion marks in the global markers location and returns a map containing all blocks having a deletion mark and their location in the bucket.

func MarkForDeletion

func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, markedForDeletion prometheus.Counter) error

MarkForDeletion creates a file which stores information about when the block was marked for deletion.

func MarkForNoCompact

func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason NoCompactReason, details string, markedForNoCompact prometheus.Counter) error

MarkForNoCompact creates a file which marks block to be not compacted.

func MetaBytesSize

func MetaBytesSize(m *Meta) int64

func MockStorageBlock

func MockStorageBlock(t testing.TB, bucket objstore.Bucket, userID string, minT, maxT int64) tsdb.BlockMeta

func NoCompactMarkFilepath

func NoCompactMarkFilepath(blockID ulid.ULID) string

NoCompactMarkFilepath returns the path, relative to the tenant's bucket location, of a no-compact block mark in the bucket markers location.

func ReadMarker

func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.InstrumentedBucketReader, dir string, marker Marker) error

ReadMarker reads the given mark file from <dir>/<marker filename>.json in bucket. ReadMarker has a one-minute timeout for completing the read against the bucket. This protects against operations that can take unbounded time.

func Repair

func Repair(ctx context.Context, logger log.Logger, dir string, id ulid.ULID, source SourceType, clampChunks bool, ignoreChkFns ...ignoreFnType) (resid ulid.ULID, err error)

Repair opens the block with given id in dir and creates a new one with fixed data. It: - removes out of order duplicates - all "complete" outsiders (they will not be accessed anyway) - removes all near "complete" outside chunks introduced by https://github.com/prometheus/tsdb/issues/347. Fixable inconsistencies are resolved in the new block. TODO(bplotka): https://github.com/thanos-io/thanos/issues/378.

func Upload

func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, blockDir string, meta *Meta) error

Upload uploads a TSDB block to the object storage. Notes:

- If meta parameter is supplied (not nil), then uploaded meta.json file reflects meta parameter. However local meta.json file must still exist.

- Meta struct is updated with gatherFileStats

func VerifyBlock

func VerifyBlock(ctx context.Context, logger log.Logger, blockDir string, minTime, maxTime int64, checkChunks bool) error

VerifyBlock does a full run over a block index and chunk data and verifies that they fulfill the order invariants.

Types

type DeletionMark

type DeletionMark struct {
	// ID of the tsdb block.
	ID ulid.ULID `json:"id"`
	// Version of the file.
	Version int `json:"version"`
	// Details is a human readable string giving details of reason.
	Details string `json:"details,omitempty"`

	// DeletionTime is a unix timestamp of when the block was marked to be deleted.
	DeletionTime int64 `json:"deletion_time"`
}

DeletionMark stores block id and when block was marked for deletion.

func MockStorageDeletionMark

func MockStorageDeletionMark(t testing.TB, bucket objstore.Bucket, userID string, meta tsdb.BlockMeta) *DeletionMark

func (DeletionMark) BlockULID

func (d DeletionMark) BlockULID() ulid.ULID

type FetcherMetrics

type FetcherMetrics struct {
	Syncs        prometheus.Counter
	SyncFailures prometheus.Counter
	SyncDuration prometheus.Histogram

	Synced *extprom.TxGaugeVec
}

FetcherMetrics holds metrics tracked by the metadata fetcher. This struct and its fields are exported to allow depending projects (eg. Cortex) to implement their own custom metadata fetcher while tracking compatible metrics.

func NewFetcherMetrics

func NewFetcherMetrics(reg prometheus.Registerer, syncedExtraLabels [][]string) *FetcherMetrics

func (*FetcherMetrics) ResetTx

func (s *FetcherMetrics) ResetTx()

ResetTx starts new transaction for metrics tracked by transaction GaugeVec.

func (*FetcherMetrics) Submit

func (s *FetcherMetrics) Submit()

Submit applies new values for metrics tracked by transaction GaugeVec.

type File

type File struct {
	RelPath string `json:"rel_path"`
	// SizeBytes is optional (e.g meta.json does not show size).
	SizeBytes int64 `json:"size_bytes,omitempty"`
}

func GatherFileStats

func GatherFileStats(blockDir string) (res []File, _ error)

GatherFileStats returns File entry for files inside TSDB block (index, chunks, meta.json).

type GaugeVec

type GaugeVec interface {
	WithLabelValues(lvs ...string) prometheus.Gauge
}

GaugeVec hides something like a Prometheus GaugeVec or an extprom.TxGaugeVec.

type HealthStats

type HealthStats struct {
	// TotalSeries represents total number of series in block.
	TotalSeries int64
	// OutOfOrderSeries represents number of series that have out of order chunks.
	OutOfOrderSeries int

	// OutOfOrderChunks represents number of chunks that are out of order (older time range is after younger one).
	OutOfOrderChunks int
	// DuplicatedChunks represents number of chunks with same time ranges within same series, potential duplicates.
	DuplicatedChunks int
	// OutsideChunks represents number of all chunks that are before or after time range specified in block meta.
	OutsideChunks int
	// CompleteOutsideChunks is subset of OutsideChunks that will never be accessed. They are completely out of time range specified in block meta.
	CompleteOutsideChunks int
	// Issue347OutsideChunks represents subset of OutsideChunks that are outsiders caused by https://github.com/prometheus/tsdb/issues/347
	// and is something that Thanos handles.
	//
	// Specifically we mean here chunks with minTime == block.maxTime and maxTime > block.MaxTime. These are
	// segregated into separate counters. These chunks are safe to be deleted, since they are duplicated across 2 blocks.
	Issue347OutsideChunks int
	// OutOfOrderLabels represents the number of postings that contained out
	// of order labels, a bug present in Prometheus 2.8.0 and below.
	OutOfOrderLabels int

	// Debug Statistics.
	SeriesMinLifeDuration time.Duration
	SeriesAvgLifeDuration time.Duration
	SeriesMaxLifeDuration time.Duration

	SeriesMinLifeDurationWithoutSingleSampleSeries time.Duration
	SeriesAvgLifeDurationWithoutSingleSampleSeries time.Duration
	SeriesMaxLifeDurationWithoutSingleSampleSeries time.Duration

	SeriesMinChunks int64
	SeriesAvgChunks int64
	SeriesMaxChunks int64

	TotalChunks int64

	ChunkMinDuration time.Duration
	ChunkAvgDuration time.Duration
	ChunkMaxDuration time.Duration

	ChunkMinSize int64
	ChunkAvgSize int64
	ChunkMaxSize int64

	SingleSampleSeries int64
	SingleSampleChunks int64

	LabelNamesCount        int64
	MetricLabelValuesCount int64
}

func GatherBlockHealthStats

func GatherBlockHealthStats(ctx context.Context, logger log.Logger, blockDir string, minTime, maxTime int64, checkChunkData bool) (stats HealthStats, err error)

GatherBlockHealthStats returns useful counters as well as outsider chunks (chunks outside of block time range) that helps to assess index and optionally chunk health. It considers https://github.com/prometheus/tsdb/issues/347 as something that Thanos can handle. See HealthStats.Issue347OutsideChunks for details.

func (HealthStats) AnyErr

func (i HealthStats) AnyErr() error

AnyErr returns error if stats indicates any block issue.

func (HealthStats) CriticalErr

func (i HealthStats) CriticalErr() error

CriticalErr returns error if stats indicates critical block issue, that might be solved only by manual repair procedure.

func (HealthStats) Issue347OutsideChunksErr

func (i HealthStats) Issue347OutsideChunksErr() error

Issue347OutsideChunksErr returns error if stats indicates issue347 block issue, that is repaired explicitly before compaction (on plan block).

func (HealthStats) OutOfOrderChunksErr

func (i HealthStats) OutOfOrderChunksErr() error

func (HealthStats) OutOfOrderLabelsErr

func (i HealthStats) OutOfOrderLabelsErr() error

OutOfOrderLabelsErr returns an error if the HealthStats object indicates postings with out-of-order labels. This is corrected by Prometheus Issue #5372 and affects Prometheus versions 2.8.0 and below.

type IgnoreDeletionMarkFilter

type IgnoreDeletionMarkFilter struct {
	// contains filtered or unexported fields
}

IgnoreDeletionMarkFilter is a filter that filters out the blocks that are marked for deletion after a given delay. The delay duration is to make sure that the replacement block can be fetched before we filter out the old block. Delay is not considered when computing DeletionMarkBlocks map. Not go-routine safe.

func NewIgnoreDeletionMarkFilter

func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader, delay time.Duration, concurrency int) *IgnoreDeletionMarkFilter

NewIgnoreDeletionMarkFilter creates IgnoreDeletionMarkFilter.

func (*IgnoreDeletionMarkFilter) DeletionMarkBlocks

func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*DeletionMark

DeletionMarkBlocks returns block ids that were marked for deletion.

func (*IgnoreDeletionMarkFilter) Filter

func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*Meta, synced GaugeVec) error

Filter filters out blocks that are marked for deletion after a given delay. It also returns the blocks that can be deleted since they were uploaded delay duration before current time.

type Marker

type Marker interface {
	BlockULID() ulid.ULID
	// contains filtered or unexported methods
}

type Matchers

type Matchers []*labels.Matcher

func (*Matchers) UnmarshalYAML

func (m *Matchers) UnmarshalYAML(value *yaml.Node) (err error)

type Meta

type Meta struct {
	tsdb.BlockMeta

	Thanos ThanosMeta `json:"thanos"`
}

Meta describes a block's meta. It wraps the known TSDB meta structure and extends it by Thanos-specific fields.

func DownloadMeta

func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (Meta, error)

DownloadMeta downloads only meta file from bucket by block ID. TODO(bwplotka): Differentiate between network error & partial upload.

func GenerateBlockFromSpec

func GenerateBlockFromSpec(storageDir string, specs SeriesSpecs) (_ *Meta, returnErr error)

GenerateBlockFromSpec generates a TSDB block with series and chunks provided by the input specs. This utility is intended just to be used for testing. Do not use it for any production code.

func InjectThanosMeta

func InjectThanosMeta(logger log.Logger, bdir string, meta ThanosMeta, downsampledMeta *tsdb.BlockMeta) (*Meta, error)

InjectThanosMeta sets Thanos meta to the block meta JSON and saves it to the disk. NOTE: It should be used after writing any block by any Thanos component, otherwise we will miss crucial metadata.

func MockStorageBlockWithExtLabels

func MockStorageBlockWithExtLabels(t testing.TB, bucket objstore.Bucket, userID string, minT, maxT int64, externalLabels map[string]string) Meta

func ReadMeta

func ReadMeta(rc io.ReadCloser) (_ *Meta, err error)

ReadMeta reads the block meta from the given reader.

func ReadMetaFromDir

func ReadMetaFromDir(dir string) (*Meta, error)

ReadMetaFromDir reads the given meta from <dir>/meta.json.

func (Meta) BlockBytes

func (m Meta) BlockBytes() int64

BlockBytes calculates the size of all files in the block.

func (*Meta) String

func (m *Meta) String() string

func (Meta) Write

func (m Meta) Write(w io.Writer) error

Write writes the given encoded meta to writer.

func (Meta) WriteToDir

func (m Meta) WriteToDir(logger log.Logger, dir string) error

WriteToDir writes the encoded meta into <dir>/meta.json.

type MetaCache

type MetaCache struct {
	// contains filtered or unexported fields
}

MetaCache is an LRU cache for parsed *Meta objects, optionally used by *MetaFetcher. While MetaFetcher.cache is per-instance, MetaCache can be reused between different *MetaFetcher instances.

func NewMetaCache

func NewMetaCache(maxSize, minCompactionLevel, minSources int) *MetaCache

NewMetaCache creates new *MetaCache with given max size, and parameters for storing *Meta objects. Only *Meta objects with specified minimum compaction level and number of sources are stored into the cache.

func (*MetaCache) Get

func (mc *MetaCache) Get(id ulid.ULID) *Meta

func (*MetaCache) MaxSize

func (mc *MetaCache) MaxSize() int

func (*MetaCache) Put

func (mc *MetaCache) Put(meta *Meta)

func (*MetaCache) Stats

func (mc *MetaCache) Stats() (items int, bytesSize int64, hits, misses int)

type MetaFetcher

type MetaFetcher struct {
	// contains filtered or unexported fields
}

MetaFetcher is a struct that synchronizes filtered metadata of all blocks in the object storage with the local state. Go-routine safe.

func NewMetaFetcher

func NewMetaFetcher(logger log.Logger, concurrency int, bkt objstore.InstrumentedBucketReader, dir string, reg prometheus.Registerer, filters []MetadataFilter, metaCache *MetaCache) (*MetaFetcher, error)

NewMetaFetcher returns a MetaFetcher.

func (*MetaFetcher) Fetch

func (f *MetaFetcher) Fetch(ctx context.Context) (metas map[ulid.ULID]*Meta, partials map[ulid.ULID]error, err error)

Fetch returns all block metas as well as partial blocks (blocks without or with corrupted meta file) from the bucket. It is the caller's responsibility to not change the returned metadata files. Maps can be modified.

Returned error indicates a failure in fetching metadata. Returned meta can be assumed as correct, with some blocks missing.

func (*MetaFetcher) FetchWithoutMarkedForDeletion

func (f *MetaFetcher) FetchWithoutMarkedForDeletion(ctx context.Context) (metas map[ulid.ULID]*Meta, partials map[ulid.ULID]error, err error)

FetchWithoutMarkedForDeletion returns all block metas as well as partial blocks (blocks without or with corrupted meta file) from the bucket. This function excludes all blocks marked for deletion (no deletion delay applied). It is the caller's responsibility to not change the returned metadata files. Maps can be modified.

Returned error indicates a failure in fetching metadata. Returned meta can be assumed as correct, with some blocks missing.

type MetadataFetcher

type MetadataFetcher interface {
	Fetch(ctx context.Context) (metas map[ulid.ULID]*Meta, partial map[ulid.ULID]error, err error)
}

type MetadataFilter

type MetadataFilter interface {
	Filter(ctx context.Context, metas map[ulid.ULID]*Meta, synced GaugeVec) error
}

MetadataFilter allows filtering or modifying metas from the provided map or returns error.

type NoCompactMark

type NoCompactMark struct {
	// ID of the tsdb block.
	ID ulid.ULID `json:"id"`
	// Version of the file.
	Version int `json:"version"`
	// Details is a human readable string giving details of reason.
	Details string `json:"details,omitempty"`

	// NoCompactTime is a unix timestamp of when the block was marked for no compact.
	NoCompactTime int64           `json:"no_compact_time"`
	Reason        NoCompactReason `json:"reason"`
}

NoCompactMark marker stores reason of block being excluded from compaction if needed.

func MockNoCompactMark

func MockNoCompactMark(t testing.TB, bucket objstore.Bucket, userID string, meta tsdb.BlockMeta) *NoCompactMark

func (NoCompactMark) BlockULID

func (n NoCompactMark) BlockULID() ulid.ULID

type NoCompactReason

type NoCompactReason string

NoCompactReason is a reason for a block to be excluded from compaction.

type SeriesSpec

type SeriesSpec struct {
	Labels labels.Labels
	Chunks []chunks.Meta
}

type SeriesSpecs

type SeriesSpecs []*SeriesSpec

func (SeriesSpecs) MaxTime

func (s SeriesSpecs) MaxTime() int64

func (SeriesSpecs) MinTime

func (s SeriesSpecs) MinTime() int64

type SourceType

type SourceType string
const (
	ReceiveSource         SourceType = "receive"
	CompactorSource       SourceType = "compactor"
	CompactorRepairSource SourceType = "compactor.repair"
	BucketRepairSource    SourceType = "bucket.repair"
	BlockBuilderSource    SourceType = "block-builder"
	SplitBlocksSource     SourceType = "split-blocks"
	TestSource            SourceType = "test"
)

type ThanosDownsample

type ThanosDownsample struct {
	Resolution int64 `json:"resolution"`
}

type ThanosMeta

type ThanosMeta struct {
	// Version of Thanos meta file. If none specified, 1 is assumed (since first version did not have explicit version specified).
	Version int `json:"version,omitempty"`

	// Labels are the external labels identifying the producer as well as tenant.
	// See https://thanos.io/tip/thanos/storage.md#external-labels for details.
	Labels     map[string]string `json:"labels"`
	Downsample ThanosDownsample  `json:"downsample"`

	// Source is a real upload source of the block.
	Source SourceType `json:"source"`

	// List of segment files (in chunks directory), in sorted order. Optional.
	// Deprecated. Use Files instead.
	SegmentFiles []string `json:"segment_files,omitempty"`

	// File is a sorted (by rel path) list of all files in block directory of this block known to TSDB.
	// Sorted by relative path.
	// Useful to avoid API call to get size of each file, as well as for debugging purposes.
	// Optional, added in v0.17.0.
	Files []File `json:"files,omitempty"`
}

ThanosMeta holds block meta information specific to Thanos.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL