block

package
v1.9.1 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Oct 17, 2024 License: AGPL-3.0 Imports: 35 Imported by: 0

Documentation

Index

Constants

View Source
const (
	IndexFilename = "index.tsdb"
	ParquetSuffix = ".parquet"

	HostnameLabel = "__hostname__"
)
View Source
const (
	CorruptedMeta = "corrupted-meta-json"
	NoMeta        = "no-meta-json"
	LoadedMeta    = "loaded"
	FailedMeta    = "failed"

	// Blocks that are marked for deletion can be loaded as well. This is done to make sure that we load blocks that are meant to be deleted,
	// but don't have a replacement block yet.
	MarkedForDeletionMeta = "marked-for-deletion"

	// MarkedForNoCompactionMeta is the label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric.
	MarkedForNoCompactionMeta = "marked-for-no-compact"
)
View Source
const (
	// DeletionMarkFilename is the known json filename for optional file storing details about when block is marked for deletion.
	// If such file is present in block dir, it means the block is meant to be deleted after certain delay.
	DeletionMarkFilename = "deletion-mark.json"
	// NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction.
	// If such file is present in block dir, it means the block has to be excluded from compaction (both vertical and horizontal) or rewrite (e.g deletions).
	NoCompactMarkFilename = "no-compact-mark.json"

	// DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos.
	DeletionMarkVersion1 = 1
	// NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos.
	NoCompactMarkVersion1 = 1
)
View Source
const (
	// ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason.
	ManualNoCompactReason NoCompactReason = "manual"
	// IndexSizeExceedingNoCompactReason is the reason used when the index is too big (for example exceeding the 64GB limit: https://github.com/thanos-io/thanos/issues/1424)
	// This reason can be ignored when vertical block sharding will be implemented.
	IndexSizeExceedingNoCompactReason = "index-size-exceeding"
	// OutOfOrderChunksNoCompactReason is the reason used to exclude a block whose index contains out-of-order chunks, so that compaction is not blocked.
	OutOfOrderChunksNoCompactReason = "block-index-out-of-order-chunk"
)
View Source
const (
	// MetaVersion1 is an enumeration of the Pyroscope section of TSDB meta supported by Pyroscope.
	MetaVersion1 = MetaVersion(1)

	// MetaVersion2 indicates the block format version.
	// https://github.com/grafana/phlare/pull/767.
	//  1. In this version we introduced symdb:
	//     - stacktraces.parquet table has been deprecated.
	//     - StacktracePartition column added to profiles.parquet table.
	//     - symdb is stored in ./symbols sub-directory.
	//  2. TotalValue column added to profiles.parquet table.
	//  3. pprof labels discarded and never stored in the block.
	MetaVersion2 = MetaVersion(2)

	// MetaVersion3 indicates the block format version.
	// https://github.com/grafana/pyroscope/pull/2196.
	//  1. Introduction of symdb v2:
	//     - locations, functions, mappings, strings parquet tables
	//       moved to ./symbols sub-directory (symdb) and partitioned
	//       by StacktracePartition. References to the partitions
	//       are stored in the index.symdb file.
	//  2. In this version, parquet tables are never loaded into
	//     memory entirely. Instead, each partition (row range) is read
	//     from the block on demand at query time.
	MetaVersion3 = MetaVersion(3)
)
View Source
const BlockIDLabel = "__block_id"

BlockIDLabel is a special label that will have a ULID of the meta.json being referenced to.

View Source
const (
	MarkersPathname = "markers"
)
View Source
const (
	MetaFilename = "meta.json"
)

Variables

View Source
var (
	ErrorSyncMetaNotFound  = errors.New("meta.json not found")
	ErrorSyncMetaCorrupted = errors.New("meta.json corrupted")
)
View Source
var (
	// ErrorMarkerNotFound is the error when marker file is not found.
	ErrorMarkerNotFound = errors.New("marker not found")
	// ErrorUnmarshalMarker is the error when unmarshalling marker JSON file.
	// This error can occur because marker has been partially uploaded to block storage
	// or the marker file is not a valid json file.
	ErrorUnmarshalMarker = errors.New("unmarshal marker JSON")
)

Functions

func BucketWithGlobalMarkers added in v1.1.0

func BucketWithGlobalMarkers(b objstore.Bucket) objstore.Bucket

BucketWithGlobalMarkers wraps the input bucket into a bucket which also keeps track of markers in the global markers location.

func Delete

func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error

Delete removes directory that is meant to be block directory. NOTE: Always prefer this method for deleting blocks.

  • We have to delete block's files in a certain order (meta.json first and deletion-mark.json last) to ensure we don't end up with malformed partial blocks. The Thanos system handles partial blocks well only if they don't have meta.json. If meta.json is present, Thanos assumes a valid block.
  • This avoids deleting empty dir (whole bucket) by mistake.

func DeletionMarkFilepath added in v1.1.0

func DeletionMarkFilepath(blockID ulid.ULID) string

DeletionMarkFilepath returns the path, relative to the tenant's bucket location, of a block deletion mark in the bucket markers location.

func Download added in v1.1.0

func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id ulid.ULID, dst string, options ...objstore.DownloadOption) error

Download downloads directory that is meant to be block directory.

func HashBlockID

func HashBlockID(id ulid.ULID) uint32

HashBlockID returns a 32-bit hash of the block ID useful for ring-based sharding.

func InRange

func InRange(min, max, start, end model.Time) bool

func IsBlockDir

func IsBlockDir(path string) (id ulid.ULID, ok bool)

func IsDeletionMarkFilename added in v1.1.0

func IsDeletionMarkFilename(name string) (ulid.ULID, bool)

IsDeletionMarkFilename returns whether the input filename matches the expected pattern of block deletion markers stored in the markers location.

func IsNoCompactMarkFilename added in v1.1.0

func IsNoCompactMarkFilename(name string) (ulid.ULID, bool)

IsNoCompactMarkFilename returns true if input filename matches the expected pattern of block marker stored in the markers location.

func ListBlockDeletionMarks added in v1.1.0

func ListBlockDeletionMarks(ctx context.Context, bkt objstore.BucketReader) (map[ulid.ULID]struct{}, error)

ListBlockDeletionMarks looks for block deletion marks in the global markers location and returns a map containing all blocks having a deletion mark and their location in the bucket.

func ListBlocks

func ListBlocks(path string, ulidMinTime time.Time) (map[ulid.ULID]*Meta, error)

func MarkForDeletion added in v1.1.0

func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, warnExist bool, markedForDeletion prometheus.Counter) error

MarkForDeletion creates a file which stores information about when the block was marked for deletion.

func MarkForNoCompact added in v1.1.0

func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason NoCompactReason, details string, markedForNoCompact prometheus.Counter) error

MarkForNoCompact creates a file which marks block to be not compacted.

func NoCompactMarkFilepath added in v1.1.0

func NoCompactMarkFilepath(blockID ulid.ULID) string

NoCompactMarkFilepath returns the path, relative to the tenant's bucket location, of a no-compact block mark in the bucket markers location.

func ReadMarker added in v1.1.0

func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, marker Marker) error

ReadMarker reads the given mark file from <dir>/<marker filename>.json in bucket. ReadMarker has a one-minute timeout for completing the read against the bucket. This protects against operations that can take unbounded time.

func Upload

func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string) error

Upload uploads a TSDB block to the object storage. It verifies basic features of Thanos block.

Types

type BlockDesc added in v1.2.0

type BlockDesc struct {
	ULID    ulid.ULID  `json:"ulid"`
	MinTime model.Time `json:"minTime"`
	MaxTime model.Time `json:"maxTime"`
}

BlockDesc describes a block by ULID and time range.

type BlockMetaCompaction added in v1.2.0

type BlockMetaCompaction struct {
	// Maximum number of compaction cycles any source block has
	// gone through.
	Level int `json:"level"`
	// ULIDs of all source head blocks that went into the block.
	Sources []ulid.ULID `json:"sources,omitempty"`
	// Indicates that during compaction it resulted in a block without any samples
	// so it should be deleted on the next reloadBlocks.
	Deletable bool `json:"deletable,omitempty"`
	// Short descriptions of the direct blocks that were used to create
	// this block.
	Parents []BlockDesc `json:"parents,omitempty"`
	Failed  bool        `json:"failed,omitempty"`
	// Additional information about the compaction, for example, block created from out-of-order chunks.
	Hints []string `json:"hints,omitempty"`
}

BlockMetaCompaction holds information about compactions a block went through.

type BlockStats

type BlockStats struct {
	NumSamples  uint64 `json:"numSamples,omitempty"`
	NumSeries   uint64 `json:"numSeries,omitempty"`
	NumProfiles uint64 `json:"numProfiles,omitempty"`
}

type DeletionMark added in v1.1.0

type DeletionMark struct {
	// ID of the tsdb block.
	ID ulid.ULID `json:"id"`
	// Version of the file.
	Version int `json:"version"`
	// Details is a human readable string giving details of reason.
	Details string `json:"details,omitempty"`

	// DeletionTime is a unix timestamp of when the block was marked to be deleted.
	DeletionTime int64 `json:"deletion_time"`
}

DeletionMark stores block id and when block was marked for deletion.

type Downsample added in v1.1.0

type Downsample struct {
	Resolution int64 `json:"resolution"`
}

type FetcherMetrics added in v1.1.0

type FetcherMetrics struct {
	Syncs        prometheus.Counter
	SyncFailures prometheus.Counter
	SyncDuration prometheus.Histogram

	Synced *extprom.TxGaugeVec
}

FetcherMetrics holds metrics tracked by the metadata fetcher. This struct and its fields are exported to allow depending projects (eg. Cortex) to implement their own custom metadata fetcher while tracking compatible metrics.

func NewFetcherMetrics added in v1.1.0

func NewFetcherMetrics(reg prometheus.Registerer, syncedExtraLabels [][]string) *FetcherMetrics

func (*FetcherMetrics) ResetTx added in v1.1.0

func (s *FetcherMetrics) ResetTx()

ResetTx starts new transaction for metrics tracked by transaction GaugeVec.

func (*FetcherMetrics) Submit added in v1.1.0

func (s *FetcherMetrics) Submit()

Submit applies new values for metrics tracked by transaction GaugeVec.

type File

type File struct {
	RelPath string `json:"relPath"`
	// SizeBytes is optional (e.g meta.json does not show size).
	SizeBytes uint64 `json:"sizeBytes,omitempty"`

	// Parquet can contain some optional Parquet file info
	Parquet *ParquetFile `json:"parquet,omitempty"`
	// TSDB can contain some optional TSDB file info
	TSDB *TSDBFile `json:"tsdb,omitempty"`
}

type FileStats added in v1.6.0

type FileStats struct {
	RelPath   string
	SizeBytes uint64
}

type GaugeVec added in v1.1.0

type GaugeVec interface {
	WithLabelValues(lvs ...string) prometheus.Gauge
}

GaugeVec hides something like a Prometheus GaugeVec or an extprom.TxGaugeVec.

type IgnoreDeletionMarkFilter added in v1.1.0

type IgnoreDeletionMarkFilter struct {
	// contains filtered or unexported fields
}

IgnoreDeletionMarkFilter is a filter that filters out the blocks that are marked for deletion after a given delay. The delay duration is to make sure that the replacement block can be fetched before we filter out the old block. Delay is not considered when computing DeletionMarkBlocks map. Not go-routine safe.

func NewIgnoreDeletionMarkFilter added in v1.1.0

func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.BucketReader, delay time.Duration, concurrency int) *IgnoreDeletionMarkFilter

NewIgnoreDeletionMarkFilter creates IgnoreDeletionMarkFilter.

func (*IgnoreDeletionMarkFilter) DeletionMarkBlocks added in v1.1.0

func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*DeletionMark

DeletionMarkBlocks returns block ids that were marked for deletion.

func (*IgnoreDeletionMarkFilter) Filter added in v1.1.0

func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*Meta, synced GaugeVec) error

Filter filters out blocks that are marked for deletion after a given delay. It also returns the blocks that can be deleted since they were uploaded delay duration before current time.

type Marker added in v1.1.0

type Marker interface {
	// contains filtered or unexported methods
}

type Meta

type Meta struct {
	// Unique identifier for the block and its contents. Changes on compaction.
	ULID ulid.ULID `json:"ulid"`

	// MinTime and MaxTime specify the time range all samples
	// in the block are in.
	MinTime model.Time `json:"minTime"`
	MaxTime model.Time `json:"maxTime"`

	// Stats about the contents of the block.
	Stats BlockStats `json:"stats,omitempty"`

	// File is a sorted (by rel path) list of all files in block directory of this block known to PyroscopeDB.
	// Sorted by relative path.
	Files []File `json:"files,omitempty"`

	// Information on compactions the block was created from.
	Compaction BlockMetaCompaction `json:"compaction"`

	// Version of the index format.
	Version MetaVersion `json:"version"`

	// Labels are the external labels identifying the producer as well as tenant.
	Labels map[string]string `json:"labels"`

	// Source is a real upload source of the block.
	Source SourceType `json:"source,omitempty"`

	// Downsample is a downsampling resolution of the block. 0 means no downsampling.
	Downsample `json:"downsample"`
}

func DownloadMeta

func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (Meta, error)

DownloadMeta downloads only meta file from bucket by block ID. TODO(bwplotka): Differentiate between network error & partial upload.

func MetaFromDir

func MetaFromDir(dir string) (*Meta, int64, error)

func NewMeta

func NewMeta() *Meta

func Read

func Read(rc io.ReadCloser) (_ *Meta, err error)

Read the block meta from the given reader.

func ReadMetaFromDir added in v1.1.0

func ReadMetaFromDir(dir string) (*Meta, error)

ReadMetaFromDir reads the given meta from <dir>/meta.json.

func SortBlocks

func SortBlocks(metas map[ulid.ULID]*Meta) []*Meta

func (*Meta) BlockInfo added in v1.2.0

func (m *Meta) BlockInfo() *typesv1.BlockInfo

func (*Meta) Clone

func (m *Meta) Clone() *Meta

func (*Meta) FileByRelPath

func (m *Meta) FileByRelPath(name string) *File

func (*Meta) GetStats added in v1.6.0

func (meta *Meta) GetStats() MetaStats

func (*Meta) InRange

func (m *Meta) InRange(start, end model.Time) bool

func (*Meta) String

func (m *Meta) String() string

func (*Meta) TSDBBlockMeta

func (meta *Meta) TSDBBlockMeta() tsdb.BlockMeta

func (*Meta) WriteBlockInfo added in v1.2.0

func (m *Meta) WriteBlockInfo(info *typesv1.BlockInfo)

func (*Meta) WriteTo

func (meta *Meta) WriteTo(w io.Writer) (int64, error)

func (*Meta) WriteToFile

func (meta *Meta) WriteToFile(logger log.Logger, dir string) (int64, error)

WriteToFile writes the encoded meta into <dir>/meta.json.

type MetaFetcher added in v1.1.0

type MetaFetcher struct {
	// contains filtered or unexported fields
}

MetaFetcher is a struct that synchronizes filtered metadata of all blocks in the object storage with the local state. Go-routine safe.

func NewMetaFetcher added in v1.1.0

func NewMetaFetcher(logger log.Logger, concurrency int, bkt objstore.BucketReader, dir string, reg prometheus.Registerer, filters []MetadataFilter) (*MetaFetcher, error)

NewMetaFetcher returns a MetaFetcher.

func NewMetaFetcherWithMetrics added in v1.2.0

func NewMetaFetcherWithMetrics(logger log.Logger, concurrency int, bkt objstore.BucketReader, dir string, metrics *FetcherMetrics, filters []MetadataFilter) (*MetaFetcher, error)

func (*MetaFetcher) Fetch added in v1.1.0

func (f *MetaFetcher) Fetch(ctx context.Context) (metas map[ulid.ULID]*Meta, partials map[ulid.ULID]error, err error)

Fetch returns all block metas as well as partial blocks (blocks without or with corrupted meta file) from the bucket. It is the caller's responsibility to not change the returned metadata files. Maps can be modified.

Returned error indicates a failure in fetching metadata. Returned meta can be assumed as correct, with some blocks missing.

func (*MetaFetcher) FetchWithoutMarkedForDeletion added in v1.1.0

func (f *MetaFetcher) FetchWithoutMarkedForDeletion(ctx context.Context) (metas map[ulid.ULID]*Meta, partials map[ulid.ULID]error, err error)

FetchWithoutMarkedForDeletion returns all block metas as well as partial blocks (blocks without or with corrupted meta file) from the bucket. This function excludes all blocks marked for deletion (no deletion delay applied). It is the caller's responsibility to not change the returned metadata files. Maps can be modified.

Returned error indicates a failure in fetching metadata. Returned meta can be assumed as correct, with some blocks missing.

func (*MetaFetcher) LoadMeta added in v1.2.1

func (f *MetaFetcher) LoadMeta(ctx context.Context, id ulid.ULID) (*Meta, error)

LoadMeta returns metadata from object storage or error. It returns ErrorSyncMetaNotFound and ErrorSyncMetaCorrupted sentinel errors in those cases.

type MetaStats added in v1.6.0

type MetaStats struct {
	BlockStats
	FileStats      []FileStats
	TotalSizeBytes uint64
}

func (MetaStats) ConvertToBlockStats added in v1.6.0

func (stats MetaStats) ConvertToBlockStats() *ingestv1.BlockStats

type MetaVersion

type MetaVersion int

func (MetaVersion) IsValid added in v1.1.0

func (v MetaVersion) IsValid() bool

IsValid returns true if the version is valid.

type MetadataFetcher added in v1.1.0

type MetadataFetcher interface {
	Fetch(ctx context.Context) (metas map[ulid.ULID]*Meta, partial map[ulid.ULID]error, err error)
}

type MetadataFilter added in v1.1.0

type MetadataFilter interface {
	Filter(ctx context.Context, metas map[ulid.ULID]*Meta, synced GaugeVec) error
}

MetadataFilter allows filtering or modifying metas from the provided map or returns error.

type NoCompactMark added in v1.1.0

type NoCompactMark struct {
	// ID of the tsdb block.
	ID ulid.ULID `json:"id"`
	// Version of the file.
	Version int `json:"version"`
	// Details is a human readable string giving details of reason.
	Details string `json:"details,omitempty"`

	// NoCompactTime is a unix timestamp of when the block was marked for no compact.
	NoCompactTime int64           `json:"no_compact_time"`
	Reason        NoCompactReason `json:"reason"`
}

NoCompactMark marker stores reason of block being excluded from compaction if needed.

type NoCompactReason added in v1.1.0

type NoCompactReason string

NoCompactReason is a reason for a block to be excluded from compaction.

type ParquetFile

type ParquetFile struct {
	NumRowGroups uint64 `json:"numRowGroups,omitempty"`
	NumRows      uint64 `json:"numRows,omitempty"`
}

type SourceType

type SourceType string
const (
	UnknownSource   SourceType = ""
	IngesterSource  SourceType = "ingester"
	CompactorSource SourceType = "compactor"
)

type TSDBFile

type TSDBFile struct {
	NumSeries uint64 `json:"numSeries,omitempty"`
}

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL