Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
var (
	RingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE}, nil)

	DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, _ prometheus.Gauge, _ *ring.Ring, _ *ring.Lifecycler, _ Limits, _ string) compact.Grouper {
		return compact.NewDefaultGrouper(
			logger,
			bkt,
			false,
			true,
			reg,
			blocksMarkedForDeletion,
			garbageCollectedBlocks,
			blocksMarkedForNoCompaction,
			metadata.NoneFunc)
	}

	ShuffleShardingGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion, blocksMarkedForNoCompaction, garbageCollectedBlocks prometheus.Counter, remainingPlannedCompactions prometheus.Gauge, ring *ring.Ring, ringLifecycle *ring.Lifecycler, limits Limits, userID string) compact.Grouper {
		return NewShuffleShardingGrouper(
			logger,
			bkt,
			false,
			true,
			reg,
			blocksMarkedForDeletion,
			blocksMarkedForNoCompaction,
			garbageCollectedBlocks,
			remainingPlannedCompactions,
			metadata.NoneFunc,
			cfg,
			ring,
			ringLifecycle.Addr,
			limits,
			userID)
	}

	DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) {
		compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool(), nil)
		if err != nil {
			return nil, nil, err
		}

		plannerFactory := func(logger log.Logger, cfg Config, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter) compact.Planner {
			return compact.NewPlanner(logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter)
		}

		return compactor, plannerFactory, nil
	}

	ShuffleShardingBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, PlannerFactory, error) {
		compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool(), nil)
		if err != nil {
			return nil, nil, err
		}

		plannerFactory := func(logger log.Logger, cfg Config, noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter) compact.Planner {
			return NewShuffleShardingPlanner(logger, cfg.BlockRanges.ToMilliseconds(), noCompactionMarkFilter.NoCompactMarkedBlocks)
		}

		return compactor, plannerFactory, nil
	}
)
Functions ¶
This section is empty.
Types ¶
type BlocksCleaner ¶ added in v1.2.0
func NewBlocksCleaner ¶ added in v1.2.0
func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, cfgProvider ConfigProvider, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner
type BlocksCleanerConfig ¶ added in v1.2.0
type BlocksCleanerConfig struct {
	DeletionDelay                      time.Duration
	CleanupInterval                    time.Duration
	CleanupConcurrency                 int
	BlockDeletionMarksMigrationEnabled bool          // TODO Discuss whether we should remove it in Cortex 1.8.0 and document that upgrading to 1.7.0 before 1.8.0 is required.
	TenantCleanupDelay                 time.Duration // Delay before removing tenant deletion mark and "debug".
}
type BlocksCompactorFactory ¶ added in v1.8.0
type BlocksCompactorFactory func(
	ctx context.Context,
	cfg Config,
	logger log.Logger,
	reg prometheus.Registerer,
) (compact.Compactor, PlannerFactory, error)
BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks.
type BlocksGrouperFactory ¶ added in v1.8.0
type BlocksGrouperFactory func(
	ctx context.Context,
	cfg Config,
	bkt objstore.Bucket,
	logger log.Logger,
	reg prometheus.Registerer,
	blocksMarkedForDeletion prometheus.Counter,
	blocksMarkedForNoCompact prometheus.Counter,
	garbageCollectedBlocks prometheus.Counter,
	remainingPlannedCompactions prometheus.Gauge,
	ring *ring.Ring,
	ringLifecycler *ring.Lifecycler,
	limit Limits,
	userID string,
) compact.Grouper
BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks.
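Both factory hooks are exposed as fields on Config (tagged yaml:"-", see Config below), so downstream projects assign them in code. A minimal sketch, assuming the import path github.com/cortexproject/cortex/pkg/compactor and reusing the exported factories from the Variables section:

package main

import (
	"github.com/cortexproject/cortex/pkg/compactor"
)

func main() {
	// The factory fields are excluded from YAML, so they can only be set programmatically.
	// A downstream project would typically plug in its own implementations here.
	var cfg compactor.Config
	cfg.BlocksGrouperFactory = compactor.ShuffleShardingGrouperFactory
	cfg.BlocksCompactorFactory = compactor.ShuffleShardingBlocksCompactorFactory
	_ = cfg // cfg is then passed to NewCompactor (see below)
}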
type Compactor ¶
Compactor is a multi-tenant TSDB blocks compactor based on Thanos.
func NewCompactor ¶
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, cfgProvider ConfigProvider, logger log.Logger, registerer prometheus.Registerer, limits Limits) (*Compactor, error)
NewCompactor makes a new Compactor.
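A construction sketch, not a definitive recipe: the import paths, the BlocksStorageConfig.RegisterFlags call, and the nil ConfigProvider/Limits placeholders are assumptions; a real deployment supplies per-tenant overrides and runs the returned Compactor through the Cortex services framework.

package main

import (
	"flag"
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/compactor"
	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
)

func main() {
	var (
		compactorCfg compactor.Config
		storageCfg   cortex_tsdb.BlocksStorageConfig
	)

	// Populate both configs with their registered flag defaults.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	compactorCfg.RegisterFlags(fs)
	storageCfg.RegisterFlags(fs) // assumed to follow the same RegisterFlags shape
	_ = fs.Parse(nil)

	// Placeholders: a real caller passes implementations of ConfigProvider and
	// Limits (typically the per-tenant overrides object).
	var cfgProvider compactor.ConfigProvider
	var limits compactor.Limits

	c, err := compactor.NewCompactor(compactorCfg, storageCfg, cfgProvider, log.NewLogfmtLogger(os.Stderr), prometheus.NewRegistry(), limits)
	if err != nil {
		panic(err)
	}
	_ = c // started and stopped as a service by the caller
}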
func (*Compactor) RingHandler ¶ added in v0.7.0
func (c *Compactor) RingHandler(w http.ResponseWriter, req *http.Request)
type Config ¶
type Config struct {
	BlockRanges           cortex_tsdb.DurationList `yaml:"block_ranges"`
	BlockSyncConcurrency  int                      `yaml:"block_sync_concurrency"`
	MetaSyncConcurrency   int                      `yaml:"meta_sync_concurrency"`
	ConsistencyDelay      time.Duration            `yaml:"consistency_delay"`
	DataDir               string                   `yaml:"data_dir"`
	CompactionInterval    time.Duration            `yaml:"compaction_interval"`
	CompactionRetries     int                      `yaml:"compaction_retries"`
	CompactionConcurrency int                      `yaml:"compaction_concurrency"`
	CleanupInterval       time.Duration            `yaml:"cleanup_interval"`
	CleanupConcurrency    int                      `yaml:"cleanup_concurrency"`
	DeletionDelay         time.Duration            `yaml:"deletion_delay"`
	TenantCleanupDelay    time.Duration            `yaml:"tenant_cleanup_delay"`

	SkipBlocksWithOutOfOrderChunksEnabled bool `yaml:"skip_blocks_with_out_of_order_chunks_enabled"`

	// Whether the migration of block deletion marks to the global markers location is enabled.
	BlockDeletionMarksMigrationEnabled bool `yaml:"block_deletion_marks_migration_enabled"`

	EnabledTenants  flagext.StringSliceCSV `yaml:"enabled_tenants"`
	DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"`

	// Compactors sharding.
	ShardingEnabled  bool       `yaml:"sharding_enabled"`
	ShardingStrategy string     `yaml:"sharding_strategy"`
	ShardingRing     RingConfig `yaml:"sharding_ring"`

	// Allow downstream projects to customise the blocks compactor.
	BlocksGrouperFactory   BlocksGrouperFactory   `yaml:"-"`
	BlocksCompactorFactory BlocksCompactorFactory `yaml:"-"`
	// contains filtered or unexported fields
}
Config holds the Compactor config.
func (*Config) RegisterFlags ¶
func (cfg *Config) RegisterFlags(f *flag.FlagSet)
RegisterFlags registers the Compactor flags.
type ConfigProvider ¶ added in v1.8.0
type ConfigProvider interface {
	bucket.TenantConfigProvider
	CompactorBlocksRetentionPeriod(user string) time.Duration
}
ConfigProvider defines the per-tenant config provider for the Compactor.
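A hypothetical implementation sketch: the storage/bucket import path is an assumption, and embedding bucket.TenantConfigProvider is only a shortcut to satisfy its promoted methods without restating them here (in Cortex itself the per-tenant overrides object typically plays this role).

package compactorexample

import (
	"time"

	"github.com/cortexproject/cortex/pkg/compactor"
	"github.com/cortexproject/cortex/pkg/storage/bucket"
)

// staticConfigProvider applies the same blocks retention to every tenant.
// The embedded bucket.TenantConfigProvider supplies the per-tenant bucket
// methods required by the interface (a real implementation must provide them).
type staticConfigProvider struct {
	bucket.TenantConfigProvider
	retention time.Duration
}

func (p staticConfigProvider) CompactorBlocksRetentionPeriod(user string) time.Duration {
	return p.retention
}

// Compile-time check that the sketch satisfies compactor.ConfigProvider.
var _ compactor.ConfigProvider = staticConfigProvider{}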
type LabelRemoverFilter ¶ added in v1.2.0
type LabelRemoverFilter struct {
// contains filtered or unexported fields
}
func NewLabelRemoverFilter ¶ added in v1.2.0
func NewLabelRemoverFilter(labels []string) *LabelRemoverFilter
NewLabelRemoverFilter creates a LabelRemoverFilter.
type PlannerFactory ¶ added in v1.13.0
type PlannerFactory func(
	logger log.Logger,
	cfg Config,
	noCompactionMarkFilter *compact.GatherNoCompactionMarkFilter,
) compact.Planner
type RingConfig ¶ added in v0.7.0
type RingConfig struct {
	KVStore          kv.Config     `yaml:"kvstore"`
	HeartbeatPeriod  time.Duration `yaml:"heartbeat_period"`
	HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"`

	// Wait ring stability.
	WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"`
	WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"`

	// Instance details
	InstanceID             string   `yaml:"instance_id" doc:"hidden"`
	InstanceInterfaceNames []string `yaml:"instance_interface_names"`
	InstancePort           int      `yaml:"instance_port" doc:"hidden"`
	InstanceAddr           string   `yaml:"instance_addr" doc:"hidden"`

	// Injected internally
	ListenPort int `yaml:"-"`

	WaitActiveInstanceTimeout time.Duration `yaml:"wait_active_instance_timeout"`

	ObservePeriod time.Duration `yaml:"-"`
}
RingConfig masks the ring lifecycler config, which contains many options not required by the compactors ring. It strips the config down to the minimum, to avoid confusing the user.
func (*RingConfig) RegisterFlags ¶ added in v0.7.0
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet)
RegisterFlags adds the flags required to configure this to the given FlagSet.
func (*RingConfig) ToLifecyclerConfig ¶ added in v0.7.0
func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig
ToLifecyclerConfig returns a LifecyclerConfig based on the compactor ring config.
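A short sketch of the intended round trip, with the import path assumed:

package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/compactor"
)

func main() {
	var ringCfg compactor.RingConfig

	// Populate the ring config with its registered flag defaults.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	ringCfg.RegisterFlags(fs)
	_ = fs.Parse(nil)

	// Expand the stripped-down ring config into the full lifecycler config
	// used to join the compactors ring.
	lifecyclerCfg := ringCfg.ToLifecyclerConfig()
	fmt.Println(lifecyclerCfg.HeartbeatPeriod) // expected to mirror RingConfig.HeartbeatPeriod
}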
type ShuffleShardingGrouper ¶ added in v1.13.0
type ShuffleShardingGrouper struct {
// contains filtered or unexported fields
}
func NewShuffleShardingGrouper ¶ added in v1.13.0
func NewShuffleShardingGrouper(
	logger log.Logger,
	bkt objstore.Bucket,
	acceptMalformedIndex bool,
	enableVerticalCompaction bool,
	reg prometheus.Registerer,
	blocksMarkedForDeletion prometheus.Counter,
	blocksMarkedForNoCompact prometheus.Counter,
	garbageCollectedBlocks prometheus.Counter,
	remainingPlannedCompactions prometheus.Gauge,
	hashFunc metadata.HashFunc,
	compactorCfg Config,
	ring ring.ReadRing,
	ringLifecyclerAddr string,
	limits Limits,
	userID string,
) *ShuffleShardingGrouper
type ShuffleShardingPlanner ¶ added in v1.13.0
type ShuffleShardingPlanner struct {
// contains filtered or unexported fields
}
func NewShuffleShardingPlanner ¶ added in v1.13.0
func NewShuffleShardingPlanner(logger log.Logger, ranges []int64, noCompBlocksFunc func() map[ulid.ULID]*metadata.NoCompactMark) *ShuffleShardingPlanner