Documentation
Index
- func IsBlockOverlapping(b chunkenc.Block, with *LazyChunk, direction logproto.Direction) bool
- func NewBucketClient(storageConfig Config) (index.BucketClient, error)
- func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, ...) (client.Client, error)
- func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, ...) (index.Client, error)
- func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error)
- func NewTableClient(name string, cfg Config, cm ClientMetrics, registerer prometheus.Registerer) (index.TableClient, error)
- func ResetBoltDBIndexClientWithShipper()
- type AsyncStore
- type AsyncStoreCfg
- type ChunkMetrics
- type ClientMetrics
- type Config
- type IngesterQuerier
- type LazyChunk
- func (c *LazyChunk) IsOverlapping(with *LazyChunk, direction logproto.Direction) bool
- func (c *LazyChunk) Iterator(ctx context.Context, from, through time.Time, direction logproto.Direction, ...) (iter.EntryIterator, error)
- func (c *LazyChunk) SampleIterator(ctx context.Context, from, through time.Time, ...) (iter.SampleIterator, error)
- type Store
- type StoreLimits
Constants
This section is empty.
Variables
This section is empty.
Functions
func IsBlockOverlapping
func IsBlockOverlapping(b chunkenc.Block, with *LazyChunk, direction logproto.Direction) bool
func NewBucketClient
func NewBucketClient(storageConfig Config) (index.BucketClient, error)
NewBucketClient makes a new bucket client based on the configuration.
func NewChunkClient
func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clientMetrics ClientMetrics, registerer prometheus.Registerer) (client.Client, error)
NewChunkClient makes a new chunk.Client of the desired type.
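A minimal sketch of constructing a chunk client backed by the filesystem store. The import paths assume this is Loki's pkg/storage package; the store name "filesystem", the chunk directory, and the zero-valued schema config are illustrative assumptions, not a production setup:

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/grafana/loki/pkg/storage"
    "github.com/grafana/loki/pkg/storage/config"
)

func main() {
    var cfg storage.Config            // normally populated from YAML or RegisterFlags
    var schemaCfg config.SchemaConfig // normally loaded from the schema_config section
    cfg.FSConfig.Directory = "/tmp/loki/chunks" // hypothetical local chunk directory

    cm := storage.NewClientMetrics()
    defer cm.Unregister()

    chunkClient, err := storage.NewChunkClient("filesystem", cfg, schemaCfg, cm, prometheus.DefaultRegisterer)
    if err != nil {
        log.Fatalf("creating chunk client: %v", err)
    }
    defer chunkClient.Stop()
}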
func NewIndexClient
func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, ownsTenantFn downloads.IndexGatewayOwnsTenant, registerer prometheus.Registerer) (index.Client, error)
NewIndexClient makes a new index client of the desired type.
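Continuing the sketch above (reusing cfg, schemaCfg, and cm, and assuming a StoreLimits implementation named limits), an index client for the boltdb-shipper index might be created like this; the store name "boltdb-shipper" and passing nil for the tenant-ownership function are assumptions based on common usage:

indexClient, err := storage.NewIndexClient("boltdb-shipper", cfg, schemaCfg, limits, cm, nil, prometheus.DefaultRegisterer)
if err != nil {
    log.Fatalf("creating index client: %v", err)
}
defer indexClient.Stop()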
func NewObjectClient
func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error)
NewObjectClient makes a new StorageClient of the desired type.
func NewTableClient
func NewTableClient(name string, cfg Config, cm ClientMetrics, registerer prometheus.Registerer) (index.TableClient, error)
NewTableClient makes a new table client based on the configuration.
func ResetBoltDBIndexClientWithShipper
func ResetBoltDBIndexClientWithShipper()
ResetBoltDBIndexClientWithShipper resets the singleton BoltDB index client built with the shipper. MUST ONLY BE USED IN TESTS.
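Because the boltdb-shipper index client is a package-level singleton, a test that builds it repeatedly can reset it during cleanup; a sketch:

func TestWithBoltDBShipper(t *testing.T) {
    t.Cleanup(storage.ResetBoltDBIndexClientWithShipper)
    // ... create index clients backed by boltdb-shipper ...
}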
Types
type AsyncStore
AsyncStore queries both the ingesters and the chunk store and combines the results after deduplicating them. It should be used with an async store such as boltdb-shipper. AsyncStore is meant to be used only in queriers or in any service other than ingesters; it must never be used in ingesters, since that would spiral into ingesters querying each other over and over again.
func NewAsyncStore
func NewAsyncStore(cfg AsyncStoreCfg, store stores.Store, scfg config.SchemaConfig) *AsyncStore
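A minimal wiring sketch, assuming ingesterQuerier, chunkStore, and schemaCfg already exist in the caller; the 3h lookback is an arbitrary example value:

asyncStore := storage.NewAsyncStore(storage.AsyncStoreCfg{
    IngesterQuerier:      ingesterQuerier, // your IngesterQuerier implementation
    QueryIngestersWithin: 3 * time.Hour,   // ingesters are not asked for chunk ids older than this
}, chunkStore, schemaCfg)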
func (*AsyncStore) GetChunkRefs
type AsyncStoreCfg
type AsyncStoreCfg struct {
    IngesterQuerier IngesterQuerier
    // QueryIngestersWithin defines maximum lookback beyond which ingesters are not queried for chunk ids.
    QueryIngestersWithin time.Duration
}
type ChunkMetrics
type ChunkMetrics struct {
// contains filtered or unexported fields
}
func NewChunkMetrics
func NewChunkMetrics(r prometheus.Registerer, maxBatchSize int) *ChunkMetrics
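For example (the registry and batch size are arbitrary example values):

reg := prometheus.NewRegistry()
chunkMetrics := storage.NewChunkMetrics(reg, 50) // maxBatchSize of 50 is an example value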
type ClientMetrics
type ClientMetrics struct {
AzureMetrics azure.BlobStorageMetrics
}
func NewClientMetrics
func NewClientMetrics() ClientMetrics
func (*ClientMetrics) Unregister
func (c *ClientMetrics) Unregister()
type Config
type Config struct {
    AWSStorageConfig       aws.StorageConfig         `yaml:"aws"`
    AzureStorageConfig     azure.BlobStorageConfig   `yaml:"azure"`
    BOSStorageConfig       baidubce.BOSStorageConfig `yaml:"bos"`
    GCPStorageConfig       gcp.Config                `yaml:"bigtable"`
    GCSConfig              gcp.GCSConfig             `yaml:"gcs"`
    CassandraStorageConfig cassandra.Config          `yaml:"cassandra"`
    BoltDBConfig           local.BoltDBConfig        `yaml:"boltdb"`
    FSConfig               local.FSConfig            `yaml:"filesystem"`
    Swift                  openstack.SwiftConfig     `yaml:"swift"`
    GrpcConfig             grpc.Config               `yaml:"grpc_store"`
    Hedging                hedging.Config            `yaml:"hedging"`

    IndexCacheValidity       time.Duration `yaml:"index_cache_validity"`
    IndexQueriesCacheConfig  cache.Config  `yaml:"index_queries_cache_config"`
    DisableBroadIndexQueries bool          `yaml:"disable_broad_index_queries"`
    MaxParallelGetChunk      int           `yaml:"max_parallel_get_chunk"`
    MaxChunkBatchSize        int           `yaml:"max_chunk_batch_size"`

    BoltDBShipperConfig shipper.Config      `yaml:"boltdb_shipper"`
    TSDBShipperConfig   indexshipper.Config `yaml:"tsdb_shipper"`

    // Config for using AsyncStore when using async index stores like `boltdb-shipper`.
    // It is required for getting chunk ids of recently flushed chunks from the ingesters.
    EnableAsyncStore bool          `yaml:"-"`
    AsyncStoreConfig AsyncStoreCfg `yaml:"-"`
}
Config chooses which storage client to use.
func (*Config) RegisterFlags
func (cfg *Config) RegisterFlags(f *flag.FlagSet)
RegisterFlags registers the flags required to configure this Config with the given flag set.
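A sketch of registering the storage flags on a dedicated flag set; parsing with no arguments leaves every option at its default:

fs := flag.NewFlagSet("storage", flag.ContinueOnError)
var cfg storage.Config
cfg.RegisterFlags(fs)
if err := fs.Parse(nil); err != nil {
    log.Fatal(err)
}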
type IngesterQuerier
type LazyChunk
type LazyChunk struct {
    Chunk   chunk.Chunk
    IsValid bool
    Fetcher *fetcher.Fetcher
    // contains filtered or unexported fields
}
LazyChunk loads the chunk when it is accessed.
func (*LazyChunk) IsOverlapping
func (c *LazyChunk) IsOverlapping(with *LazyChunk, direction logproto.Direction) bool
func (*LazyChunk) Iterator
func (c *LazyChunk) Iterator(
    ctx context.Context,
    from, through time.Time,
    direction logproto.Direction,
    pipeline log.StreamPipeline,
    nextChunk *LazyChunk,
) (iter.EntryIterator, error)
Iterator returns an entry iterator. If a next chunk is passed, the returned iterator caches the entries of blocks that overlap with it, so that when those entries are reused for ordering across batches the data does not have to be decompressed again.
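A usage sketch, assuming lc is a *LazyChunk whose data has been fetched, sp is a log.StreamPipeline already built for the chunk's stream labels, and the iterator exposes the Next/Entry methods of this Loki version's iter.EntryIterator:

it, err := lc.Iterator(ctx, from, through, logproto.FORWARD, sp, nil) // nil: no next chunk to cache for
if err != nil {
    return err
}
defer it.Close()
for it.Next() {
    entry := it.Entry()
    fmt.Println(entry.Timestamp, entry.Line)
}

SampleIterator below follows the same pattern, with a log.StreamSampleExtractor in place of the pipeline.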
func (*LazyChunk) SampleIterator
func (c *LazyChunk) SampleIterator(
    ctx context.Context,
    from, through time.Time,
    extractor log.StreamSampleExtractor,
    nextChunk *LazyChunk,
) (iter.SampleIterator, error)
SampleIterator returns a sample iterator. If a next chunk is passed, the returned iterator caches the entries of blocks that overlap with it, so that when those entries are reused for ordering across batches the data does not have to be decompressed again.
type Store
type Store interface {
    stores.Store
    SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error)
    SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error)
    Series(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error)
    GetSchemaConfigs() []config.PeriodConfig
    SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
}
Store is the Loki chunk store to retrieve and save chunks.
func NewStore
func NewStore(
    cfg Config,
    storeCfg config.ChunkStoreConfig,
    schemaCfg config.SchemaConfig,
    limits StoreLimits,
    clientMetrics ClientMetrics,
    registerer prometheus.Registerer,
    logger log.Logger,
) (Store, error)
NewStore creates a new Loki Store using the supplied configuration.
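A minimal sketch, assuming cfg, storeCfg, schemaCfg, limits, and a go-kit logger are already populated from your Loki configuration, and that the returned Store exposes Stop via the embedded stores.Store:

cm := storage.NewClientMetrics()
defer cm.Unregister()

st, err := storage.NewStore(cfg, storeCfg, schemaCfg, limits, cm, prometheus.DefaultRegisterer, logger)
if err != nil {
    return err
}
defer st.Stop()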