Documentation ¶
Index ¶
- Constants
- func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Bytes, error)
- func NewNoCopyScanner(b []byte, splitFunc bufio.SplitFunc) *noCopyScanner
- func RegisterStoreServer(storeSrv storepb.StoreServer) func(*grpc.Server)
- func RegisterWritableStoreServer(storeSrv storepb.WriteableStoreServer) func(*grpc.Server)
- func ScanGRPCCurlProtoStreamMessages(data []byte, atEOF bool) (advance int, token []byte, err error)
- type BucketStore
- func (s *BucketStore) Close() (err error)
- func (s *BucketStore) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (s *BucketStore) InitialSync(ctx context.Context) error
- func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (s *BucketStore) LabelSet() []labelpb.ZLabelSet
- func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error)
- func (s *BucketStore) SyncBlocks(ctx context.Context) error
- func (s *BucketStore) TimeRange() (mint, maxt int64)
- type BucketStoreOption
- func WithChunkPool(chunkPool pool.Bytes) BucketStoreOption
- func WithDebugLogging() BucketStoreOption
- func WithFilterConfig(filter *FilterConfig) BucketStoreOption
- func WithIndexCache(cache storecache.IndexCache) BucketStoreOption
- func WithLogger(logger log.Logger) BucketStoreOption
- func WithQueryGate(queryGate gate.Gate) BucketStoreOption
- func WithRegistry(reg prometheus.Registerer) BucketStoreOption
- type BytesCounter
- type ChunksLimiter
- type ChunksLimiterFactory
- type Client
- type CloseDelegator
- type FilterConfig
- type InfoStoreServer
- type Limiter
- type LocalStore
- func (s *LocalStore) Close() (err error)
- func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (s *LocalStore) LabelNames(_ context.Context, _ *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (s *LocalStore) LabelValues(_ context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
- type MultiTSDBStore
- func (s *MultiTSDBStore) Info(ctx context.Context, req *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (s *MultiTSDBStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (s *MultiTSDBStore) LabelSet() []labelpb.ZLabelSet
- func (s *MultiTSDBStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
- func (s *MultiTSDBStore) TimeRange() (int64, int64)
- type Part
- type Partitioner
- type PrometheusStore
- func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (p *PrometheusStore) LabelSet() []labelpb.ZLabelSet
- func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_SeriesServer) error
- func (p *PrometheusStore) Timestamps() (mint int64, maxt int64)
- type ProxyStore
- func (s *ProxyStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (s *ProxyStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet
- func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
- func (s *ProxyStore) TimeRange() (int64, int64)
- type ReadWriteTSDBStore
- type SeriesLimiter
- type SeriesLimiterFactory
- type TSDBReader
- type TSDBStore
- func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
- func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
- func (s *TSDBStore) LabelSet() []labelpb.ZLabelSet
- func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
- func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
- func (s *TSDBStore) TimeRange() (int64, int64)
Constants ¶
const (
	// MaxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk.
	// This is needed for precalculating the number of samples that we may have to retrieve and decode for
	// any given query without downloading them. Please take a look at https://github.com/prometheus/tsdb/pull/397
	// to know where this number comes from. Long story short: TSDB is made in such a way because you barely get
	// any improvements in compression when the number of samples is beyond this.
	// Take a look at Figure 6 in this whitepaper http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
	MaxSamplesPerChunk = 120
	// EstimatedMaxChunkSize is the estimated average maximum chunk size. It can be exceeded in very rare (valid) cases.
	EstimatedMaxChunkSize = 16000
	// CompatibilityTypeLabelName is an artificial label that Store Gateway can optionally advertise. This is required
	// for compatibility with pre v0.8.0 Queriers. Previous Queriers were strict about duplicated external labels of
	// all StoreAPIs that had any labels. With newer Store Gateways advertising all the external labels they have
	// access to, there was a simple case where the Querier blocked a Store Gateway as a duplicate of the sidecar.
	//
	// Newer Queriers are not strict; the duplicated external labels check is gone.
	// Additionally, newer Queriers remove/ignore this exact label from UI and querying.
	//
	// This label name is intentionally against Prometheus label style.
	// TODO(bwplotka): Remove it at some point.
	CompatibilityTypeLabelName = "@thanos_compatibility_store_type"
	// DefaultPostingOffsetInMemorySampling represents the default value for --store.index-header-posting-offsets-in-mem-sampling.
	// The value 32 is chosen as a good balance for common setups: sampling that is not too large (too many CPU cycles)
	// and not too small (too much memory).
	DefaultPostingOffsetInMemorySampling = 32

	PartitionerMaxGapSize = 512 * 1024
)
const RemoteReadFrameLimit = 1048576
const StoreMatcherKey = ctxKey(0)
StoreMatcherKey is the context key for the store's allow list.
Variables ¶
This section is empty.
Functions ¶
func NewDefaultChunkBytesPool ¶ added in v0.26.0
func NewDefaultChunkBytesPool(maxChunkPoolBytes uint64) (pool.Bytes, error)
NewDefaultChunkBytesPool returns a chunk bytes pool with default settings.
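A minimal sketch of creating such a pool and handing it to a bucket store; the 2 GiB cap is an illustrative value, not a recommendation:

// Sketch: create a chunk bytes pool with default settings and an illustrative
// 2 GiB cap, then pass it to a BucketStore via the WithChunkPool option.
chunkPool, err := store.NewDefaultChunkBytesPool(2 * 1024 * 1024 * 1024)
if err != nil {
	return err
}
opt := store.WithChunkPool(chunkPool) // Use as one of the options to NewBucketStore.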
func NewNoCopyScanner ¶ added in v0.26.0
func NewNoCopyScanner(b []byte, splitFunc bufio.SplitFunc) *noCopyScanner
NewNoCopyScanner returns a bufio.Scanner-like scanner that is meant to be used on an already allocated (or mmapped) byte slice. Returned tokens are shared.
func RegisterStoreServer ¶ added in v0.26.0
func RegisterStoreServer(storeSrv storepb.StoreServer) func(*grpc.Server)
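RegisterStoreServer returns a function that performs the registration, which makes it easy to plug into gRPC server setup code. A hedged sketch, assuming bucketStore is some storepb.StoreServer implementation (for example a *BucketStore) and that you manage the gRPC server yourself:

// Sketch: register an existing StoreServer implementation on a gRPC server.
// The variable names are illustrative.
grpcSrv := grpc.NewServer()
store.RegisterStoreServer(bucketStore)(grpcSrv)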
func RegisterWritableStoreServer ¶ added in v0.26.0
func RegisterWritableStoreServer(storeSrv storepb.WriteableStoreServer) func(*grpc.Server)
Types ¶
type BucketStore ¶
type BucketStore struct {
// contains filtered or unexported fields
}
BucketStore implements the store API backed by a bucket. It loads all index files to local disk.
NOTE: Bucket store reencodes postings using diff+varint+snappy when storing to cache. This makes them smaller, but takes extra CPU and memory. When used with in-memory cache, memory usage should decrease overall, thanks to postings being smaller.
func NewBucketStore ¶
func NewBucketStore(
	bkt objstore.InstrumentedBucketReader,
	fetcher block.MetadataFetcher,
	dir string,
	chunksLimiterFactory ChunksLimiterFactory,
	seriesLimiterFactory SeriesLimiterFactory,
	partitioner Partitioner,
	blockSyncConcurrency int,
	enableCompatibilityLabel bool,
	postingOffsetsInMemSampling int,
	enableSeriesResponseHints bool,
	lazyIndexReaderEnabled bool,
	lazyIndexReaderIdleTimeout time.Duration,
	options ...BucketStoreOption,
) (*BucketStore, error)
NewBucketStore creates a new bucket backed store that implements the store API against an object store bucket. It is optimized to work against high latency backends.
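A sketch of constructing a BucketStore. All concrete values (directory, concurrency, timeouts) are illustrative only, and bkt, fetcher, logger, indexCache and chunkPool are assumed to be created elsewhere:

// Sketch only; parameter values are illustrative, not recommendations.
bs, err := store.NewBucketStore(
	bkt,     // objstore.InstrumentedBucketReader, assumed to exist.
	fetcher, // block.MetadataFetcher, assumed to exist.
	"/var/thanos/store",              // Local dir used as persistent cache.
	store.NewChunksLimiterFactory(0), // 0 disables the chunks limit.
	store.NewSeriesLimiterFactory(0), // 0 disables the series limit.
	store.NewGapBasedPartitioner(store.PartitionerMaxGapSize),
	20,   // blockSyncConcurrency.
	true, // enableCompatibilityLabel.
	store.DefaultPostingOffsetInMemorySampling,
	true,          // enableSeriesResponseHints.
	false,         // lazyIndexReaderEnabled.
	5*time.Minute, // lazyIndexReaderIdleTimeout.
	store.WithLogger(logger),
	store.WithIndexCache(indexCache),
	store.WithChunkPool(chunkPool),
)
if err != nil {
	return err
}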
func (*BucketStore) Info ¶
func (s *BucketStore) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info implements the storepb.StoreServer interface.
func (*BucketStore) InitialSync ¶
func (s *BucketStore) InitialSync(ctx context.Context) error
InitialSync performs a blocking sync with an extra step at the end that deletes locally saved blocks that are no longer present in the bucket. Such a mismatch can only arise between restarts, so this cleanup needs to run only once per startup.
func (*BucketStore) LabelNames ¶
func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames implements the storepb.StoreServer interface.
func (*BucketStore) LabelSet ¶ added in v0.26.0
func (s *BucketStore) LabelSet() []labelpb.ZLabelSet
func (*BucketStore) LabelValues ¶
func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues implements the storepb.StoreServer interface.
func (*BucketStore) Series ¶
func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error)
Series implements the storepb.StoreServer interface.
func (*BucketStore) SyncBlocks ¶
func (s *BucketStore) SyncBlocks(ctx context.Context) error
SyncBlocks synchronizes the store's state with the blocks in the bucket. It reuses disk space as a persistent cache, based on the s.dir parameter.
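A sketch of the typical startup-and-refresh pattern, assuming bs is a *BucketStore, ctx is a long-lived context, logger is a go-kit logger, and the interval is caller-chosen:

// Blocking initial sync at startup, then periodic background syncs.
if err := bs.InitialSync(ctx); err != nil {
	return err
}
go func() {
	ticker := time.NewTicker(3 * time.Minute) // Interval is illustrative.
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := bs.SyncBlocks(ctx); err != nil {
				logger.Log("msg", "block sync failed", "err", err)
			}
		}
	}
}()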
func (*BucketStore) TimeRange ¶
func (s *BucketStore) TimeRange() (mint, maxt int64)
TimeRange returns the minimum and maximum timestamp of data available in the store.
type BucketStoreOption ¶ added in v0.26.0
type BucketStoreOption func(s *BucketStore)
BucketStoreOption is a function that configures a BucketStore.
func WithChunkPool ¶ added in v0.26.0
func WithChunkPool(chunkPool pool.Bytes) BucketStoreOption
WithChunkPool sets a pool.Bytes to use for chunks.
func WithDebugLogging ¶ added in v0.26.0
func WithDebugLogging() BucketStoreOption
WithDebugLogging enables debug logging.
func WithFilterConfig ¶ added in v0.26.0
func WithFilterConfig(filter *FilterConfig) BucketStoreOption
WithFilterConfig sets a filter which Store uses for filtering metrics based on time.
func WithIndexCache ¶ added in v0.26.0
func WithIndexCache(cache storecache.IndexCache) BucketStoreOption
WithIndexCache sets an indexCache to use instead of a noopCache.
func WithLogger ¶ added in v0.26.0
func WithLogger(logger log.Logger) BucketStoreOption
WithLogger sets the BucketStore logger to the one you pass.
func WithQueryGate ¶ added in v0.26.0
func WithQueryGate(queryGate gate.Gate) BucketStoreOption
WithQueryGate sets a queryGate to use instead of a noopGate.
func WithRegistry ¶ added in v0.26.0
func WithRegistry(reg prometheus.Registerer) BucketStoreOption
WithRegistry sets a registry that BucketStore uses to register metrics with.
type BytesCounter ¶ added in v0.26.0
type BytesCounter struct {
	io.ReadCloser
	// contains filtered or unexported fields
}
func NewBytesRead ¶ added in v0.26.0
func NewBytesRead(rc io.ReadCloser) *BytesCounter
func (*BytesCounter) BytesCount ¶ added in v0.26.0
func (s *BytesCounter) BytesCount() int
type ChunksLimiter ¶ added in v0.26.0
type ChunksLimiterFactory ¶ added in v0.26.0
type ChunksLimiterFactory func(failedCounter prometheus.Counter) ChunksLimiter
ChunksLimiterFactory is used to create a new ChunksLimiter. The factory is useful for projects depending on Thanos (e.g. Cortex) that have dynamic limits.
func NewChunksLimiterFactory ¶ added in v0.26.0
func NewChunksLimiterFactory(limit uint64) ChunksLimiterFactory
NewChunksLimiterFactory makes a new ChunksLimiterFactory with a static limit.
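A short sketch of creating static limiter factories; the non-zero limit is illustrative, and both factories are meant to be passed to NewBucketStore (see the construction sketch above):

chunksLimiterFactory := store.NewChunksLimiterFactory(2_000_000) // Limit value is illustrative.
seriesLimiterFactory := store.NewSeriesLimiterFactory(0)         // 0 disables the limit.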
type Client ¶
type Client interface {
	// StoreClient to access the store.
	storepb.StoreClient

	// LabelSets that each apply to some data exposed by the backing store.
	LabelSets() []labels.Labels

	// TimeRange returns minimum and maximum time range of data in the store.
	TimeRange() (mint int64, maxt int64)

	String() string

	// Addr returns address of a Client.
	Addr() string
}
Client holds meta information about a store.
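A hedged sketch of a minimal Client implementation, for example when wiring a static gRPC endpoint into a ProxyStore; the type and field names here are hypothetical:

// staticClient is a hypothetical Client implementation wrapping a gRPC StoreClient.
type staticClient struct {
	storepb.StoreClient // Provides the Series/LabelNames/LabelValues RPCs.

	addr      string
	labelSets []labels.Labels
	minTime   int64
	maxTime   int64
}

func (c *staticClient) LabelSets() []labels.Labels          { return c.labelSets }
func (c *staticClient) TimeRange() (mint int64, maxt int64) { return c.minTime, c.maxTime }
func (c *staticClient) String() string                      { return c.addr }
func (c *staticClient) Addr() string                        { return c.addr }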
type CloseDelegator ¶ added in v0.26.0
CloseDelegator allows delegating the close operation (releasing resources used by a request to the server). This is useful when we invoke a StoreAPI within another StoreAPI and the results are ephemeral until copied.
type FilterConfig ¶ added in v0.26.0
type FilterConfig struct {
MinTime, MaxTime model.TimeOrDurationValue
}
FilterConfig is a configuration, which Store uses for filtering metrics based on time.
type InfoStoreServer ¶ added in v0.26.0
type InfoStoreServer interface {
	storepb.StoreServer
	LabelSet() []labelpb.ZLabelSet
	TimeRange() (int64, int64)
}
InfoStoreServer packs a store server with extra methods in order to be able to obtain information about a particular store.
type Limiter ¶ added in v0.26.0
type Limiter struct {
// contains filtered or unexported fields
}
Limiter is a simple mechanism for checking if something has passed a certain threshold.
func NewLimiter ¶ added in v0.26.0
func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter
NewLimiter returns a new limiter with a specified limit. 0 disables the limit.
type LocalStore ¶ added in v0.26.0
type LocalStore struct {
// contains filtered or unexported fields
}
LocalStore implements the store API against a single file containing a stream of proto-based SeriesResponses in JSON format. It is an inefficient implementation intended for a quick StoreAPI view. Chunk order is exactly the same as in the given file.
func NewLocalStoreFromJSONMmappableFile ¶ added in v0.26.0
func NewLocalStoreFromJSONMmappableFile(
	logger log.Logger,
	component component.StoreAPI,
	extLabels labels.Labels,
	path string,
	split bufio.SplitFunc,
) (*LocalStore, error)
TODO(bwplotka): Add remote read so Prometheus users can use this. Potentially after streaming is added: https://github.com/prometheus/prometheus/issues/5926. TODO(bwplotka): Consider a non-mmapped version of this, as well as different versions.
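A sketch of loading such a file, assuming it contains SeriesResponse messages compatible with the ScanGRPCCurlProtoStreamMessages split function; the path and labels are illustrative:

ls, err := store.NewLocalStoreFromJSONMmappableFile(
	logger,
	component.Store, // Any component.StoreAPI value works here.
	labels.FromStrings("replica", "local"),
	"/tmp/series.json",
	store.ScanGRPCCurlProtoStreamMessages,
)
if err != nil {
	return err
}
defer ls.Close()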
func (*LocalStore) Close ¶ added in v0.26.0
func (s *LocalStore) Close() (err error)
func (*LocalStore) Info ¶ added in v0.26.0
func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info returns store information about the Prometheus instance.
func (*LocalStore) LabelNames ¶ added in v0.26.0
func (s *LocalStore) LabelNames(_ context.Context, _ *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames returns all known label names.
func (*LocalStore) LabelValues ¶ added in v0.26.0
func (s *LocalStore) LabelValues(_ context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues returns all known label values for a given label name.
func (*LocalStore) Series ¶ added in v0.26.0
func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
Series returns all series for a requested time range and label matcher. The returned data may exceed the requested time bounds.
type MultiTSDBStore ¶ added in v0.26.0
type MultiTSDBStore struct {
// contains filtered or unexported fields
}
MultiTSDBStore implements the Store interface backed by multiple TSDBStore instances. TODO(bwplotka): Remove this and use Proxy instead. Details: https://github.com/thanos-io/thanos/issues/2864
func NewMultiTSDBStore ¶ added in v0.26.0
func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]InfoStoreServer) *MultiTSDBStore
NewMultiTSDBStore creates a new MultiTSDBStore.
func (*MultiTSDBStore) Info ¶ added in v0.26.0
func (s *MultiTSDBStore) Info(ctx context.Context, req *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info returns store merged information about the underlying TSDBStore instances.
func (*MultiTSDBStore) LabelNames ¶ added in v0.26.0
func (s *MultiTSDBStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames returns all known label names constrained by the matchers.
func (*MultiTSDBStore) LabelSet ¶ added in v0.26.0
func (s *MultiTSDBStore) LabelSet() []labelpb.ZLabelSet
func (*MultiTSDBStore) LabelValues ¶ added in v0.26.0
func (s *MultiTSDBStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues returns all known label values for a given label name.
func (*MultiTSDBStore) Series ¶ added in v0.26.0
func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
Series returns all series for a requested time range and label matcher. The returned data may exceed the requested time bounds. The data returned may have been read and merged from multiple underlying TSDBStore instances.
func (*MultiTSDBStore) TimeRange ¶ added in v0.26.0
func (s *MultiTSDBStore) TimeRange() (int64, int64)
type Partitioner ¶ added in v0.26.0
type Partitioner interface {
	// Partition partitions length entries into n <= length ranges that cover all
	// input ranges. It supports overlapping ranges.
	// NOTE: It expects ranges to be sorted by start time.
	Partition(length int, rng func(int) (uint64, uint64)) []Part
}
func NewGapBasedPartitioner ¶ added in v0.26.0
func NewGapBasedPartitioner(maxGapSize uint64) Partitioner
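The gap-based partitioner is what NewBucketStore expects as its partitioner argument; using the exported PartitionerMaxGapSize constant is a natural choice:

partitioner := store.NewGapBasedPartitioner(store.PartitionerMaxGapSize) // 512 KiB max gap between merged ranges.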
type PrometheusStore ¶
type PrometheusStore struct {
// contains filtered or unexported fields
}
PrometheusStore implements the store node API on top of the Prometheus remote read API.
func NewPrometheusStore ¶
func NewPrometheusStore(
	logger log.Logger,
	reg prometheus.Registerer,
	client *promclient.Client,
	baseURL *url.URL,
	component component.StoreAPI,
	externalLabelsFn func() labels.Labels,
	timestamps func() (mint int64, maxt int64),
	promVersion func() string,
) (*PrometheusStore, error)
NewPrometheusStore returns a new PrometheusStore that uses the given HTTP client to talk to Prometheus. It attaches the provided external labels to all results. The provided external labels have to be sorted.
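A sketch of wiring a PrometheusStore in front of a local Prometheus. The promclient.NewDefaultClient helper and the callback bodies are assumptions for illustration, not a prescribed setup:

base, err := url.Parse("http://localhost:9090")
if err != nil {
	return err
}
ps, err := store.NewPrometheusStore(
	logger,
	nil, // prometheus.Registerer; nil skips metric registration in this sketch.
	promclient.NewDefaultClient(), // Assumed helper constructing a *promclient.Client.
	base,
	component.Sidecar,
	func() labels.Labels { return labels.FromStrings("prometheus", "prom-a") }, // Must be sorted.
	func() (int64, int64) { return math.MinInt64, math.MaxInt64 },              // Illustrative timestamps callback.
	func() string { return "" }, // promVersion, if known.
)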
func (*PrometheusStore) Info ¶
func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info returns store information about the Prometheus instance. NOTE(bwplotka): MaxTime & MinTime are neither accurate nor adjusted dynamically. This is fine for now, but adjusting them might be needed in the future.
func (*PrometheusStore) LabelNames ¶
func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames returns all known label names of series that match the given matchers.
func (*PrometheusStore) LabelSet ¶ added in v0.26.0
func (p *PrometheusStore) LabelSet() []labelpb.ZLabelSet
func (*PrometheusStore) LabelValues ¶
func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues returns all known label values for a given label name.
func (*PrometheusStore) Series ¶
func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_SeriesServer) error
Series returns all series for a requested time range and label matcher.
func (*PrometheusStore) Timestamps ¶ added in v0.26.0
func (p *PrometheusStore) Timestamps() (mint int64, maxt int64)
type ProxyStore ¶
type ProxyStore struct {
// contains filtered or unexported fields
}
ProxyStore implements the store API that proxies request to all given underlying stores.
func NewProxyStore ¶
func NewProxyStore(
	logger log.Logger,
	reg prometheus.Registerer,
	stores func() []Client,
	component component.StoreAPI,
	selectorLabels labels.Labels,
	responseTimeout time.Duration,
) *ProxyStore
NewProxyStore returns a new ProxyStore that uses the given clients, which implement the StoreAPI, to fan in all series to the client. Note that there is no deduplication support; deduplication should be done at the highest level (just before PromQL).
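A sketch, assuming clients is a slice of store.Client values maintained elsewhere (for example built from static gRPC addresses):

proxy := store.NewProxyStore(
	logger,
	nil, // prometheus.Registerer.
	func() []store.Client { return clients }, // Client discovery is assumed to happen elsewhere.
	component.Query,
	nil,           // selectorLabels; none in this sketch.
	5*time.Minute, // responseTimeout; value is illustrative.
)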
func (*ProxyStore) Info ¶
func (s *ProxyStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info returns store information about the external labels this store has.
func (*ProxyStore) LabelNames ¶
func (s *ProxyStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames returns all known label names.
func (*ProxyStore) LabelSet ¶ added in v0.26.0
func (s *ProxyStore) LabelSet() []labelpb.ZLabelSet
func (*ProxyStore) LabelValues ¶
func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues returns all known label values for a given label name.
func (*ProxyStore) Series ¶
func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
Series returns all series for a requested time range and label matcher. Requested series are taken from other stores and proxied to the RPC client. NOTE: The resulting data is not trimmed exactly to the min and max time range.
func (*ProxyStore) TimeRange ¶ added in v0.26.0
func (s *ProxyStore) TimeRange() (int64, int64)
type ReadWriteTSDBStore ¶ added in v0.26.0
type ReadWriteTSDBStore struct {
	storepb.StoreServer
	storepb.WriteableStoreServer
}
ReadWriteTSDBStore is a TSDBStore that can also be written to.
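Because it simply embeds the two server interfaces, composing one is a struct literal. A sketch, assuming tsdbStore serves reads, writer handles remote-write requests, and grpcSrv is an existing *grpc.Server:

rw := store.ReadWriteTSDBStore{
	StoreServer:          tsdbStore,
	WriteableStoreServer: writer,
}
store.RegisterWritableStoreServer(rw)(grpcSrv) // Registers the writeable side on the gRPC server.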
type SeriesLimiter ¶ added in v0.26.0
type SeriesLimiterFactory ¶ added in v0.26.0
type SeriesLimiterFactory func(failedCounter prometheus.Counter) SeriesLimiter
SeriesLimiterFactory is used to create a new SeriesLimiter.
func NewSeriesLimiterFactory ¶ added in v0.26.0
func NewSeriesLimiterFactory(limit uint64) SeriesLimiterFactory
NewSeriesLimiterFactory makes a new SeriesLimiterFactory with a static limit.
type TSDBReader ¶ added in v0.26.0
type TSDBReader interface {
	storage.ChunkQueryable
	StartTime() (int64, error)
}
type TSDBStore ¶
type TSDBStore struct {
// contains filtered or unexported fields
}
TSDBStore implements the store API against a local TSDB instance. It attaches the provided external labels to all results. It only responds with raw data and does not support downsampling.
func NewTSDBStore ¶
func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore
NewTSDBStore creates a new TSDBStore. NOTE: The given lset has to be sorted.
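A sketch, assuming db is an open TSDB (anything satisfying TSDBReader, e.g. a *tsdb.DB) and illustrative external labels:

var db store.TSDBReader // E.g. an open *tsdb.DB.
ts := store.NewTSDBStore(
	logger,
	db,
	component.Rule,
	labels.FromStrings("replica", "a"), // FromStrings returns sorted labels.
)
_ = ts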
func (*TSDBStore) Info ¶
func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error)
Info returns store information about the Prometheus instance.
func (*TSDBStore) LabelNames ¶
func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error)
LabelNames returns all known label names constrained by the given matchers.
func (*TSDBStore) LabelValues ¶
func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error)
LabelValues returns all known label values for a given label name.
func (*TSDBStore) Series ¶
func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error
Series returns all series for a requested time range and label matcher. The returned data may exceed the requested time bounds.