Documentation ¶
Index ¶
- Constants
- Variables
- func Validate(cfg *Config, lcfg limits.Config) error
- type Config
- type ExemplarLabelsPosCache
- type InvertedLabelsCache
- type LabelInfo
- type LabelKey
- type LabelsCache
- type MetricCache
- type MetricNameCache
- func (m *MetricNameCache) Cap() int
- func (m *MetricNameCache) Evictions() uint64
- func (m *MetricNameCache) Get(schema, metric string, isExemplar bool) (model.MetricInfo, error)
- func (m *MetricNameCache) Len() int
- func (m *MetricNameCache) Set(schema, metric string, val model.MetricInfo, isExemplar bool) error
- type PositionCache
- type ResizableCache
- type SeriesCache
- type SeriesCacheImpl
Constants ¶
const (
    DefaultMetricCacheSize = 10000
    DefaultLabelsCacheSize = 100000
)
const DefaultExemplarKeyPosCacheSize = DefaultMetricCacheSize
The exemplar key-position cache defaults to the same size as the metric cache, assuming every metric has an exemplar.
const DefaultInvertedLabelsCacheSize = 500000
const DefaultSeriesCacheSize = 1000000
This is a reasonable default size for the number of active series; it results in Promscale using around 360MB at startup.
Variables ¶
var (
    SeriesCacheMaxBytesMetric = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: util.PromNamespace,
            Name:      "series_cache_max_bytes",
            Help:      "The target for the maximum amount of memory the series_cache can use in bytes.",
        })
    InvertedLabelsCacheMaxBytesMetric = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: util.PromNamespace,
            Name:      "inverted_labels_cache_max_bytes",
            Help:      "The target for the maximum amount of memory the inverted labels cache can use in bytes.",
        })
)
var DefaultConfig = Config{
    SeriesCacheInitialSize:      DefaultSeriesCacheSize,
    SeriesCacheMemoryMaxBytes:   1000000,
    MetricsCacheSize:            DefaultMetricCacheSize,
    LabelsCacheSize:             DefaultLabelsCacheSize,
    ExemplarKeyPosCacheSize:     DefaultExemplarKeyPosCacheSize,
    InvertedLabelsCacheSize:     DefaultInvertedLabelsCacheSize,
    InvertedLabelsCacheMaxBytes: 1000000,
}
Functions ¶
func Validate ¶
func Validate(cfg *Config, lcfg limits.Config) error
Types ¶
type Config ¶
type Config struct {
    SeriesCacheInitialSize          uint64
    SeriesCacheMemoryMaxBytes       uint64
    MetricsCacheSize                uint64
    LabelsCacheSize                 uint64
    ExemplarKeyPosCacheSize         uint64
    InvertedLabelsCacheSize         uint64
    InvertedLabelsCacheMaxBytesFlag limits.PercentageAbsoluteBytesFlag
    InvertedLabelsCacheMaxBytes     uint64
    // contains filtered or unexported fields
}
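A minimal usage sketch (the import path below assumes the Promscale repository layout and is not confirmed by this listing): callers typically start from DefaultConfig, override the sizes they care about, and pass the result to the constructors documented below.

package main

import (
    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import path
)

func main() {
    cfg := cache.DefaultConfig
    cfg.SeriesCacheMemoryMaxBytes = 512 * 1024 * 1024 // e.g. allow the series cache up to 512MB
    cfg.MetricsCacheSize = 20000                      // e.g. double the default metric cache size

    sigClose := make(chan struct{}) // close signal passed to the constructors that take one
    defer close(sigClose)

    seriesCache := cache.NewSeriesCache(cfg, sigClose)
    metricCache := cache.NewMetricCache(cfg)
    _, _ = seriesCache, metricCache
}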
type ExemplarLabelsPosCache ¶
type ExemplarLabelsPosCache struct {
// contains filtered or unexported fields
}
func (*ExemplarLabelsPosCache) GetLabelPositions ¶
func (pos *ExemplarLabelsPosCache) GetLabelPositions(metric string) (map[string]int, bool)
func (*ExemplarLabelsPosCache) SetOrUpdateLabelPositions ¶
func (pos *ExemplarLabelsPosCache) SetOrUpdateLabelPositions(metric string, index map[string]int)
type InvertedLabelsCache ¶
type InvertedLabelsCache struct {
*ResizableCache
}
(metric, label key-pair) -> (label id, label position) cache. Used when creating series to avoid DB calls for labels. Each label position is unique to a specific metric, meaning that one label can have different positions for different metrics.
func NewInvertedLabelsCache ¶
func NewInvertedLabelsCache(config Config, sigClose chan struct{}) *InvertedLabelsCache
The returned cache is thread-safe.
func (*InvertedLabelsCache) GetLabelsId ¶
func (c *InvertedLabelsCache) GetLabelsId(key LabelKey) (LabelInfo, bool)
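A brief lookup sketch; LabelKey's fields are not shown in this listing, so a zero-value key stands in purely to illustrate the call shape (import path assumed):

package main

import (
    "fmt"

    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import path
)

func main() {
    sigClose := make(chan struct{})
    defer close(sigClose)

    labels := cache.NewInvertedLabelsCache(cache.DefaultConfig, sigClose)

    // LabelKey identifies a (metric, label key/value) pair; its fields are not
    // documented here, so a zero value is used only to show the call shape.
    var key cache.LabelKey
    if info, ok := labels.GetLabelsId(key); ok {
        fmt.Println(info.LabelID, info.Pos) // label id and its per-metric position
    }
}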
type LabelInfo ¶
type LabelInfo struct {
    LabelID int32 // id of the label
    Pos     int32 // position of the label within a specific metric
}
func NewLabelInfo ¶
type LabelsCache ¶
type LabelsCache interface {
    // GetValues tries to get a batch of keys and stores the corresponding values in valuesOut.
    // It returns the number of keys that were actually found.
    // NOTE: this function does _not_ preserve the order of keys; the first numFound
    // keys will be the keys whose values are present, while the remainder
    // will be the keys not present in the cache.
    GetValues(keys []interface{}, valuesOut []interface{}) (numFound int)
    // InsertBatch inserts a batch of keys with their corresponding values.
    // This function will _overwrite_ the keys and values slices with their
    // canonical versions.
    // It returns the number of elements inserted, which is lower than len(keys) if the
    // insertion was starved.
    InsertBatch(keys []interface{}, values []interface{}, sizes []uint64) (numInserted int)
    // Len returns the number of labels cached in the system.
    Len() int
    // Cap returns the capacity of the labels cache.
    Cap() int
    Evictions() uint64
}
func NewLabelsCache ¶
func NewLabelsCache(config Config) LabelsCache
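The batch semantics of GetValues and InsertBatch are easier to see in code. A hedged sketch (the key and value types here are arbitrary illustrative values; real callers pass the package's canonical label keys and values, and the import path is assumed):

package main

import (
    "fmt"

    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import path
)

func main() {
    labels := cache.NewLabelsCache(cache.DefaultConfig)

    // Insert a batch; sizes are per-entry memory estimates in bytes.
    keys := []interface{}{"job", "instance"}
    values := []interface{}{int64(1), int64(2)}
    sizes := []uint64{16, 16}
    fmt.Println("inserted:", labels.InsertBatch(keys, values, sizes))

    // Look up a batch. GetValues reorders the keys slice so that the first
    // numFound entries are the hits and the remainder are the misses.
    lookup := []interface{}{"job", "region", "instance"}
    out := make([]interface{}, len(lookup))
    numFound := labels.GetValues(lookup, out)
    fmt.Println("found:", lookup[:numFound], "missing:", lookup[numFound:])
}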
type MetricCache ¶
type MetricCache interface {
    Get(schema, metric string, isExemplar bool) (model.MetricInfo, error)
    Set(schema, metric string, mInfo model.MetricInfo, isExemplar bool) error
    // Len returns the number of metrics cached in the system.
    Len() int
    // Cap returns the capacity of the metrics cache.
    Cap() int
    Evictions() uint64
}
MetricCache provides a caching mechanism for metric table names.
type MetricNameCache ¶
type MetricNameCache struct {
Metrics *clockcache.Cache
}
MetricNameCache stores and retrieves metric table names in an in-memory cache.
func NewMetricCache ¶
func NewMetricCache(config Config) *MetricNameCache
func (*MetricNameCache) Cap ¶
func (m *MetricNameCache) Cap() int
func (*MetricNameCache) Evictions ¶
func (m *MetricNameCache) Evictions() uint64
func (*MetricNameCache) Get ¶
func (m *MetricNameCache) Get(schema, metric string, isExemplar bool) (model.MetricInfo, error)
Get fetches the table name for the specified metric.
func (*MetricNameCache) Len ¶
func (m *MetricNameCache) Len() int
func (*MetricNameCache) Set ¶
func (m *MetricNameCache) Set(schema, metric string, val model.MetricInfo, isExemplar bool) error
Set stores metric info for the specified metric and schema.
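A short sketch of the Get/Set round trip (import paths, schema, and metric names are assumptions for illustration; model.MetricInfo's fields are not documented here, so a zero value is stored):

package main

import (
    "fmt"

    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import paths
    "github.com/timescale/promscale/pkg/pgmodel/model"
)

func main() {
    metrics := cache.NewMetricCache(cache.DefaultConfig)

    // Store metric info for a (schema, metric) pair; a zero model.MetricInfo is
    // used only to show the call shape.
    if err := metrics.Set("prom_data", "http_requests_total", model.MetricInfo{}, false); err != nil {
        fmt.Println("set failed:", err)
        return
    }

    // Fetch it back; Get returns the cached info or an error.
    info, err := metrics.Get("prom_data", "http_requests_total", false)
    if err != nil {
        fmt.Println("get failed:", err)
        return
    }
    fmt.Printf("%+v\n", info)
}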
type PositionCache ¶
type PositionCache interface {
    // GetLabelPositions fetches the position of label keys (as index) that must be respected
    // while pushing exemplar label's values to the database.
    GetLabelPositions(metric string) (map[string]int, bool)
    // SetOrUpdateLabelPositions sets or updates the position of label (index) keys for the given metric.
    SetOrUpdateLabelPositions(metric string, index map[string]int)
}
func NewExemplarLabelsPosCache ¶
func NewExemplarLabelsPosCache(config Config) PositionCache
NewExemplarLabelsPosCache creates a cache of map[metric_name]LabelPositions where LabelPositions is map[LabelName]LabelPosition. This means that the cache stores positions of each label's value per metric basis, which is meant to preserve and reuse _prom_catalog.exemplar_label_position table's 'pos' column.
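A sketch of how the exemplar label-position cache is used (the metric and label names are example values, and the import path is assumed):

package main

import (
    "fmt"

    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import path
)

func main() {
    posCache := cache.NewExemplarLabelsPosCache(cache.DefaultConfig)

    // Record that, for this metric, the exemplar label "traceID" is written at
    // position 0 and "span" at position 1.
    posCache.SetOrUpdateLabelPositions("http_request_duration_seconds", map[string]int{
        "traceID": 0,
        "span":    1,
    })

    // Later, reuse the stored positions when pushing exemplar label values.
    if positions, ok := posCache.GetLabelPositions("http_request_duration_seconds"); ok {
        fmt.Println(positions["traceID"], positions["span"])
    }
}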
type ResizableCache ¶
type ResizableCache struct {
    *clockcache.Cache
    // contains filtered or unexported fields
}
func NewResizableCache ¶
func NewResizableCache(cache *clockcache.Cache, maxBytes uint64, sigClose <-chan struct{}) *ResizableCache
type SeriesCache ¶
type SeriesCache interface {
    Reset()
    GetSeriesFromProtos(labelPairs []prompb.Label) (series *model.Series, metricName string, err error)
    Len() int
    Cap() int
    Evictions() uint64
}
SeriesCache is a cache of model.Series entries.
type SeriesCacheImpl ¶
type SeriesCacheImpl struct {
*ResizableCache
}
func NewSeriesCache ¶
func NewSeriesCache(config Config, sigClose <-chan struct{}) *SeriesCacheImpl
func (*SeriesCacheImpl) GetSeriesFromLabels ¶
GetSeriesFromLabels converts a labels.Labels to a canonical model.Series object.
func (*SeriesCacheImpl) GetSeriesFromProtos ¶
func (t *SeriesCacheImpl) GetSeriesFromProtos(labelPairs []prompb.Label) (*model.Series, string, error)
GetSeriesFromProtos returns a model.Series entry given a list of Prometheus prompb.Label. If the desired entry is not in the cache, a "placeholder" model.Series entry is constructed and put into the cache. It is not populated with database IDs until a later phase, see model.Series.SetSeriesID.
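A sketch of the typical call (import paths are assumptions; prompb.Label is the Prometheus protobuf label type referenced in the signature above):

package main

import (
    "fmt"

    "github.com/timescale/promscale/pkg/pgmodel/cache" // assumed import paths
    "github.com/timescale/promscale/pkg/prompb"
)

func main() {
    sigClose := make(chan struct{})
    defer close(sigClose)

    seriesCache := cache.NewSeriesCache(cache.DefaultConfig, sigClose)

    labelPairs := []prompb.Label{
        {Name: "__name__", Value: "http_requests_total"},
        {Name: "job", Value: "api-server"},
    }

    // On a cache miss this inserts a placeholder model.Series whose database IDs
    // are filled in later (see model.Series.SetSeriesID).
    series, metricName, err := seriesCache.GetSeriesFromProtos(labelPairs)
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(metricName, series)
}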