Documentation ¶
Index ¶
- Constants
- Variables
- func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error)
- func ExpectTables(ctx context.Context, client TableClient, expected []TableDesc) error
- func ProcessCacheResponse(chunks []Chunk, keys []string, bufs [][]byte) (found []Chunk, missing []Chunk, err error)
- type AutoScalingConfig
- type Bucket
- type Chunk
- type DecodeContext
- type IndexEntry
- type IndexQuery
- type MockStorage
- func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error
- func (m *MockStorage) CreateTable(_ context.Context, desc TableDesc) error
- func (m *MockStorage) DescribeTable(_ context.Context, name string) (desc TableDesc, status string, err error)
- func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, error)
- func (m *MockStorage) ListTables(_ context.Context) ([]string, error)
- func (m *MockStorage) NewWriteBatch() WriteBatch
- func (m *MockStorage) PutChunks(_ context.Context, chunks []Chunk) error
- func (m *MockStorage) QueryPages(ctx context.Context, query IndexQuery, ...) error
- func (m *MockStorage) UpdateTable(_ context.Context, _, desc TableDesc) error
- type PeriodicTableConfig
- type ReadBatch
- type Schema
- type SchemaConfig
- type StorageClient
- type Store
- type StoreConfig
- type TableClient
- type TableDesc
- type TableManager
- type Tags
- type WriteBatch
Constants ¶
const (
    ErrInvalidChunkID  = errs.Error("invalid chunk ID")
    ErrInvalidChecksum = errs.Error("invalid chunk checksum")
    ErrWrongMetadata   = errs.Error("wrong chunk metadata")
    ErrMetadataLength  = errs.Error("chunk metadata wrong length")
)
Errors that decode can return
Variables ¶
var (
ErrNoMetricNameNotSupported = errors.New("metric name required for pre-v8 schemas")
)
Errors
Functions ¶
func ChunksToMatrix ¶
func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error)
ChunksToMatrix converts a set of chunks to a model.Matrix.
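A hedged usage sketch (the helper name is invented and import paths are omitted): count how many series a set of chunks expands into for a query window.
// chunksToSeriesCount is a hypothetical helper. model.Matrix is a slice of
// sample streams, so its length is the number of series returned.
func chunksToSeriesCount(ctx context.Context, chunks []Chunk, from, through model.Time) (int, error) {
    matrix, err := ChunksToMatrix(ctx, chunks, from, through)
    if err != nil {
        return 0, err
    }
    return len(matrix), nil
}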
func ExpectTables ¶
func ExpectTables(ctx context.Context, client TableClient, expected []TableDesc) error
ExpectTables compares existing tables to an expected set of tables. Exposed for testing.
Types ¶
type AutoScalingConfig ¶
type AutoScalingConfig struct {
    Enabled     bool
    RoleARN     string
    MinCapacity int64
    MaxCapacity int64
    OutCooldown int64
    InCooldown  int64
    TargetValue float64
}
AutoScalingConfig for DynamoDB tables.
func (*AutoScalingConfig) RegisterFlags ¶
func (cfg *AutoScalingConfig) RegisterFlags(argPrefix string, f *flag.FlagSet)
RegisterFlags adds the flags required to config this to the given FlagSet.
type Bucket ¶
type Bucket struct {
// contains filtered or unexported fields
}
Bucket describes a range of time with a tableName and hashKey
type Chunk ¶
type Chunk struct {
    // These two fields will be missing from older chunks (as will the hash).
    // On fetch we will initialise these fields from the DynamoDB key.
    Fingerprint model.Fingerprint `json:"fingerprint"`
    UserID      string            `json:"userID"`

    // These fields will be in all chunks, including old ones.
    From    model.Time   `json:"from"`
    Through model.Time   `json:"through"`
    Metric  model.Metric `json:"metric"`

    // The hash is not written to the external storage either. We use
    // crc32, Castagnoli table. See http://www.evanjones.ca/crc32c.html.
    // For old chunks, ChecksumSet will be false.
    ChecksumSet bool   `json:"-"`
    Checksum    uint32 `json:"-"`

    // We never use Delta encoding (the zero value), so if this entry is
    // missing, we default to DoubleDelta.
    Encoding prom_chunk.Encoding `json:"encoding"`
    Data     prom_chunk.Chunk    `json:"-"`
    // contains filtered or unexported fields
}
Chunk contains encoded timeseries data
func NewChunk ¶
func NewChunk(userID string, fp model.Fingerprint, metric model.Metric, c prom_chunk.Chunk, from, through model.Time) Chunk
NewChunk creates a new chunk
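A hedged sketch of assembling a chunk before flushing. It assumes the prom_chunk package exposes a NewForEncoding constructor, as the Prometheus 1.x local-storage chunk package does; the helper name is invented.
// buildChunk is a hypothetical example: allocate an empty DoubleDelta chunk
// and wrap it in a Chunk for one user and series. Samples would be appended
// to data before flushing in a real ingester.
func buildChunk(userID string, fp model.Fingerprint, metric model.Metric, from, through model.Time) (Chunk, error) {
    data, err := prom_chunk.NewForEncoding(prom_chunk.DoubleDelta)
    if err != nil {
        return Chunk{}, err
    }
    return NewChunk(userID, fp, metric, data, from, through), nil
}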
func ParseExternalKey ¶
ParseExternalKey is used to construct a partially-populated chunk from the key in DynamoDB. This chunk can then be used to calculate the key needed to fetch the Chunk data from Memcache/S3, and then fully populate the chunk with decode().
Pre-checksums, the keys written to DynamoDB looked like `<fingerprint>:<start time>:<end time>` (aka the ID), and the key for memcache and S3 was `<user id>/<fingerprint>:<start time>:<end time>`. Fingerprints and times were written in base-10.
Post-checksums, external keys become the same across DynamoDB, Memcache and S3. Numbers become hex encoded. Keys look like: `<user id>/<fingerprint>:<start time>:<end time>:<checksum>`.
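For orientation, a minimal sketch of the post-checksum layout. The formatting below is an assumption drawn from the description above, not the package's actual implementation; the authoritative construction is Chunk.ExternalKey, and all values shown are invented.
// externalKeyLayout is a hypothetical illustration of the post-checksum key
// format: hex-encoded numbers, prefixed by the user ID.
func externalKeyLayout(userID string, fp model.Fingerprint, from, through model.Time, checksum uint32) string {
    // e.g. "userA/2a5e4c8e1f3b9037:15e4f1a7c00:15e4f2b9e80:9a3c5b21"
    return fmt.Sprintf("%s/%x:%x:%x:%x", userID, uint64(fp), int64(from), int64(through), checksum)
}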
func (*Chunk) Decode ¶
func (c *Chunk) Decode(decodeContext *DecodeContext, input []byte) error
Decode the chunk from the given buffer, and confirm the chunk is the one we expected.
func (*Chunk) Encode ¶
Encode writes the chunk out to a big write buffer, then calculates the checksum.
func (*Chunk) ExternalKey ¶
ExternalKey returns the key you can use to fetch this chunk from external storage. For newer chunks, this key includes a checksum.
type DecodeContext ¶
type DecodeContext struct {
// contains filtered or unexported fields
}
DecodeContext holds data that can be re-used between decodes of different chunks
func NewDecodeContext ¶
func NewDecodeContext() *DecodeContext
NewDecodeContext creates a new, blank, DecodeContext
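A minimal sketch of reusing one DecodeContext across several decodes; the helper name is invented, and bufs[i] is assumed to hold the stored bytes for chunks[i], as in ProcessCacheResponse.
// decodeAll is a hypothetical helper: decode each fetched buffer into its
// chunk, sharing a single DecodeContext across the whole batch.
func decodeAll(chunks []Chunk, bufs [][]byte) ([]Chunk, error) {
    decodeContext := NewDecodeContext()
    for i := range chunks {
        if err := chunks[i].Decode(decodeContext, bufs[i]); err != nil {
            return nil, err
        }
    }
    return chunks, nil
}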
type IndexEntry ¶
type IndexEntry struct {
    TableName string
    HashValue string

    // For writes, RangeValue will always be set.
    RangeValue []byte

    // New for v6 schema, label value is not written as part of the range key.
    Value []byte
}
IndexEntry describes an entry in the chunk index
type IndexQuery ¶
type IndexQuery struct {
    TableName string
    HashValue string

    // One of RangeValuePrefix or RangeValueStart might be set:
    // - If RangeValuePrefix is not nil, must read all keys with that prefix.
    // - If RangeValueStart is not nil, must read all keys from there onwards.
    // - If neither is set, must read all keys for that row.
    RangeValuePrefix []byte
    RangeValueStart  []byte

    // Filters for querying
    ValueEqual []byte
}
IndexQuery describes a query for entries
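To illustrate the three query shapes described in the struct comment, a hedged sketch follows; the table and hash values are invented.
// exampleQueries is a hypothetical illustration of the three query shapes.
func exampleQueries() []IndexQuery {
    return []IndexQuery{
        // Read all keys under the hash value that share a prefix.
        {TableName: "index_2017_week_40", HashValue: "userA:d12345:up", RangeValuePrefix: []byte("some prefix")},
        // Read all keys from a starting range value onwards.
        {TableName: "index_2017_week_40", HashValue: "userA:d12345:up", RangeValueStart: []byte("start here")},
        // Read every key in the row.
        {TableName: "index_2017_week_40", HashValue: "userA:d12345:up"},
    }
}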
type MockStorage ¶
type MockStorage struct {
// contains filtered or unexported fields
}
MockStorage is a fake in-memory StorageClient.
func (*MockStorage) BatchWrite ¶
func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error
BatchWrite implements StorageClient.
func (*MockStorage) CreateTable ¶
func (m *MockStorage) CreateTable(_ context.Context, desc TableDesc) error
CreateTable implements StorageClient.
func (*MockStorage) DescribeTable ¶
func (m *MockStorage) DescribeTable(_ context.Context, name string) (desc TableDesc, status string, err error)
DescribeTable implements StorageClient.
func (*MockStorage) ListTables ¶
func (m *MockStorage) ListTables(_ context.Context) ([]string, error)
ListTables implements StorageClient.
func (*MockStorage) NewWriteBatch ¶
func (m *MockStorage) NewWriteBatch() WriteBatch
NewWriteBatch implements StorageClient.
func (*MockStorage) PutChunks ¶
func (m *MockStorage) PutChunks(_ context.Context, chunks []Chunk) error
PutChunks implements StorageClient.
func (*MockStorage) QueryPages ¶
func (m *MockStorage) QueryPages(ctx context.Context, query IndexQuery, callback func(result ReadBatch) (shouldContinue bool)) error
QueryPages implements StorageClient.
func (*MockStorage) UpdateTable ¶
func (m *MockStorage) UpdateTable(_ context.Context, _, desc TableDesc) error
UpdateTable implements StorageClient.
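A hedged sketch of exercising MockStorage in a test: create a table, write an (empty) batch, store a chunk, and page through a query. The helper name and table name are invented; ReadBatch is opaque here, so the callback only decides whether to continue.
// mockStorageRoundTrip is a hypothetical test helper exercising the
// StorageClient surface that MockStorage implements.
func mockStorageRoundTrip(ctx context.Context, m *MockStorage, c Chunk, q IndexQuery) error {
    // Create the table first (the name is taken from the query for illustration).
    if err := m.CreateTable(ctx, TableDesc{Name: q.TableName}); err != nil {
        return err
    }
    batch := m.NewWriteBatch()
    // Index entries would be added to batch here before writing it.
    if err := m.BatchWrite(ctx, batch); err != nil {
        return err
    }
    if err := m.PutChunks(ctx, []Chunk{c}); err != nil {
        return err
    }
    return m.QueryPages(ctx, q, func(result ReadBatch) (shouldContinue bool) {
        // Inspect result here; returning true asks for the next page.
        return true
    })
}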
type PeriodicTableConfig ¶
type PeriodicTableConfig struct {
    From   util.DayValue
    Prefix string
    Period time.Duration
    Tags   Tags

    ProvisionedWriteThroughput int64
    ProvisionedReadThroughput  int64
    InactiveWriteThroughput    int64
    InactiveReadThroughput     int64

    WriteScale              AutoScalingConfig
    InactiveWriteScale      AutoScalingConfig
    InactiveWriteScaleLastN int64
    // contains filtered or unexported fields
}
PeriodicTableConfig is configuration for a set of time-sharded tables.
func (*PeriodicTableConfig) GetTags ¶
func (cfg *PeriodicTableConfig) GetTags() Tags
GetTags returns tags for the table. Exists to provide backwards compatibility for the command-line.
func (*PeriodicTableConfig) RegisterFlags ¶
func (cfg *PeriodicTableConfig) RegisterFlags(argPrefix, tablePrefix string, f *flag.FlagSet)
RegisterFlags adds the flags required to config this to the given FlagSet.
type Schema ¶
type Schema interface {
    // When doing a write, use this method to return the list of entries you should write to.
    GetWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error)

    // When doing a read, use these methods to return the list of entries you should query
    GetReadQueries(from, through model.Time, userID string) ([]IndexQuery, error)
    GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error)
    GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error)
    GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error)
}
Schema interface defines methods to calculate the hash and range keys needed to write or read chunks from the external index.
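A minimal sketch of the write path through a Schema: compute the index entries for a chunk's time range. The helper name is invented, and chunkID and metricName are taken as parameters rather than derived, to avoid assuming helpers this documentation does not show.
// indexEntriesFor is a hypothetical helper: ask the schema which index rows
// a chunk should be written to before the chunk itself is stored.
func indexEntriesFor(s Schema, c Chunk, metricName model.LabelValue, chunkID string) ([]IndexEntry, error) {
    return s.GetWriteEntries(c.From, c.Through, c.UserID, metricName, c.Metric, chunkID)
}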
type SchemaConfig ¶
type SchemaConfig struct {
    // After midnight on this day, we start bucketing indexes by day instead of by
    // hour. Only the day matters, not the time within the day.
    DailyBucketsFrom util.DayValue

    Base64ValuesFrom util.DayValue
    V4SchemaFrom     util.DayValue
    V5SchemaFrom     util.DayValue
    V6SchemaFrom     util.DayValue
    V7SchemaFrom     util.DayValue
    V8SchemaFrom     util.DayValue

    // Master 'off-switch' for table capacity updates, e.g. when troubleshooting
    ThroughputUpdatesDisabled bool

    // Period with which the table manager will poll for tables.
    DynamoDBPollInterval time.Duration

    // duration a table will be created before it is needed.
    CreationGracePeriod time.Duration

    // Config for the index & chunk tables.
    OriginalTableName string
    UsePeriodicTables bool
    IndexTables       PeriodicTableConfig
    ChunkTables       PeriodicTableConfig

    // Deprecated configuration for setting tags on all tables.
    Tags Tags
}
SchemaConfig contains the config for our chunk index schemas
func (*SchemaConfig) RegisterFlags ¶
func (cfg *SchemaConfig) RegisterFlags(f *flag.FlagSet)
RegisterFlags adds the flags required to config this to the given FlagSet.
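A hedged sketch of wiring SchemaConfig into a command-line program; the function name is invented, and the flag names themselves are defined by RegisterFlags and not listed here.
// loadSchemaConfig is a hypothetical example of populating SchemaConfig from
// command-line arguments via its RegisterFlags method.
func loadSchemaConfig(args []string) (SchemaConfig, error) {
    var cfg SchemaConfig
    fs := flag.NewFlagSet("chunk", flag.ContinueOnError)
    cfg.RegisterFlags(fs)
    if err := fs.Parse(args); err != nil {
        return SchemaConfig{}, err
    }
    return cfg, nil
}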
type StorageClient ¶
type StorageClient interface {
    // For the write path.
    NewWriteBatch() WriteBatch
    BatchWrite(context.Context, WriteBatch) error

    // For the read path.
    QueryPages(ctx context.Context, query IndexQuery, callback func(result ReadBatch) (shouldContinue bool)) error

    // For storing and retrieving chunks.
    PutChunks(ctx context.Context, chunks []Chunk) error
    GetChunks(ctx context.Context, chunks []Chunk) ([]Chunk, error)
}
StorageClient is a client for the persistent storage for Cortex (e.g. DynamoDB + S3).
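A hedged sketch of storing and re-reading chunks through any StorageClient implementation; the helper name is invented, and GetChunks is assumed to return the same chunks with their data filled in, per the ParseExternalKey notes above.
// storeAndFetch is a hypothetical round trip through the chunk half of the
// interface; MockStorage or a real DynamoDB+S3 client both satisfy it.
func storeAndFetch(ctx context.Context, client StorageClient, chunks []Chunk) ([]Chunk, error) {
    if err := client.PutChunks(ctx, chunks); err != nil {
        return nil, err
    }
    return client.GetChunks(ctx, chunks)
}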
type Store ¶
type Store struct {
// contains filtered or unexported fields
}
Store implements ChunkStore on top of a StorageClient.
func NewStore ¶
func NewStore(cfg StoreConfig, schemaCfg SchemaConfig, storage StorageClient) (*Store, error)
NewStore makes a new ChunkStore
func (*Store) Get ¶
func (c *Store) Get(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error)
Get implements ChunkStore
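Putting the pieces together, a hedged sketch of building a Store and querying it. The function name is invented, the configs are left at zero values for brevity (real deployments fill in StoreConfig and SchemaConfig), and labels.NewMatcher is assumed to be the constructor from the Prometheus labels package referenced by Get's signature.
// queryStore is a hypothetical example: build a Store over a StorageClient
// and fetch chunks for one metric in [from, through].
func queryStore(ctx context.Context, storage StorageClient, from, through model.Time) ([]Chunk, error) {
    store, err := NewStore(StoreConfig{}, SchemaConfig{}, storage)
    if err != nil {
        return nil, err
    }
    nameMatcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "up")
    if err != nil {
        return nil, err
    }
    return store.Get(ctx, from, through, nameMatcher)
}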
type StoreConfig ¶
type StoreConfig struct {
    CacheConfig cache.Config

    MinChunkAge     time.Duration
    QueryChunkLimit int
    // contains filtered or unexported fields
}
StoreConfig specifies config for a ChunkStore
func (*StoreConfig) RegisterFlags ¶
func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet)
RegisterFlags adds the flags required to config this to the given FlagSet
type TableClient ¶
type TableClient interface {
    ListTables(ctx context.Context) ([]string, error)
    CreateTable(ctx context.Context, desc TableDesc) error
    DescribeTable(ctx context.Context, name string) (desc TableDesc, status string, err error)
    UpdateTable(ctx context.Context, current, expected TableDesc) error
}
TableClient is a client for telling Dynamo what to do with tables.
type TableDesc ¶
type TableDesc struct {
    Name             string
    ProvisionedRead  int64
    ProvisionedWrite int64
    Tags             Tags
    WriteScale       AutoScalingConfig
}
TableDesc describes a table.
type TableManager ¶
type TableManager struct {
// contains filtered or unexported fields
}
TableManager creates and manages the provisioned throughput on DynamoDB tables
func NewTableManager ¶
func NewTableManager(cfg SchemaConfig, maxChunkAge time.Duration, tableClient TableClient) (*TableManager, error)
NewTableManager makes a new TableManager
func (*TableManager) SyncTables ¶
func (m *TableManager) SyncTables(ctx context.Context) error
SyncTables will calculate the tables expected to exist, create those that do not and update those that need it. It is exposed for testing.
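A minimal sketch of driving the table manager on a timer; the function name is invented, the interval would typically come from SchemaConfig.DynamoDBPollInterval, and error handling is simplified.
// syncLoop is a hypothetical driver: reconcile tables immediately, then again
// on every tick until the context is cancelled.
func syncLoop(ctx context.Context, m *TableManager, interval time.Duration) error {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        if err := m.SyncTables(ctx); err != nil {
            return err
        }
        select {
        case <-ticker.C:
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}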
type Tags ¶
type Tags map[string]string
Tags is a string-string map that implements flag.Value.
type WriteBatch ¶
WriteBatch represents a batch of writes.