Documentation ¶
Index ¶
- Constants
- Variables
- func AppendSeriesEntry(dst []byte, flag uint8, id uint64, key []byte) []byte
- func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte
- func CompareSeriesKeys(a, b []byte) int
- func GenerateSeriesKeys(names [][]byte, tagsSlice []models.Tags) [][]byte
- func IsValidSeriesEntryFlag(flag byte) bool
- func IsValidSeriesSegmentFilename(filename string) bool
- func JoinSeriesOffset(segmentID uint16, pos uint32) int64
- func MakeTagsKey(keys []string, tags models.Tags) []byte
- func MarshalTags(tags map[string]string) []byte
- func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error)
- func NewMeasurementSliceIterator(names [][]byte) *measurementSliceIterator
- func NewSeriesPointIterator(indexSet IndexSet, opt query.IteratorOptions) (_ query.Iterator, err error)
- func NewSeriesQueryAdapterIterator(sfile *SeriesFile, itr SeriesIDIterator, fieldset *MeasurementFieldSet, ...) query.Iterator
- func NewShardError(id uint64, err error) error
- func NewTagKeySliceIterator(keys [][]byte) *tagKeySliceIterator
- func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error)
- func NewTagValueSliceIterator(values [][]byte) *tagValueSliceIterator
- func ParseSeriesKey(data []byte) (name []byte, tags models.Tags)
- func ParseSeriesKeyInto(data []byte, dstTags models.Tags) ([]byte, models.Tags)
- func ParseSeriesSegmentFilename(filename string) (uint16, error)
- func ReadAllSeriesIDIterator(itr SeriesIDIterator) ([]uint64, error)
- func ReadSeriesEntry(data []byte) (flag uint8, id uint64, key []byte, sz int64)
- func ReadSeriesKey(data []byte) (key []byte)
- func ReadSeriesKeyFromSegments(a []*SeriesSegment, offset int64) []byte
- func ReadSeriesKeyLen(data []byte) (sz int, remainder []byte)
- func ReadSeriesKeyMeasurement(data []byte) (name, remainder []byte)
- func ReadSeriesKeyTag(data []byte) (key, value, remainder []byte)
- func ReadSeriesKeyTagN(data []byte) (n int, remainder []byte)
- func RegisterEngine(name string, fn NewEngineFunc)
- func RegisterIndex(name string, fn NewIndexFunc)
- func RegisteredEngines() []string
- func RegisteredIndexes() []string
- func SeriesKeySize(name []byte, tags models.Tags) int
- func SeriesKeysSize(names [][]byte, tagsSlice []models.Tags) int
- func SeriesSegmentSize(id uint16) uint32
- func SplitSeriesOffset(offset int64) (segmentID uint16, pos uint32)
- func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error
- type BooleanArray
- type BooleanArrayCursor
- type ChangeType
- type CompactionPlannerCreator
- type Config
- type Cursor
- type CursorIterator
- type CursorIterators
- type CursorRequest
- type CursorStats
- type Engine
- type EngineFormat
- type EngineOptions
- type ErrPreviousShardFail
- type Field
- type FieldChange
- type FieldChanges
- type FieldCreate
- type FileStoreObserver
- type FloatArray
- type FloatArrayCursor
- type Index
- type IndexFormat
- type IndexSet
- func (is IndexSet) Database() string
- func (is IndexSet) DedupeInmemIndexes() IndexSet
- func (is IndexSet) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error
- func (is IndexSet) HasField(measurement []byte, field string) bool
- func (is IndexSet) HasInmemIndex() bool
- func (is IndexSet) HasTagKey(name, key []byte) (bool, error)
- func (is IndexSet) HasTagValue(name, key, value []byte) (bool, error)
- func (is IndexSet) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error)
- func (is IndexSet) MeasurementIterator() (MeasurementIterator, error)
- func (is IndexSet) MeasurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) (_ [][]byte, err error)
- func (is IndexSet) MeasurementNamesByPredicate(auth query.FineAuthorizer, expr influxql.Expr) (_ [][]byte, err error)
- func (is IndexSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error)
- func (is IndexSet) MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error)
- func (is IndexSet) MeasurementSeriesKeyByExprIterator(name []byte, expr influxql.Expr, auth query.FineAuthorizer) (SeriesKeyIterator, error)
- func (is IndexSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error)
- func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr, ...) ([][]string, error)
- func (is IndexSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)
- func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.FineAuthorizer, name, tagKey []byte) (bool, error)
- func (is IndexSet) TagKeyIterator(name []byte) (TagKeyIterator, error)
- func (is IndexSet) TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error)
- func (is IndexSet) TagSets(sfile *SeriesFile, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error)
- func (is IndexSet) TagValueIterator(name, key []byte) (TagValueIterator, error)
- func (is IndexSet) TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error)
- type IngressMetrics
- type IntegerArray
- type IntegerArrayCursor
- type KeyValue
- type KeyValues
- type LimitError
- type MeasurementFieldSet
- func (fs *MeasurementFieldSet) ApplyChanges() error
- func (fs *MeasurementFieldSet) Bytes() int
- func (fs *MeasurementFieldSet) ChangesPath() string
- func (fs *MeasurementFieldSet) Close() error
- func (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields
- func (fs *MeasurementFieldSet) Delete(name string)
- func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error
- func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields
- func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields
- func (fs *MeasurementFieldSet) IsEmpty() bool
- func (fs *MeasurementFieldSet) MeasurementNames() []string
- func (fs *MeasurementFieldSet) Save(changes FieldChanges) error
- func (fs *MeasurementFieldSet) SetMeasurementFieldSetWriter(queueLength int, logger *zap.Logger)
- func (fs *MeasurementFieldSet) WriteToFile() error
- type MeasurementFields
- func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error
- func (m *MeasurementFields) Field(name string) *Field
- func (m *MeasurementFields) FieldBytes(name []byte) *Field
- func (m *MeasurementFields) FieldKeys() []string
- func (m *MeasurementFields) FieldN() int
- func (m *MeasurementFields) FieldSet() map[string]influxql.DataType
- func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool)
- func (m *MeasurementFields) HasField(name string) bool
- type MeasurementIterator
- type MeasurementIterators
- type MeasurementSliceIterator
- type MetricKey
- type MetricValue
- type NewEngineFunc
- type NewIndexFunc
- type PartialWriteError
- type PointBatcher
- type PointBatcherStats
- type SeriesCursor
- type SeriesCursorRequest
- type SeriesCursorRow
- type SeriesElem
- type SeriesFile
- func (f *SeriesFile) Close() (err error)
- func (f *SeriesFile) CreateSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags, tracker StatsTracker) ([]uint64, error)
- func (f *SeriesFile) DeleteSeriesID(id uint64) error
- func (f *SeriesFile) DisableCompactions()
- func (f *SeriesFile) EnableCompactions()
- func (f *SeriesFile) FileSize() (n int64, err error)
- func (f *SeriesFile) HasSeries(name []byte, tags models.Tags, buf []byte) bool
- func (f *SeriesFile) IsDeleted(id uint64) bool
- func (f *SeriesFile) Open() error
- func (f *SeriesFile) Partitions() []*SeriesPartition
- func (f *SeriesFile) Path() string
- func (f *SeriesFile) Retain() func()
- func (f *SeriesFile) Series(id uint64) ([]byte, models.Tags)
- func (f *SeriesFile) SeriesCount() uint64
- func (f *SeriesFile) SeriesID(name []byte, tags models.Tags, buf []byte) uint64
- func (f *SeriesFile) SeriesIDIterator() SeriesIDIterator
- func (f *SeriesFile) SeriesIDPartition(id uint64) *SeriesPartition
- func (f *SeriesFile) SeriesIDPartitionID(id uint64) int
- func (f *SeriesFile) SeriesKey(id uint64) []byte
- func (f *SeriesFile) SeriesKeyPartition(key []byte) *SeriesPartition
- func (f *SeriesFile) SeriesKeyPartitionID(key []byte) int
- func (f *SeriesFile) SeriesKeys(ids []uint64) [][]byte
- func (f *SeriesFile) SeriesKeysPartitionIDs(keys [][]byte) []int
- func (f *SeriesFile) SeriesPartitionPath(i int) string
- func (f *SeriesFile) Wait()
- func (f *SeriesFile) WithMaxCompactionConcurrency(maxCompactionConcurrency int)
- type SeriesIDElem
- type SeriesIDElems
- type SeriesIDIterator
- func DifferenceSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
- func FilterUndeletedSeriesIDIterator(sfile *SeriesFile, itr SeriesIDIterator) SeriesIDIterator
- func IntersectSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
- func MergeSeriesIDIterators(itrs ...SeriesIDIterator) SeriesIDIterator
- func UnionSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
- type SeriesIDIterators
- type SeriesIDSet
- func (s *SeriesIDSet) Add(id uint64)
- func (s *SeriesIDSet) AddMany(ids ...uint64)
- func (s *SeriesIDSet) AddNoLock(id uint64)
- func (s *SeriesIDSet) And(other *SeriesIDSet) *SeriesIDSet
- func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet
- func (s *SeriesIDSet) Bytes() int
- func (s *SeriesIDSet) Cardinality() uint64
- func (s *SeriesIDSet) Clear()
- func (s *SeriesIDSet) ClearNoLock()
- func (s *SeriesIDSet) Clone() *SeriesIDSet
- func (s *SeriesIDSet) CloneNoLock() *SeriesIDSet
- func (s *SeriesIDSet) Contains(id uint64) bool
- func (s *SeriesIDSet) ContainsNoLock(id uint64) bool
- func (s *SeriesIDSet) Diff(other *SeriesIDSet)
- func (s *SeriesIDSet) Equals(other *SeriesIDSet) bool
- func (s *SeriesIDSet) ForEach(f func(id uint64))
- func (s *SeriesIDSet) ForEachNoLock(f func(id uint64))
- func (s *SeriesIDSet) Iterator() SeriesIDSetIterable
- func (s *SeriesIDSet) Merge(others ...*SeriesIDSet)
- func (s *SeriesIDSet) MergeInPlace(other *SeriesIDSet)
- func (s *SeriesIDSet) Remove(id uint64)
- func (s *SeriesIDSet) RemoveNoLock(id uint64)
- func (s *SeriesIDSet) Slice() []uint64
- func (s *SeriesIDSet) String() string
- func (s *SeriesIDSet) UnmarshalBinary(data []byte) error
- func (s *SeriesIDSet) UnmarshalBinaryUnsafe(data []byte) error
- func (s *SeriesIDSet) WriteTo(w io.Writer) (int64, error)
- type SeriesIDSetIterable
- type SeriesIDSetIterator
- type SeriesIDSetIterators
- type SeriesIDSets
- type SeriesIDSliceIterator
- type SeriesIndex
- func (idx *SeriesIndex) Clone() *SeriesIndex
- func (idx *SeriesIndex) Close() (err error)
- func (idx *SeriesIndex) Count() uint64
- func (idx *SeriesIndex) Delete(id uint64)
- func (idx *SeriesIndex) FindIDByNameTags(segments []*SeriesSegment, name []byte, tags models.Tags, buf []byte) uint64
- func (idx *SeriesIndex) FindIDBySeriesKey(segments []*SeriesSegment, key []byte) uint64
- func (idx *SeriesIndex) FindIDListByNameTags(segments []*SeriesSegment, names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, ok bool)
- func (idx *SeriesIndex) FindOffsetByID(id uint64) int64
- func (idx *SeriesIndex) InMemCount() uint64
- func (idx *SeriesIndex) Insert(key []byte, id uint64, offset int64)
- func (idx *SeriesIndex) IsDeleted(id uint64) bool
- func (idx *SeriesIndex) OnDiskCount() uint64
- func (idx *SeriesIndex) Open() (err error)
- func (idx *SeriesIndex) Recover(segments []*SeriesSegment) error
- type SeriesIndexHeader
- type SeriesIterator
- type SeriesKeyIterator
- type SeriesPartition
- func (p *SeriesPartition) AppendSeriesIDs(a []uint64) []uint64
- func (p *SeriesPartition) Close() (err error)
- func (p *SeriesPartition) Compacting() bool
- func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitionIDs []int, ids []uint64, tracker StatsTracker) error
- func (p *SeriesPartition) DeleteSeriesID(id uint64) error
- func (p *SeriesPartition) DisableCompactions()
- func (p *SeriesPartition) EnableCompactions()
- func (p *SeriesPartition) FileSize() (n int64, err error)
- func (p *SeriesPartition) FindIDBySeriesKey(key []byte) uint64
- func (p *SeriesPartition) ID() int
- func (p *SeriesPartition) Index() *SeriesIndex
- func (p *SeriesPartition) IndexPath() string
- func (p *SeriesPartition) IsDeleted(id uint64) bool
- func (p *SeriesPartition) Open() error
- func (p *SeriesPartition) Path() string
- func (p *SeriesPartition) Segments() []*SeriesSegment
- func (p *SeriesPartition) Series(id uint64) ([]byte, models.Tags)
- func (p *SeriesPartition) SeriesCount() uint64
- func (p *SeriesPartition) SeriesKey(id uint64) []byte
- type SeriesPartitionCompactor
- type SeriesSegment
- func (s *SeriesSegment) AppendSeriesIDs(a []uint64) []uint64
- func (s *SeriesSegment) CanWrite(data []byte) bool
- func (s *SeriesSegment) Clone() *SeriesSegment
- func (s *SeriesSegment) Close() (err error)
- func (s *SeriesSegment) CloseForWrite() (err error)
- func (s *SeriesSegment) CompactToPath(path string, index *SeriesIndex) error
- func (s *SeriesSegment) Data() []byte
- func (s *SeriesSegment) Flush() error
- func (s *SeriesSegment) ForEachEntry(fn func(flag uint8, id uint64, offset int64, key []byte) error) error
- func (s *SeriesSegment) ID() uint16
- func (s *SeriesSegment) InitForWrite() (err error)
- func (s *SeriesSegment) MaxSeriesID() uint64
- func (s *SeriesSegment) Open() error
- func (s *SeriesSegment) Path() string
- func (s *SeriesSegment) Size() int64
- func (s *SeriesSegment) Slice(pos uint32) []byte
- func (s *SeriesSegment) WriteLogEntry(data []byte) (offset int64, err error)
- type SeriesSegmentHeader
- type Shard
- func (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error
- func (s *Shard) Close() error
- func (s *Shard) CreateCursorIterator(ctx context.Context) (CursorIterator, error)
- func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
- func (s *Shard) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error)
- func (s *Shard) CreateSnapshot(skipCacheOk bool) (string, error)
- func (s *Shard) Database() string
- func (s *Shard) DeleteMeasurement(name []byte) error
- func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64) error
- func (s *Shard) DeleteSeriesRangeWithPredicate(itr SeriesIterator, ...) error
- func (s *Shard) Digest() (io.ReadCloser, int64, error, string)
- func (s *Shard) DiskSize() (int64, error)
- func (s *Shard) Engine() (Engine, error)
- func (s *Shard) Export(w io.Writer, basePath string, start time.Time, end time.Time) error
- func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
- func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error
- func (s *Shard) Free() error
- func (s *Shard) ID() uint64
- func (s *Shard) Import(r io.Reader, basePath string) error
- func (s *Shard) InUse() (bool, error)
- func (s *Shard) Index() (Index, error)
- func (s *Shard) IndexType() string
- func (s *Shard) IsIdle() (state bool, reason string)
- func (s *Shard) LastModified() time.Time
- func (s *Shard) MeasurementExists(name []byte) (bool, error)
- func (s *Shard) MeasurementFields(name []byte) *MeasurementFields
- func (s *Shard) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error)
- func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
- func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)
- func (s *Shard) Open() error
- func (s *Shard) Path() string
- func (s *Shard) Restore(r io.Reader, basePath string) error
- func (s *Shard) RetentionPolicy() string
- func (s *Shard) ScheduleFullCompaction() error
- func (s *Shard) SeriesFile() (*SeriesFile, error)
- func (s *Shard) SeriesN() int64
- func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error)
- func (s *Shard) SetCompactionsEnabled(enabled bool)
- func (s *Shard) SetEnabled(enabled bool)
- func (s *Shard) SetNewReadersBlocked(blocked bool) error
- func (s *Shard) Statistics(tags map[string]string) []models.Statistic
- func (s *Shard) TagKeyCardinality(name, key []byte) int
- func (s *Shard) WithLogger(log *zap.Logger)
- func (s *Shard) WritePoints(points []models.Point, tracker StatsTracker) error
- func (s *Shard) WriteTo(w io.Writer) (int64, error)
- type ShardError
- type ShardGroup
- type ShardStatistics
- type Shards
- func (a Shards) CallType(name string, args []influxql.DataType) (influxql.DataType, error)
- func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, ...) (query.Iterator, error)
- func (a Shards) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (_ SeriesCursor, err error)
- func (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error)
- func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
- func (a Shards) FieldKeysByMeasurement(name []byte) []string
- func (a Shards) FieldKeysByPredicate(expr influxql.Expr) (map[string][]string, error)
- func (a Shards) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
- func (a Shards) Len() int
- func (a Shards) Less(i, j int) bool
- func (a Shards) MapType(measurement, field string) influxql.DataType
- func (a Shards) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error)
- func (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string
- func (a Shards) Swap(i, j int)
- type StatsTracker
- type Store
- func (s *Store) BackupShard(id uint64, since time.Time, w io.Writer) error
- func (s *Store) Close() error
- func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error
- func (s *Store) CreateShardSnapshot(id uint64, skipCacheOk bool) (string, error)
- func (s *Store) Databases() []string
- func (s *Store) DeleteDatabase(name string) error
- func (s *Store) DeleteMeasurement(database, name string) error
- func (s *Store) DeleteRetentionPolicy(database, name string) error
- func (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error
- func (s *Store) DeleteShard(shardID uint64) error
- func (s *Store) DiskSize() (int64, error)
- func (s *Store) ExpandSources(sources influxql.Sources) (influxql.Sources, error)
- func (s *Store) ExportShard(id uint64, start time.Time, end time.Time, w io.Writer) error
- func (s *Store) ImportShard(id uint64, r io.Reader) error
- func (s *Store) IndexBytes() int
- func (s *Store) MeasurementNames(ctx context.Context, auth query.FineAuthorizer, database string, ...) ([][]byte, error)
- func (s *Store) MeasurementSeriesCounts(database string) (measuments int, series int)
- func (s *Store) MeasurementsCardinality(ctx context.Context, database string) (int64, error)
- func (s *Store) MeasurementsSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error)
- func (s *Store) Open() error
- func (s *Store) OpenShard(sh *Shard, force bool) error
- func (s *Store) Path() string
- func (s *Store) RestoreShard(id uint64, r io.Reader) error
- func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error)
- func (s *Store) SeriesSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error)
- func (s *Store) SetShardEnabled(shardID uint64, enabled bool) error
- func (s *Store) SetShardNewReadersBlocked(shardID uint64, blocked bool) error
- func (s *Store) SetShardOpenErrorForTest(shardID uint64, err error)
- func (s *Store) Shard(id uint64) *Shard
- func (s *Store) ShardDigest(id uint64) (io.ReadCloser, int64, error)
- func (s *Store) ShardGroup(ids []uint64) ShardGroup
- func (s *Store) ShardIDs() []uint64
- func (s *Store) ShardInUse(shardID uint64) (bool, error)
- func (s *Store) ShardN() int
- func (s *Store) ShardRelativePath(id uint64) (string, error)
- func (s *Store) Shards(ids []uint64) []*Shard
- func (s *Store) Statistics(tags map[string]string) []models.Statistic
- func (s *Store) TagKeys(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, ...) ([]TagKeys, error)
- func (s *Store) TagValues(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, ...) ([]TagValues, error)
- func (s *Store) WithLogger(log *zap.Logger)
- func (s *Store) WriteToShard(writeCtx WriteContext, shardID uint64, points []models.Point) error
- type StoreStatistics
- type StringArray
- type StringArrayCursor
- type TagKeyIterator
- type TagKeyIterators
- type TagKeys
- type TagKeysSlice
- type TagValueIterator
- type TagValueIterators
- type TagValues
- type TagValuesSlice
- type UnsignedArray
- type UnsignedArrayCursor
- type WriteContext
Constants ¶
const (
    // DefaultEngine is the default engine for new shards.
    DefaultEngine = "tsm1"

    // DefaultIndex is the default index for new shards.
    DefaultIndex = InmemIndexName

    // DefaultCacheMaxMemorySize is the maximum size a shard's cache can
    // reach before it starts rejecting writes.
    DefaultCacheMaxMemorySize = 1024 * 1024 * 1024 // 1GB

    // DefaultCacheSnapshotMemorySize is the size at which the engine will
    // snapshot the cache and write it to a TSM file, freeing up memory.
    DefaultCacheSnapshotMemorySize = 25 * 1024 * 1024 // 25MB

    // DefaultCacheSnapshotWriteColdDuration is the length of time at which
    // the engine will snapshot the cache and write it to a new TSM file if
    // the shard hasn't received writes or deletes.
    DefaultCacheSnapshotWriteColdDuration = time.Duration(10 * time.Minute)

    // DefaultCompactFullWriteColdDuration is the duration at which the engine
    // will compact all TSM files in a shard if it hasn't received a write or delete.
    DefaultCompactFullWriteColdDuration = time.Duration(4 * time.Hour)

    // DefaultCompactThroughput is the rate limit in bytes per second that we
    // will allow TSM compactions to write to disk. Note that short bursts are allowed
    // to happen at a possibly larger value, set by DefaultCompactThroughputBurst.
    // A value of 0 here will disable compaction rate limiting.
    DefaultCompactThroughput = 48 * 1024 * 1024

    // DefaultCompactThroughputBurst is the rate limit in bytes per second that we
    // will allow TSM compactions to write to disk. If this is not set, the burst value
    // will be set to equal the normal throughput.
    DefaultCompactThroughputBurst = 48 * 1024 * 1024

    // DefaultMaxPointsPerBlock is the maximum number of points in an encoded
    // block in a TSM file.
    DefaultMaxPointsPerBlock = 1000

    // DefaultMaxSeriesPerDatabase is the maximum number of series a node can hold per database.
    // This limit only applies to the "inmem" index.
    DefaultMaxSeriesPerDatabase = 1000000

    // DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement.
    DefaultMaxValuesPerTag = 100000

    // DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions
    // that can run at one time. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime.
    DefaultMaxConcurrentCompactions = 0

    // DefaultMaxIndexLogFileSize is the default threshold, in bytes, when an index
    // write-ahead log file will compact into an index file.
    DefaultMaxIndexLogFileSize = 1 * 1024 * 1024 // 1MB

    // DefaultMaxConcurrentDeletes is the default number of concurrent DELETE calls on a shard.
    DefaultMaxConcurrentDeletes = 1

    // DefaultSeriesIDSetCacheSize is the default number of series ID sets to cache in the TSI index.
    DefaultSeriesIDSetCacheSize = 100

    // DefaultSeriesFileMaxConcurrentSnapshotCompactions is the maximum number of concurrent series
    // partition snapshot compactions that can run at one time.
    // A value of 0 results in runtime.GOMAXPROCS(0).
    DefaultSeriesFileMaxConcurrentSnapshotCompactions = 0
)
const (
    InmemIndexName = "inmem"
    TSI1IndexName  = "tsi1"
)
Available index types.
const (
    SeriesIndexVersion = 1
    SeriesIndexMagic   = "SIDX"
)
const (
    SeriesIndexElemSize   = 16 // offset + id
    SeriesIndexLoadFactor = 90 // rhh load factor

    SeriesIndexHeaderSize = 0 + 4 + 1 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 0
)
const (
    SeriesSegmentVersion = 1
    SeriesSegmentMagic   = "SSEG"

    SeriesSegmentHeaderSize = 4 + 1 // magic + version
)
const (
    SeriesEntryFlagSize   = 1
    SeriesEntryHeaderSize = 1 + 8 // flag + id

    SeriesEntryInsertFlag    = 0x01
    SeriesEntryTombstoneFlag = 0x02
)
Series entry constants.
const (
    AddMeasurementField = ChangeType(internal.ChangeType_AddMeasurementField)
    DeleteMeasurement   = ChangeType(internal.ChangeType_DeleteMeasurement)
)
const (
    HintedHandoffUser = "_systemuser_hintedhandoff"
    UnknownUser       = "_systemuser_unknown" // for when user authentication is off
    SelectIntoUser    = "_systemuser_selectinto"
    CollectdUser      = "_systemuser_collectd"
    OpenTsdbUser      = "_systemuser_opentsdb"
    GraphiteUser      = "_systemuser_graphite"
    UdpUser           = "_systemuser_udpwriter"
    MonitorUser       = "_systemuser_monitor"
)
'Fake' user names for write logging / metrics.
const DefaultSeriesPartitionCompactThreshold = 1 << 17 // 128K
DefaultSeriesPartitionCompactThreshold is the number of series IDs to hold in the in-memory series map before compacting and rebuilding the on-disk representation.
const EOF = query.ZeroTime
EOF represents a "not found" key returned by a Cursor.
const (
FieldsChangeFile = "fields.idxl"
)
const MaxFieldValueLength = 1048576
const SeriesFileDirectory = "_series"
SeriesFileDirectory is the name of the directory containing series files for a database.
const (
// SeriesFilePartitionN is the number of partitions a series file is split into.
SeriesFilePartitionN = 8
)
const SeriesIDSize = 8
SeriesIDSize is the size in bytes of a series key ID.
Variables ¶
var (
    // ErrFormatNotFound is returned when no format can be determined from a path.
    ErrFormatNotFound = errors.New("format not found")

    // ErrUnknownEngineFormat is returned when the engine format is
    // unknown. ErrUnknownEngineFormat is currently returned if a format
    // other than tsm1 is encountered.
    ErrUnknownEngineFormat = errors.New("unknown engine format")
)
var (
    ErrSeriesFileClosed         = errors.New("tsdb: series file closed")
    ErrInvalidSeriesPartitionID = errors.New("tsdb: invalid series partition id")
)
var (
    ErrSeriesPartitionClosed              = errors.New("tsdb: series partition closed")
    ErrSeriesPartitionCompactionCancelled = errors.New("tsdb: series partition compaction cancelled")
)
var (
    ErrInvalidSeriesSegment        = errors.New("invalid series segment")
    ErrInvalidSeriesSegmentVersion = errors.New("invalid series segment version")
    ErrSeriesSegmentNotWritable    = errors.New("series segment not writable")
)
var (
    // ErrFieldTypeConflict is returned when a new field already exists with a different type.
    ErrFieldTypeConflict = errors.New("field type conflict")

    // ErrFieldNotFound is returned when a field cannot be found.
    ErrFieldNotFound = errors.New("field not found")

    // ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID
    // there is no mapping for.
    ErrFieldUnmappedID = errors.New("field ID not mapped")

    // ErrEngineClosed is returned when a caller attempts to indirectly
    // access the shard's underlying engine.
    ErrEngineClosed = errors.New("engine is closed")

    // ErrShardDisabled is returned when the shard is not available for
    // queries or writes.
    ErrShardDisabled = errors.New("shard is disabled")

    // ErrUnknownFieldsFormat is returned when the fields index file is not identifiable by
    // the file's magic number.
    ErrUnknownFieldsFormat = errors.New("unknown field index format")

    // ErrUnknownFieldType is returned when the type of a field cannot be determined.
    ErrUnknownFieldType = errors.New("unknown field type")

    // ErrShardNotIdle is returned when an operation requiring the shard to be idle/cold is
    // attempted on a hot shard.
    ErrShardNotIdle = errors.New("shard not idle")
)
var (
    // ErrShardNotFound is returned when trying to get a non-existent shard.
    ErrShardNotFound = fmt.Errorf("shard not found")

    // ErrStoreClosed is returned when trying to use a closed Store.
    ErrStoreClosed = fmt.Errorf("store is closed")

    // ErrShardDeletion is returned when trying to create a shard that is being deleted.
    ErrShardDeletion = errors.New("shard is being deleted")

    // ErrMultipleIndexTypes is returned when trying to do deletes on a database with
    // multiple index types.
    ErrMultipleIndexTypes = errors.New("cannot delete data. DB contains shards using both inmem and tsi1 indexes. Please convert all shards to use the same index type to delete data.")
)
var ErrIndexClosing = errors.New("index is closing")
ErrIndexClosing can be returned from an Index method if the index is currently closing.
var ErrInvalidSeriesIndex = errors.New("invalid series index")
var NewInmemIndex func(name string, sfile *SeriesFile) (interface{}, error)
NewInmemIndex returns a new "inmem" index type.
Functions ¶
func AppendSeriesEntry ¶ added in v1.5.0
func AppendSeriesKey ¶ added in v1.5.0
AppendSeriesKey serializes name and tags to a byte slice. The total length is prepended as a uvarint.
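As an illustration, here is a minimal sketch (not part of the package docs) that builds a series key and parses it back; the import paths assume the InfluxDB 1.x module layout:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    // Serialize a measurement name and tag set into a length-prefixed series key.
    tags := models.NewTags(map[string]string{"host": "server01", "region": "us-west"})
    key := tsdb.AppendSeriesKey(nil, []byte("cpu"), tags)

    // Parse the name and tags back out of the key.
    name, parsed := tsdb.ParseSeriesKey(key)
    fmt.Printf("name=%s tags=%v\n", name, parsed)
}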
func CompareSeriesKeys ¶ added in v1.5.0
func GenerateSeriesKeys ¶ added in v1.5.0
GenerateSeriesKeys generates series keys for a list of names & tags using a single large memory block.
func IsValidSeriesEntryFlag ¶ added in v1.6.1
IsValidSeriesEntryFlag returns true if flag is valid.
func IsValidSeriesSegmentFilename ¶ added in v1.5.0
IsValidSeriesSegmentFilename returns true if filename is a 4-character lowercase hexadecimal number.
func JoinSeriesOffset ¶ added in v1.5.0
JoinSeriesOffset returns an offset that combines the 2-byte segmentID and 4-byte pos.
func MakeTagsKey ¶ added in v1.3.0
MakeTagsKey converts a tag set to bytes for use as a lookup key.
func MarshalTags ¶ added in v0.9.3
MarshalTags converts a tag set to bytes for use as a lookup key.
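A short sketch showing both tag-key encoders side by side; the import paths are assumed from the 1.x module layout:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    m := map[string]string{"host": "server01", "region": "us-west"}

    // MakeTagsKey encodes only the requested keys from a models.Tags set.
    k1 := tsdb.MakeTagsKey([]string{"host", "region"}, models.NewTags(m))

    // MarshalTags encodes an entire map[string]string.
    k2 := tsdb.MarshalTags(m)

    fmt.Println(len(k1), len(k2))
}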
func NewFieldKeysIterator ¶ added in v0.11.0
NewFieldKeysIterator returns an iterator that can be iterated over to retrieve field keys.
func NewMeasurementSliceIterator ¶ added in v1.5.0
func NewMeasurementSliceIterator(names [][]byte) *measurementSliceIterator
NewMeasurementSliceIterator returns an iterator over a slice of in-memory measurement names.
func NewSeriesPointIterator ¶ added in v1.5.0
func NewSeriesPointIterator(indexSet IndexSet, opt query.IteratorOptions) (_ query.Iterator, err error)
NewSeriesPointIterator returns a new instance of a series point iterator.
func NewSeriesQueryAdapterIterator ¶ added in v1.5.0
func NewSeriesQueryAdapterIterator(sfile *SeriesFile, itr SeriesIDIterator, fieldset *MeasurementFieldSet, opt query.IteratorOptions) query.Iterator
NewSeriesQueryAdapterIterator returns a new instance of SeriesQueryAdapterIterator.
func NewShardError ¶ added in v0.11.0
NewShardError returns a new ShardError.
func NewTagKeySliceIterator ¶ added in v1.5.0
func NewTagKeySliceIterator(keys [][]byte) *tagKeySliceIterator
NewTagKeySliceIterator returns a TagKeyIterator that iterates over a slice.
func NewTagKeysIterator ¶ added in v0.11.0
NewTagKeysIterator returns a new instance of TagKeysIterator.
func NewTagValueSliceIterator ¶ added in v1.5.0
func NewTagValueSliceIterator(values [][]byte) *tagValueSliceIterator
NewTagValueSliceIterator returns a TagValueIterator that iterates over a slice.
func ParseSeriesKey ¶ added in v1.5.0
ParseSeriesKey extracts the name & tags from a series key.
func ParseSeriesKeyInto ¶ added in v1.5.5
ParseSeriesKeyInto extracts the name and tags from data, parsing the tags into dstTags, which is then returned.
The returned dstTags may have a different length and capacity.
func ParseSeriesSegmentFilename ¶ added in v1.5.0
ParseSeriesSegmentFilename returns the id represented by the hexadecimal filename.
func ReadAllSeriesIDIterator ¶ added in v1.5.0
func ReadAllSeriesIDIterator(itr SeriesIDIterator) ([]uint64, error)
ReadAllSeriesIDIterator returns all ids from the iterator.
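A hedged sketch of draining an iterator; it assumes NewSeriesIDSliceIterator, a slice-backed SeriesIDIterator constructor in this package that is not shown in the index above:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    // Merge two slice-backed iterators, then drain the result into a slice.
    itr := tsdb.MergeSeriesIDIterators(
        tsdb.NewSeriesIDSliceIterator([]uint64{1, 3, 5}),
        tsdb.NewSeriesIDSliceIterator([]uint64{2, 3, 4}),
    )
    defer itr.Close()

    ids, err := tsdb.ReadAllSeriesIDIterator(itr)
    if err != nil {
        panic(err)
    }
    fmt.Println(ids) // IDs from both iterators, in order
}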
func ReadSeriesEntry ¶ added in v1.5.0
func ReadSeriesKey ¶ added in v1.5.0
ReadSeriesKey returns the series key from the beginning of the buffer.
func ReadSeriesKeyFromSegments ¶ added in v1.5.0
func ReadSeriesKeyFromSegments(a []*SeriesSegment, offset int64) []byte
ReadSeriesKeyFromSegments returns a series key from an offset within a set of segments.
func ReadSeriesKeyLen ¶ added in v1.5.0
func ReadSeriesKeyMeasurement ¶ added in v1.5.0
func ReadSeriesKeyTag ¶ added in v1.5.0
func ReadSeriesKeyTagN ¶ added in v1.5.0
func RegisterEngine ¶ added in v0.9.3
func RegisterEngine(name string, fn NewEngineFunc)
RegisterEngine registers a storage engine initializer by name.
func RegisterIndex ¶ added in v1.3.0
func RegisterIndex(name string, fn NewIndexFunc)
RegisterIndex registers a storage index initializer by name.
func RegisteredEngines ¶ added in v0.9.5
func RegisteredEngines() []string
RegisteredEngines returns the slice of currently registered engines.
func RegisteredIndexes ¶ added in v1.3.0
func RegisteredIndexes() []string
RegisteredIndexes returns the slice of currently registered indexes.
func SeriesKeySize ¶ added in v1.5.0
SeriesKeySize returns the number of bytes required to encode a series key.
func SeriesKeysSize ¶ added in v1.5.0
SeriesKeysSize returns the number of bytes required to encode a list of name/tags.
func SeriesSegmentSize ¶ added in v1.5.0
SeriesSegmentSize returns the maximum size of the segment. The size goes up by powers of 2 starting from 4MB and reaching 256MB.
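For instance, a quick sketch printing the doubling schedule:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    // Segment sizes double per segment ID: 4MB, 8MB, ... up to the 256MB cap.
    for id := uint16(0); id < 10; id++ {
        fmt.Printf("segment %d: %d bytes\n", id, tsdb.SeriesSegmentSize(id))
    }
}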
func SplitSeriesOffset ¶ added in v1.5.0
SplitSeriesOffset splits an offset into its 2-byte segmentID and 4-byte pos parts.
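A round-trip sketch pairing SplitSeriesOffset with JoinSeriesOffset:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    // Pack a segment ID and position into a single int64 offset...
    off := tsdb.JoinSeriesOffset(2, 4096)

    // ...and recover both parts from it.
    segmentID, pos := tsdb.SplitSeriesOffset(off)
    fmt.Println(segmentID, pos) // 2 4096
}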
func ValidateFields ¶ added in v1.9.4
func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error
ValidateFields will return a PartialWriteError if:
- the point has inconsistent fields, or
- the point has fields that are too long
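A minimal sketch of validating a parsed point against an empty field set; the line-protocol literal is illustrative only:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/models"
    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    pts, err := models.ParsePointsString(`cpu,host=server01 value=0.5`)
    if err != nil {
        panic(err)
    }

    // An empty field set: no existing fields, so no type conflicts are possible.
    mf := tsdb.NewMeasurementFields()
    if err := tsdb.ValidateFields(mf, pts[0], false); err != nil {
        fmt.Println("partial write:", err)
    }
}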
Types ¶
type BooleanArray ¶ added in v1.7.0
type BooleanArray = cursors.BooleanArray
func NewBooleanArrayLen ¶ added in v1.7.0
func NewBooleanArrayLen(sz int) *BooleanArray
type BooleanArrayCursor ¶ added in v1.7.0
type BooleanArrayCursor = cursors.BooleanArrayCursor
type ChangeType ¶ added in v1.11.0
type ChangeType int
type CompactionPlannerCreator ¶ added in v1.6.0
type CompactionPlannerCreator func(cfg Config) interface{}
type Config ¶
type Config struct {
    Dir    string `toml:"dir"`
    Engine string `toml:"-"`
    Index  string `toml:"index-version"`

    // General WAL configuration options
    WALDir string `toml:"wal-dir"`

    // WALFsyncDelay is the amount of time that a write will wait before fsyncing. A duration
    // greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
    // disks or when WAL write contention is seen. A value of 0 fsyncs every write to the WAL.
    WALFsyncDelay toml.Duration `toml:"wal-fsync-delay"`

    // Enables unicode validation on series keys on write.
    ValidateKeys bool `toml:"validate-keys"`

    // When true, skips size validation on fields.
    SkipFieldSizeValidation bool `toml:"skip-field-size-validation"`

    // Enables strict error handling. For example, forces SELECT INTO to err out on INF values.
    StrictErrorHandling bool `toml:"strict-error-handling"`

    // Query logging
    QueryLogEnabled bool `toml:"query-log-enabled"`

    // Compaction options for tsm1 (descriptions above with defaults)
    CacheMaxMemorySize             toml.Size     `toml:"cache-max-memory-size"`
    CacheSnapshotMemorySize        toml.Size     `toml:"cache-snapshot-memory-size"`
    CacheSnapshotWriteColdDuration toml.Duration `toml:"cache-snapshot-write-cold-duration"`
    CompactFullWriteColdDuration   toml.Duration `toml:"compact-full-write-cold-duration"`
    CompactThroughput              toml.Size     `toml:"compact-throughput"`
    CompactThroughputBurst         toml.Size     `toml:"compact-throughput-burst"`

    // Options for ingress metrics
    IngressMetricByMeasurement bool `toml:"ingress-metric-by-measurement-enabled"`
    IngressMetricByLogin       bool `toml:"ingress-metric-by-login-enabled"`

    // MaxSeriesPerDatabase is the maximum number of series a node can hold per database.
    // When this limit is exceeded, writes return a 'max series per database exceeded' error.
    // A value of 0 disables the limit. This limit only applies when using the "inmem" index.
    MaxSeriesPerDatabase int `toml:"max-series-per-database"`

    // MaxValuesPerTag is the maximum number of tag values a single tag key can have within
    // a measurement. When the limit is exceeded, writes return an error.
    // A value of 0 disables the limit.
    MaxValuesPerTag int `toml:"max-values-per-tag"`

    // MaxConcurrentCompactions is the maximum number of concurrent level and full compactions
    // that can be running at one time across all shards. Compactions scheduled to run when the
    // limit is reached are blocked until a running compaction completes. Snapshot compactions are
    // not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0).
    MaxConcurrentCompactions int `toml:"max-concurrent-compactions"`

    // MaxIndexLogFileSize is the threshold, in bytes, when an index write-ahead log file will
    // compact into an index file. Lower sizes will cause log files to be compacted more quickly
    // and result in lower heap usage at the expense of write throughput. Higher sizes will
    // be compacted less frequently, store more series in-memory, and provide higher write throughput.
    MaxIndexLogFileSize toml.Size `toml:"max-index-log-file-size"`

    // MaxConcurrentDeletes is the maximum number of simultaneous DELETE calls on a shard.
    // The default is 1, which was the previous hard-coded value.
    MaxConcurrentDeletes int `toml:"max-concurrent-deletes"`

    // SeriesIDSetCacheSize is the number of items that can be cached within the TSI index. TSI caching can help
    // with query performance when the same tag key/value predicates are commonly used on queries.
    // Setting series-id-set-cache-size to 0 disables the cache.
    SeriesIDSetCacheSize int `toml:"series-id-set-cache-size"`

    // SeriesFileMaxConcurrentSnapshotCompactions is the maximum number of concurrent snapshot compactions
    // that can be running at one time across all series partitions in a database. Snapshots scheduled
    // to run when the limit is reached are blocked until a running snapshot completes. Only snapshot
    // compactions are affected by this limit. A value of 0 limits snapshot compactions to the lesser of
    // 8 (series file partition quantity) and runtime.GOMAXPROCS(0).
    SeriesFileMaxConcurrentSnapshotCompactions int `toml:"series-file-max-concurrent-snapshot-compactions"`

    TraceLoggingEnabled bool `toml:"trace-logging-enabled"`

    // TSMWillNeed controls whether we hint to the kernel that we intend to
    // page in mmap'd sections of TSM files. This setting defaults to off, as it has
    // been found to be problematic in some cases. It may help users who have
    // slow disks.
    TSMWillNeed bool `toml:"tsm-use-madv-willneed"`
}
Config holds the configuration for the tsdb package.
func (Config) Diagnostics ¶ added in v1.3.0
func (c Config) Diagnostics() (*diagnostics.Diagnostics, error)
Diagnostics returns a diagnostics representation of a subset of the Config.
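As a sketch, assuming the package's NewConfig constructor and Validate method (standard in the 1.x tsdb package but not listed in the index above):

package main

import "github.com/influxdata/influxdb/tsdb"

func main() {
    // Start from defaults, then point the store at concrete directories.
    cfg := tsdb.NewConfig()
    cfg.Dir = "/var/lib/influxdb/data"
    cfg.WALDir = "/var/lib/influxdb/wal"
    cfg.Index = tsdb.TSI1IndexName

    if err := cfg.Validate(); err != nil {
        panic(err)
    }
}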
type CursorIterator ¶ added in v1.6.0
type CursorIterator = cursors.CursorIterator
type CursorIterators ¶ added in v1.6.0
type CursorIterators = cursors.CursorIterators
func CreateCursorIterators ¶ added in v1.6.0
func CreateCursorIterators(ctx context.Context, shards []*Shard) (CursorIterators, error)
type CursorRequest ¶ added in v1.4.0
type CursorRequest = cursors.CursorRequest
type CursorStats ¶ added in v1.7.2
type CursorStats = cursors.CursorStats
type Engine ¶ added in v0.9.3
type Engine interface {
    Open() error
    Close() error
    SetEnabled(enabled bool)
    SetCompactionsEnabled(enabled bool)
    ScheduleFullCompaction() error
    SetNewReadersBlocked(blocked bool) error
    InUse() (bool, error)

    WithLogger(*zap.Logger)

    LoadMetadataIndex(shardID uint64, index Index) error

    CreateSnapshot(skipCacheOk bool) (string, error)
    Backup(w io.Writer, basePath string, since time.Time) error
    Export(w io.Writer, basePath string, start time.Time, end time.Time) error
    Restore(r io.Reader, basePath string) error
    Import(r io.Reader, basePath string) error
    Digest() (io.ReadCloser, int64, error)

    CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error)
    CreateCursorIterator(ctx context.Context) (CursorIterator, error)
    IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
    WritePoints(points []models.Point, tracker StatsTracker) error

    CreateSeriesIfNotExists(key, name []byte, tags models.Tags, tracker StatsTracker) error
    CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags, tracker StatsTracker) error
    DeleteSeriesRange(itr SeriesIterator, min, max int64) error
    DeleteSeriesRangeWithPredicate(itr SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error

    MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)
    SeriesSketches() (estimator.Sketch, estimator.Sketch, error)
    SeriesN() int64

    MeasurementExists(name []byte) (bool, error)

    MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
    MeasurementFieldSet() *MeasurementFieldSet
    MeasurementFields(measurement []byte) *MeasurementFields
    ForEachMeasurementName(fn func(name []byte) error) error
    DeleteMeasurement(name []byte) error

    HasTagKey(name, key []byte) (bool, error)
    MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)
    TagKeyCardinality(name, key []byte) int

    // Statistics will return statistics relevant to this engine.
    Statistics(tags map[string]string) []models.Statistic
    LastModified() time.Time
    DiskSize() int64
    IsIdle() (bool, string)
    Free() error

    io.WriterTo
}
Engine represents a swappable storage engine for the shard.
func NewEngine ¶ added in v0.9.3
func NewEngine(id uint64, i Index, path string, walPath string, sfile *SeriesFile, options EngineOptions) (Engine, error)
NewEngine returns an instance of an engine based on its format. If the path does not exist then the DefaultFormat is used.
type EngineFormat ¶ added in v0.9.5
type EngineFormat int
EngineFormat represents the format for an engine.
const (
    // TSM1Format is the format used by the tsm1 engine.
    TSM1Format EngineFormat = 2
)
type EngineOptions ¶ added in v0.9.3
type EngineOptions struct {
    EngineVersion string
    IndexVersion  string
    InmemIndex    interface{} // shared in-memory index

    // Limits the concurrent number of TSM files that can be loaded at once.
    OpenLimiter limiter.Fixed

    // CompactionDisabled specifies shards should not schedule compactions.
    // This option is intended for offline tooling.
    CompactionDisabled          bool
    CompactionPlannerCreator    CompactionPlannerCreator
    CompactionLimiter           limiter.Fixed
    CompactionThroughputLimiter limiter.Rate
    WALEnabled                  bool
    MonitorDisabled             bool

    // DatabaseFilter is a predicate controlling which databases may be opened.
    // If no function is set, all databases will be opened.
    DatabaseFilter func(database string) bool

    // RetentionPolicyFilter is a predicate controlling which combination of database and retention policy may be opened.
    // nil will allow all combinations to pass.
    RetentionPolicyFilter func(database, rp string) bool

    // ShardFilter is a predicate controlling which combination of database, retention policy and shard group may be opened.
    // nil will allow all combinations to pass.
    ShardFilter func(database, rp string, id uint64) bool

    Config       Config
    SeriesIDSets SeriesIDSets

    OnNewEngine func(Engine)

    FileStoreObserver FileStoreObserver
}
EngineOptions represents the options used to initialize the engine.
func NewEngineOptions ¶ added in v0.9.3
func NewEngineOptions() EngineOptions
NewEngineOptions constructs an EngineOptions object with safe default values. This should only be used in tests; production environments should read from a config file.
type ErrPreviousShardFail ¶ added in v1.10.0
type ErrPreviousShardFail struct {
// contains filtered or unexported fields
}
func (ErrPreviousShardFail) Error ¶ added in v1.10.0
func (e ErrPreviousShardFail) Error() string
func (ErrPreviousShardFail) Is ¶ added in v1.10.0
func (e ErrPreviousShardFail) Is(err error) bool
func (ErrPreviousShardFail) Unwrap ¶ added in v1.10.0
func (e ErrPreviousShardFail) Unwrap() error
type Field ¶ added in v0.9.3
type Field struct {
    ID   uint8             `json:"id,omitempty"`
    Name string            `json:"name,omitempty"`
    Type influxql.DataType `json:"type,omitempty"`
}
Field represents a series field. All of the fields must be hashable.
type FieldChange ¶ added in v1.11.0
type FieldChange struct {
    FieldCreate
    ChangeType ChangeType
}
type FieldChanges ¶ added in v1.11.0
type FieldChanges []*FieldChange
func MeasurementsToFieldChangeDeletions ¶ added in v1.11.0
func MeasurementsToFieldChangeDeletions(measurements []string) FieldChanges
type FieldCreate ¶ added in v0.9.3
FieldCreate holds information for a field to create on a measurement.
type FileStoreObserver ¶ added in v1.6.0
type FileStoreObserver interface {
    // FileFinishing is called before a file is renamed to its final name.
    FileFinishing(path string) error

    // FileUnlinking is called before a file is unlinked.
    FileUnlinking(path string) error
}
FileStoreObserver is passed notifications before the file store adds or deletes files. In this way, it can be sure to observe every file that is added or removed even in the presence of process death.
type FloatArray ¶ added in v1.7.0
type FloatArray = cursors.FloatArray
func NewFloatArrayLen ¶ added in v1.7.0
func NewFloatArrayLen(sz int) *FloatArray
type FloatArrayCursor ¶ added in v1.7.0
type FloatArrayCursor = cursors.FloatArrayCursor
type Index ¶ added in v1.3.0
type Index interface {
    Open() error
    Close() error
    WithLogger(*zap.Logger)

    Database() string
    MeasurementExists(name []byte) (bool, error)
    MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
    DropMeasurement(name []byte) error
    ForEachMeasurementName(fn func(name []byte) error) error

    InitializeSeries(keys, names [][]byte, tags []models.Tags) error
    CreateSeriesIfNotExists(key, name []byte, tags models.Tags, tracker StatsTracker) error
    CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags, tracker StatsTracker) error
    DropSeries(seriesID uint64, key []byte, cascade bool) error
    DropSeriesList(seriesID []uint64, key [][]byte, cascade bool) error
    DropMeasurementIfSeriesNotExist(name []byte) (bool, error)

    // Used to clean up series in inmem index that were dropped with a shard.
    DropSeriesGlobal(key []byte) error

    MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)
    SeriesN() int64
    SeriesSketches() (estimator.Sketch, estimator.Sketch, error)
    SeriesIDSet() *SeriesIDSet

    HasTagKey(name, key []byte) (bool, error)
    HasTagValue(name, key, value []byte) (bool, error)

    MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)

    TagKeyCardinality(name, key []byte) int

    // InfluxQL system iterators
    MeasurementIterator() (MeasurementIterator, error)
    TagKeyIterator(name []byte) (TagKeyIterator, error)
    TagValueIterator(name, key []byte) (TagValueIterator, error)
    MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error)
    TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error)
    TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error)

    // Sets a shared fieldset from the engine.
    FieldSet() *MeasurementFieldSet
    SetFieldSet(fs *MeasurementFieldSet)

    // Size of the index on disk, if applicable.
    DiskSizeBytes() int64

    // Bytes estimates the memory footprint of this Index, in bytes.
    Bytes() int

    // To be removed w/ tsi1.
    SetFieldName(measurement []byte, name string)

    Type() string

    // Returns a unique reference ID to the index instance.
    // For inmem, returns a reference to the backing Index, not ShardIndex.
    UniqueReferenceID() uintptr

    Rebuild()
}
func MustOpenIndex ¶ added in v1.3.0
func MustOpenIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index
func NewIndex ¶ added in v1.3.0
func NewIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) (Index, error)
NewIndex returns an instance of an index based on its format. If the path does not exist then the DefaultFormat is used.
type IndexFormat ¶ added in v1.3.0
type IndexFormat int
IndexFormat represents the format for an index.
const (
    // InMemFormat is the format used by the original in-memory shared index.
    InMemFormat IndexFormat = 1

    // TSI1Format is the format used by the tsi1 index.
    TSI1Format IndexFormat = 2
)
type IndexSet ¶ added in v1.5.0
type IndexSet struct {
    Indexes    []Index     // The set of indexes comprising this IndexSet.
    SeriesFile *SeriesFile // The Series File associated with the db for this set.
    // contains filtered or unexported fields
}
IndexSet represents a list of indexes, all belonging to one database.
func (IndexSet) DedupeInmemIndexes ¶ added in v1.5.0
DedupeInmemIndexes returns an index set which removes duplicate indexes. Useful because inmem indexes are shared by shards per database.
func (IndexSet) ForEachMeasurementTagKey ¶ added in v1.5.0
ForEachMeasurementTagKey iterates over all tag keys in a measurement and applies the provided function.
func (IndexSet) HasField ¶ added in v1.5.1
HasField determines if any of the field sets on the set of indexes in the IndexSet have the provided field for the provided measurement.
func (IndexSet) HasInmemIndex ¶ added in v1.6.0
HasInmemIndex returns true if any in-memory index is in use.
func (IndexSet) HasTagKey ¶ added in v1.5.0
HasTagKey returns true if the tag key exists in any index for the provided measurement.
func (IndexSet) HasTagValue ¶ added in v1.5.0
HasTagValue returns true if the tag value exists in any index for the provided measurement and tag key.
func (IndexSet) MatchTagValueSeriesIDIterator ¶ added in v1.5.0
func (is IndexSet) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error)
MatchTagValueSeriesIDIterator returns a series iterator for tags which match value. If matches is false, returns iterators which do not match value.
func (IndexSet) MeasurementIterator ¶ added in v1.5.0
func (is IndexSet) MeasurementIterator() (MeasurementIterator, error)
MeasurementIterator returns an iterator over all measurements in the index.
func (IndexSet) MeasurementNamesByExpr ¶ added in v1.5.0
func (is IndexSet) MeasurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) (_ [][]byte, err error)
MeasurementNamesByExpr returns a slice of measurement names matching the provided condition. If no condition is provided then all names are returned.
func (IndexSet) MeasurementNamesByPredicate ¶ added in v1.8.3
func (is IndexSet) MeasurementNamesByPredicate(auth query.FineAuthorizer, expr influxql.Expr) (_ [][]byte, err error)
MeasurementNamesByPredicate returns a slice of measurement names matching the provided condition. If no condition is provided then all names are returned. This behaves differently from MeasurementNamesByExpr because it will return measurements using flux predicates.
func (IndexSet) MeasurementSeriesByExprIterator ¶ added in v1.5.0
func (is IndexSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error)
MeasurementSeriesByExprIterator returns a series iterator for a measurement that is filtered by expr. If expr only contains time expressions then this call is equivalent to MeasurementSeriesIDIterator().
func (IndexSet) MeasurementSeriesIDIterator ¶ added in v1.5.0
func (is IndexSet) MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error)
MeasurementSeriesIDIterator returns an iterator over all non-tombstoned series for the provided measurement.
func (IndexSet) MeasurementSeriesKeyByExprIterator ¶ added in v1.9.0
func (is IndexSet) MeasurementSeriesKeyByExprIterator(name []byte, expr influxql.Expr, auth query.FineAuthorizer) (SeriesKeyIterator, error)
MeasurementSeriesKeyByExprIterator iterates through series, filtered by an expression on the tags. Any non-tag expressions will be filtered as if the field had the zero value.
func (IndexSet) MeasurementSeriesKeysByExpr ¶ added in v1.5.0
MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
func (IndexSet) MeasurementTagKeyValuesByExpr ¶ added in v1.5.0
func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool, log *zap.Logger) ([][]string, error)
MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
func (IndexSet) MeasurementTagKeysByExpr ¶ added in v1.5.0
func (is IndexSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)
MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
func (IndexSet) TagKeyHasAuthorizedSeries ¶ added in v1.5.0
func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.FineAuthorizer, name, tagKey []byte) (bool, error)
TagKeyHasAuthorizedSeries determines if there exists an authorized series for the provided measurement name and tag key.
func (IndexSet) TagKeyIterator ¶ added in v1.5.0
func (is IndexSet) TagKeyIterator(name []byte) (TagKeyIterator, error)
TagKeyIterator returns a key iterator for a measurement.
func (IndexSet) TagKeySeriesIDIterator ¶ added in v1.5.0
func (is IndexSet) TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error)
TagKeySeriesIDIterator returns a series iterator for all values across a single key.
func (IndexSet) TagSets ¶ added in v1.5.0
func (is IndexSet) TagSets(sfile *SeriesFile, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error)
TagSets returns an ordered list of tag sets for a measurement by dimension and filtered by an optional conditional expression.
func (IndexSet) TagValueIterator ¶ added in v1.5.0
func (is IndexSet) TagValueIterator(name, key []byte) (TagValueIterator, error)
TagValueIterator returns a value iterator for a tag key.
func (IndexSet) TagValueSeriesIDIterator ¶ added in v1.5.0
func (is IndexSet) TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error)
TagValueSeriesIDIterator returns a series iterator for a single tag value.
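Tying several of these methods together, a hedged sketch that collects the IDs of all series carrying a given tag value; the IndexSet is assumed to come from an open shard's index, and the helper name is illustrative:

package shardutil

import "github.com/influxdata/influxdb/tsdb"

// seriesIDsForTagValue drains the tag-value series iterator into a slice.
func seriesIDsForTagValue(is tsdb.IndexSet, name, key, value []byte) ([]uint64, error) {
    itr, err := is.TagValueSeriesIDIterator(name, key, value)
    if err != nil {
        return nil, err
    }
    if itr == nil {
        return nil, nil // no index contains the tag value
    }
    defer itr.Close()

    return tsdb.ReadAllSeriesIDIterator(itr)
}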
type IngressMetrics ¶ added in v1.9.0
type IngressMetrics struct {
// contains filtered or unexported fields
}
func (*IngressMetrics) AddMetric ¶ added in v1.9.0
func (i *IngressMetrics) AddMetric(measurement, db, rp, login string, points, values, series int64)
func (*IngressMetrics) ForEach ¶ added in v1.9.0
func (i *IngressMetrics) ForEach(f func(m MetricKey, points, values, series int64))
type IntegerArray ¶ added in v1.7.0
type IntegerArray = cursors.IntegerArray
func NewIntegerArrayLen ¶ added in v1.7.0
func NewIntegerArrayLen(sz int) *IntegerArray
type IntegerArrayCursor ¶ added in v1.7.0
type IntegerArrayCursor = cursors.IntegerArrayCursor
type KeyValue ¶ added in v1.0.0
type KeyValue struct {
Key, Value string
}
KeyValue holds a string key and a string value.
type KeyValues ¶ added in v1.0.0
type KeyValues []KeyValue
KeyValues is a sortable slice of KeyValue.
type LimitError ¶ added in v1.3.0
type LimitError struct {
Reason string
}
LimitError represents an error caused by a configurable limit.
func (*LimitError) Error ¶ added in v1.3.0
func (e *LimitError) Error() string
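A sketch of detecting this error type on the write path with the standard errors package; the helper name is illustrative:

package shardutil

import (
    "errors"

    "github.com/influxdata/influxdb/tsdb"
)

// limitReason reports whether err was caused by a configurable limit,
// and if so, which one.
func limitReason(err error) (string, bool) {
    var le *tsdb.LimitError
    if errors.As(err, &le) {
        return le.Reason, true
    }
    return "", false
}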
type MeasurementFieldSet ¶ added in v1.3.0
type MeasurementFieldSet struct {
// contains filtered or unexported fields
}
MeasurementFieldSet represents a collection of fields by measurement. It is safe for concurrent use.
func NewMeasurementFieldSet ¶ added in v1.3.0
func NewMeasurementFieldSet(path string, logger *zap.Logger) (*MeasurementFieldSet, error)
NewMeasurementFieldSet returns a new instance of MeasurementFieldSet.
func (*MeasurementFieldSet) ApplyChanges ¶ added in v1.11.0
func (fs *MeasurementFieldSet) ApplyChanges() error
func (*MeasurementFieldSet) Bytes ¶ added in v1.6.0
func (fs *MeasurementFieldSet) Bytes() int
Bytes estimates the memory footprint of this MeasurementFieldSet, in bytes.
func (*MeasurementFieldSet) ChangesPath ¶ added in v1.11.0
func (fs *MeasurementFieldSet) ChangesPath() string
func (*MeasurementFieldSet) Close ¶ added in v1.9.3
func (fs *MeasurementFieldSet) Close() error
func (*MeasurementFieldSet) CreateFieldsIfNotExists ¶ added in v1.3.0
func (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields
CreateFieldsIfNotExists returns fields for a measurement by name.
func (*MeasurementFieldSet) Delete ¶ added in v1.3.0
func (fs *MeasurementFieldSet) Delete(name string)
Delete removes a field set for a measurement.
func (*MeasurementFieldSet) DeleteWithLock ¶ added in v1.3.0
func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error
DeleteWithLock executes fn and removes a field set from a measurement under lock.
func (*MeasurementFieldSet) Fields ¶ added in v1.3.0
func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields
Fields returns fields for a measurement by name.
func (*MeasurementFieldSet) FieldsByString ¶ added in v1.5.1
func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields
FieldsByString returns fields for a measurement by name.
func (*MeasurementFieldSet) IsEmpty ¶ added in v1.5.0
func (fs *MeasurementFieldSet) IsEmpty() bool
func (*MeasurementFieldSet) MeasurementNames ¶ added in v1.10.1
func (fs *MeasurementFieldSet) MeasurementNames() []string
MeasurementNames returns the names of all the measurements in the field set in lexicographic order.
func (*MeasurementFieldSet) Save ¶ added in v1.5.0
func (fs *MeasurementFieldSet) Save(changes FieldChanges) error
func (*MeasurementFieldSet) SetMeasurementFieldSetWriter ¶ added in v1.9.3
func (fs *MeasurementFieldSet) SetMeasurementFieldSetWriter(queueLength int, logger *zap.Logger)
SetMeasurementFieldSetWriter initializes the queue for write requests and starts the background write process.
func (*MeasurementFieldSet) WriteToFile ¶ added in v1.11.0
func (fs *MeasurementFieldSet) WriteToFile() error
WriteToFile writes the new index to a temp file and renames it once it has been synced. It locks the MeasurementFieldSet during the marshaling, the write, and the rename.
type MeasurementFields ¶ added in v0.9.3
type MeasurementFields struct {
// contains filtered or unexported fields
}
MeasurementFields holds the fields of a measurement and their codec.
func NewMeasurementFields ¶ added in v0.12.1
func NewMeasurementFields() *MeasurementFields
NewMeasurementFields returns an initialised *MeasurementFields value.
func (*MeasurementFields) CreateFieldIfNotExists ¶ added in v0.9.3
func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error
CreateFieldIfNotExists creates a new field with an autoincrementing ID. Returns an error if 255 fields have already been created on the measurement or the field already exists with a different type.
func (*MeasurementFields) Field ¶ added in v0.12.1
func (m *MeasurementFields) Field(name string) *Field
Field returns the field for name, or nil if there is no field for name.
func (*MeasurementFields) FieldBytes ¶ added in v1.1.0
func (m *MeasurementFields) FieldBytes(name []byte) *Field
FieldBytes returns the field for name, or nil if there is no field for name. FieldBytes should be preferred to Field when the caller has a []byte, because it avoids a string allocation, which can't be avoided if the caller converts the []byte to a string and calls Field.
func (*MeasurementFields) FieldKeys ¶ added in v1.4.0
func (m *MeasurementFields) FieldKeys() []string
func (*MeasurementFields) FieldN ¶ added in v1.3.0
func (m *MeasurementFields) FieldN() int
func (*MeasurementFields) FieldSet ¶ added in v1.0.0
func (m *MeasurementFields) FieldSet() map[string]influxql.DataType
FieldSet returns the set of fields and their types for the measurement.
func (*MeasurementFields) ForEachField ¶ added in v1.5.0
func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool)
func (*MeasurementFields) HasField ¶ added in v1.3.0
func (m *MeasurementFields) HasField(name string) bool
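A minimal sketch tying MeasurementFieldSet and MeasurementFields together: create a field set backed by a file, register a measurement's field, and query it. The path and names are illustrative, not part of the API.

package tsdb_test

import (
	"fmt"

	"github.com/influxdata/influxdb/tsdb"
	"github.com/influxdata/influxql"
	"go.uber.org/zap"
)

func ExampleMeasurementFieldSet() {
	// "fields.idx" is an illustrative path for the persisted field set.
	fs, err := tsdb.NewMeasurementFieldSet("/tmp/fields.idx", zap.NewNop())
	if err != nil {
		panic(err)
	}
	defer fs.Close()

	// Register a float field on the "cpu" measurement.
	mf := fs.CreateFieldsIfNotExists([]byte("cpu"))
	if err := mf.CreateFieldIfNotExists([]byte("value"), influxql.Float); err != nil {
		panic(err)
	}
	fmt.Println(mf.HasField("value")) // true
}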
type MeasurementIterator ¶ added in v0.11.0
MeasurementIterator represents an iterator over a list of measurements.
func MergeMeasurementIterators ¶ added in v1.5.0
func MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator
MergeMeasurementIterators returns an iterator that merges a set of iterators. Iterators that are first in the list take precedence, and a deletion by those early iterators will invalidate elements from later iterators.
type MeasurementIterators ¶ added in v1.5.0
type MeasurementIterators []MeasurementIterator
func (MeasurementIterators) Close ¶ added in v1.5.0
func (a MeasurementIterators) Close() (err error)
type MeasurementSliceIterator ¶ added in v1.9.4
type MeasurementSliceIterator interface {
	MeasurementIterator
	UnderlyingSlice() [][]byte
}
type MetricKey ¶ added in v1.9.0
type MetricKey struct {
// contains filtered or unexported fields
}
type MetricValue ¶ added in v1.9.0
type MetricValue struct {
// contains filtered or unexported fields
}
type NewEngineFunc ¶ added in v0.9.3
type NewEngineFunc func(id uint64, i Index, path string, walPath string, sfile *SeriesFile, options EngineOptions) Engine
NewEngineFunc creates a new engine.
type NewIndexFunc ¶ added in v1.3.0
type NewIndexFunc func(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index
NewIndexFunc creates a new index.
type PartialWriteError ¶ added in v1.1.0
type PartialWriteError struct {
	Reason  string
	Dropped int

	// A sorted slice of series keys that were dropped.
	DroppedKeys [][]byte
}
PartialWriteError indicates a write request could only write a portion of the requested values.
func (PartialWriteError) Error ¶ added in v1.1.0
func (e PartialWriteError) Error() string
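Because PartialWriteError implements error with a value receiver, callers can detect a partial write with errors.As. A minimal sketch; the helper and the origin of err are illustrative.

package tsdb_test

import (
	"errors"
	"log"

	"github.com/influxdata/influxdb/tsdb"
)

// handleWriteErr is an illustrative helper; err would come from a shard write.
func handleWriteErr(err error) {
	var pw tsdb.PartialWriteError
	if errors.As(err, &pw) {
		log.Printf("partial write: dropped %d series (%s)", pw.Dropped, pw.Reason)
	}
}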
type PointBatcher ¶
type PointBatcher struct {
// contains filtered or unexported fields
}
PointBatcher accepts Points and will emit a batch of those points when either a) the batch reaches a certain size, or b) a certain time passes.
func NewPointBatcher ¶
func NewPointBatcher(sz int, bp int, d time.Duration) *PointBatcher
NewPointBatcher returns a new PointBatcher. sz is the batching size, bp is the maximum number of batches that may be pending. d is the time after which a batch will be emitted after the first point is received for the batch, regardless of its size.
func (*PointBatcher) Flush ¶
func (b *PointBatcher) Flush()
Flush instructs the batcher to emit any pending points in a batch, regardless of batch size. If there are no pending points, no batch is emitted.
func (*PointBatcher) In ¶
func (b *PointBatcher) In() chan<- models.Point
In returns the channel to which points should be written.
func (*PointBatcher) Out ¶
func (b *PointBatcher) Out() <-chan []models.Point
Out returns the channel from which batches should be read.
func (*PointBatcher) Start ¶
func (b *PointBatcher) Start()
Start starts the batching process. Returns the in and out channels for points and point-batches respectively.
func (*PointBatcher) Stats ¶
func (b *PointBatcher) Stats() *PointBatcherStats
Stats returns a PointBatcherStats object for the PointBatcher. While each statistic should be closely correlated with the others, this is not guaranteed.
func (*PointBatcher) Stop ¶
func (b *PointBatcher) Stop()
Stop stops the batching process. Stop waits for the batching routine to stop before returning.
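A minimal end-to-end sketch of the batcher: points pushed into In() emerge from Out() once the size or time threshold is reached. The thresholds and line-protocol input are illustrative.

package tsdb_test

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb"
)

func ExamplePointBatcher() {
	// Emit a batch at 2 points, allow up to 10 pending batches,
	// or flush a partial batch after one second.
	b := tsdb.NewPointBatcher(2, 10, time.Second)
	b.Start()
	defer b.Stop()

	pts, err := models.ParsePointsString("cpu value=1\ncpu value=2")
	if err != nil {
		panic(err)
	}
	for _, p := range pts {
		b.In() <- p
	}
	batch := <-b.Out() // size threshold reached
	fmt.Println(len(batch)) // 2
}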
type PointBatcherStats ¶
type PointBatcherStats struct {
	BatchTotal   uint64 // Total count of batches transmitted.
	PointTotal   uint64 // Total count of points processed.
	SizeTotal    uint64 // Number of batches that reached size threshold.
	TimeoutTotal uint64 // Number of timeouts that occurred.
}
PointBatcherStats are the statistics each batcher tracks.
type SeriesCursor ¶ added in v1.6.0
type SeriesCursor interface {
	Close() error
	Next() (*SeriesCursorRow, error)
}
type SeriesCursorRequest ¶ added in v1.6.0
type SeriesCursorRequest struct {
Measurements MeasurementIterator
}
type SeriesCursorRow ¶ added in v1.6.0
func (*SeriesCursorRow) Compare ¶ added in v1.6.0
func (r *SeriesCursorRow) Compare(other *SeriesCursorRow) int
type SeriesElem ¶ added in v1.5.0
type SeriesElem interface {
	Name() []byte
	Tags() models.Tags
	Deleted() bool

	// InfluxQL expression associated with series during filtering.
	Expr() influxql.Expr
}
SeriesElem represents a generic series element.
type SeriesFile ¶ added in v1.5.0
SeriesFile represents the section of the index that holds series data.
func NewSeriesFile ¶ added in v1.5.0
func NewSeriesFile(path string) *SeriesFile
NewSeriesFile returns a new instance of SeriesFile.
func (*SeriesFile) Close ¶ added in v1.5.0
func (f *SeriesFile) Close() (err error)
Close unmaps the data file.
func (*SeriesFile) CreateSeriesListIfNotExists ¶ added in v1.5.0
func (f *SeriesFile) CreateSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags, tracker StatsTracker) ([]uint64, error)
CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist. The returned ids slice contains an ID for every name+tags pair, creating new series IDs as needed.
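A minimal sketch of the series-file lifecycle around this call; the path, measurement, and tags are illustrative.

package tsdb_test

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb"
)

func ExampleSeriesFile() {
	sfile := tsdb.NewSeriesFile("/tmp/db/_series") // illustrative path
	if err := sfile.Open(); err != nil {
		panic(err)
	}
	defer sfile.Close()

	names := [][]byte{[]byte("cpu")}
	tagsSlice := []models.Tags{models.NewTags(map[string]string{"host": "serverA"})}
	ids, err := sfile.CreateSeriesListIfNotExists(names, tagsSlice, tsdb.NoopStatsTracker())
	if err != nil {
		panic(err)
	}
	name, tags := sfile.Series(ids[0]) // look the series back up by id
	fmt.Println(string(name), tags)
}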
func (*SeriesFile) DeleteSeriesID ¶ added in v1.5.0
func (f *SeriesFile) DeleteSeriesID(id uint64) error
DeleteSeriesID flags a series as permanently deleted. If the series is reintroduced later then it must create a new id.
func (*SeriesFile) DisableCompactions ¶ added in v1.5.0
func (f *SeriesFile) DisableCompactions()
DisableCompactions prevents new compactions from running.
func (*SeriesFile) EnableCompactions ¶ added in v1.5.0
func (f *SeriesFile) EnableCompactions()
EnableCompactions allows compactions to run.
func (*SeriesFile) FileSize ¶ added in v1.8.0
func (f *SeriesFile) FileSize() (n int64, err error)
FileSize returns the size of all partitions, in bytes.
func (*SeriesFile) IsDeleted ¶ added in v1.5.0
func (f *SeriesFile) IsDeleted(id uint64) bool
IsDeleted returns true if the ID has been deleted before.
func (*SeriesFile) Open ¶ added in v1.5.0
func (f *SeriesFile) Open() error
Open memory maps the data file at the file's path.
func (*SeriesFile) Partitions ¶ added in v1.5.0
func (f *SeriesFile) Partitions() []*SeriesPartition
Partitions returns all partitions.
func (*SeriesFile) Path ¶ added in v1.5.0
func (f *SeriesFile) Path() string
Path returns the path to the file.
func (*SeriesFile) Retain ¶ added in v1.5.0
func (f *SeriesFile) Retain() func()
Retain adds a reference count to the file. It returns a release func.
func (*SeriesFile) Series ¶ added in v1.5.0
func (f *SeriesFile) Series(id uint64) ([]byte, models.Tags)
Series returns the parsed series name and tags for an offset.
func (*SeriesFile) SeriesCount ¶ added in v1.5.0
func (f *SeriesFile) SeriesCount() uint64
SeriesCount returns the number of series.
func (*SeriesFile) SeriesIDIterator ¶ added in v1.5.0
func (f *SeriesFile) SeriesIDIterator() SeriesIDIterator
SeriesIDIterator returns an iterator over all the series.
func (*SeriesFile) SeriesIDPartition ¶ added in v1.5.0
func (f *SeriesFile) SeriesIDPartition(id uint64) *SeriesPartition
func (*SeriesFile) SeriesIDPartitionID ¶ added in v1.5.0
func (f *SeriesFile) SeriesIDPartitionID(id uint64) int
func (*SeriesFile) SeriesKey ¶ added in v1.5.0
func (f *SeriesFile) SeriesKey(id uint64) []byte
SeriesKey returns the series key for a given id.
func (*SeriesFile) SeriesKeyPartition ¶ added in v1.5.0
func (f *SeriesFile) SeriesKeyPartition(key []byte) *SeriesPartition
func (*SeriesFile) SeriesKeyPartitionID ¶ added in v1.5.0
func (f *SeriesFile) SeriesKeyPartitionID(key []byte) int
func (*SeriesFile) SeriesKeys ¶ added in v1.5.0
func (f *SeriesFile) SeriesKeys(ids []uint64) [][]byte
SeriesKeys returns a list of series keys from a list of ids.
func (*SeriesFile) SeriesKeysPartitionIDs ¶ added in v1.5.0
func (f *SeriesFile) SeriesKeysPartitionIDs(keys [][]byte) []int
func (*SeriesFile) SeriesPartitionPath ¶ added in v1.5.0
func (f *SeriesFile) SeriesPartitionPath(i int) string
SeriesPartitionPath returns the path to a given partition.
func (*SeriesFile) Wait ¶ added in v1.5.0
func (f *SeriesFile) Wait()
Wait waits for all Retains to be released.
func (*SeriesFile) WithMaxCompactionConcurrency ¶ added in v1.8.0
func (f *SeriesFile) WithMaxCompactionConcurrency(maxCompactionConcurrency int)
type SeriesIDElem ¶ added in v1.5.0
SeriesIDElem represents a single series and optional expression.
type SeriesIDElems ¶ added in v1.5.0
type SeriesIDElems []SeriesIDElem
SeriesIDElems represents a list of series id elements.
func (SeriesIDElems) Len ¶ added in v1.5.0
func (a SeriesIDElems) Len() int
func (SeriesIDElems) Less ¶ added in v1.5.0
func (a SeriesIDElems) Less(i, j int) bool
func (SeriesIDElems) Swap ¶ added in v1.5.0
func (a SeriesIDElems) Swap(i, j int)
type SeriesIDIterator ¶ added in v1.5.0
type SeriesIDIterator interface {
	Next() (SeriesIDElem, error)
	Close() error
}
SeriesIDIterator represents an iterator over a list of series ids.
func DifferenceSeriesIDIterators ¶ added in v1.5.0
func DifferenceSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
DifferenceSeriesIDIterators returns an iterator that only returns series which occur in the first iterator but not the second iterator.
func FilterUndeletedSeriesIDIterator ¶ added in v1.5.0
func FilterUndeletedSeriesIDIterator(sfile *SeriesFile, itr SeriesIDIterator) SeriesIDIterator
FilterUndeletedSeriesIDIterator returns an iterator which filters all deleted series.
func IntersectSeriesIDIterators ¶ added in v1.5.0
func IntersectSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
IntersectSeriesIDIterators returns an iterator that only returns series which occur in both iterators. If both series have associated expressions then they are combined together.
func MergeSeriesIDIterators ¶ added in v1.5.0
func MergeSeriesIDIterators(itrs ...SeriesIDIterator) SeriesIDIterator
MergeSeriesIDIterators returns an iterator that merges a set of iterators. Iterators that are first in the list take precedence, and a deletion by those early iterators will invalidate elements from later iterators.
func UnionSeriesIDIterators ¶ added in v1.5.0
func UnionSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator
UnionSeriesIDIterators returns an iterator that returns series from both iterators. If both series have associated expressions then they are combined together.
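A minimal sketch combining slice-backed iterators (NewSeriesIDSliceIterator and ReadAllSeriesIDIterator are described elsewhere in this package):

package tsdb_test

import (
	"fmt"

	"github.com/influxdata/influxdb/tsdb"
)

func ExampleUnionSeriesIDIterators() {
	itr0 := tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3})
	itr1 := tsdb.NewSeriesIDSliceIterator([]uint64{3, 4})

	itr := tsdb.UnionSeriesIDIterators(itr0, itr1)
	defer itr.Close()

	// Drain the merged iterator into a slice of ids.
	ids, err := tsdb.ReadAllSeriesIDIterator(itr)
	if err != nil {
		panic(err)
	}
	fmt.Println(ids) // [1 2 3 4]
}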
type SeriesIDIterators ¶ added in v1.5.0
type SeriesIDIterators []SeriesIDIterator
func (SeriesIDIterators) Close ¶ added in v1.5.0
func (a SeriesIDIterators) Close() (err error)
type SeriesIDSet ¶ added in v1.5.0
SeriesIDSet represents a lockable bitmap of series ids.
func NewSeriesIDSet ¶ added in v1.5.0
func NewSeriesIDSet(a ...uint64) *SeriesIDSet
NewSeriesIDSet returns a new instance of SeriesIDSet.
func (*SeriesIDSet) Add ¶ added in v1.5.0
func (s *SeriesIDSet) Add(id uint64)
Add adds the series id to the set.
func (*SeriesIDSet) AddMany ¶ added in v1.7.0
func (s *SeriesIDSet) AddMany(ids ...uint64)
AddMany adds multiple ids to the SeriesIDSet. AddMany takes a lock, so it may not be optimal to call it many times with few ids.
func (*SeriesIDSet) AddNoLock ¶ added in v1.5.0
func (s *SeriesIDSet) AddNoLock(id uint64)
AddNoLock adds the series id to the set. AddNoLock is not safe for use from multiple goroutines. Callers must manage synchronization.
func (*SeriesIDSet) And ¶ added in v1.6.1
func (s *SeriesIDSet) And(other *SeriesIDSet) *SeriesIDSet
And returns a new SeriesIDSet containing elements that were present in s and other.
func (*SeriesIDSet) AndNot ¶ added in v1.5.0
func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet
AndNot returns a new SeriesIDSet containing elements that were present in s, but not present in other.
func (*SeriesIDSet) Bytes ¶ added in v1.6.0
func (s *SeriesIDSet) Bytes() int
Bytes estimates the memory footprint of this SeriesIDSet, in bytes.
func (*SeriesIDSet) Cardinality ¶ added in v1.5.0
func (s *SeriesIDSet) Cardinality() uint64
Cardinality returns the cardinality of the SeriesIDSet.
func (*SeriesIDSet) Clear ¶ added in v1.7.0
func (s *SeriesIDSet) Clear()
Clear clears the underlying bitmap for re-use. Clear is safe for use by multiple goroutines.
func (*SeriesIDSet) ClearNoLock ¶ added in v1.7.0
func (s *SeriesIDSet) ClearNoLock()
ClearNoLock clears the underlying bitmap for re-use without taking a lock.
func (*SeriesIDSet) Clone ¶ added in v1.6.1
func (s *SeriesIDSet) Clone() *SeriesIDSet
Clone returns a new SeriesIDSet with a deep copy of the underlying bitmap.
func (*SeriesIDSet) CloneNoLock ¶ added in v1.6.1
func (s *SeriesIDSet) CloneNoLock() *SeriesIDSet
CloneNoLock calls Clone without taking a lock.
func (*SeriesIDSet) Contains ¶ added in v1.5.0
func (s *SeriesIDSet) Contains(id uint64) bool
Contains returns true if the id exists in the set.
func (*SeriesIDSet) ContainsNoLock ¶ added in v1.5.0
func (s *SeriesIDSet) ContainsNoLock(id uint64) bool
ContainsNoLock returns true if the id exists in the set. ContainsNoLock is not safe for use from multiple goroutines. The caller must manage synchronization.
func (*SeriesIDSet) Diff ¶ added in v1.5.0
func (s *SeriesIDSet) Diff(other *SeriesIDSet)
Diff removes from s any elements also present in other.
func (*SeriesIDSet) Equals ¶ added in v1.5.0
func (s *SeriesIDSet) Equals(other *SeriesIDSet) bool
Equals returns true if other and s are the same set of ids.
func (*SeriesIDSet) ForEach ¶ added in v1.5.0
func (s *SeriesIDSet) ForEach(f func(id uint64))
ForEach calls f for each id in the set. The function is applied to the IDs in ascending order.
func (*SeriesIDSet) ForEachNoLock ¶ added in v1.6.1
func (s *SeriesIDSet) ForEachNoLock(f func(id uint64))
ForEachNoLock calls f for each id in the set without taking a lock.
func (*SeriesIDSet) Iterator ¶ added in v1.6.1
func (s *SeriesIDSet) Iterator() SeriesIDSetIterable
Iterator returns an iterator to the underlying bitmap. This iterator is not protected by a lock.
func (*SeriesIDSet) Merge ¶ added in v1.5.0
func (s *SeriesIDSet) Merge(others ...*SeriesIDSet)
Merge merges the contents of others into s. The caller does not need to provide s as an argument, and the contents of s will always be present in s after Merge returns.
func (*SeriesIDSet) MergeInPlace ¶ added in v1.7.0
func (s *SeriesIDSet) MergeInPlace(other *SeriesIDSet)
MergeInPlace merges other into s, modifying s in the process.
func (*SeriesIDSet) Remove ¶ added in v1.5.0
func (s *SeriesIDSet) Remove(id uint64)
Remove removes the id from the set.
func (*SeriesIDSet) RemoveNoLock ¶ added in v1.5.0
func (s *SeriesIDSet) RemoveNoLock(id uint64)
RemoveNoLock removes the id from the set. RemoveNoLock is not safe for use from multiple goroutines. The caller must manage synchronization.
func (*SeriesIDSet) Slice ¶ added in v1.7.0
func (s *SeriesIDSet) Slice() []uint64
Slice returns a slice of series ids.
func (*SeriesIDSet) String ¶ added in v1.5.0
func (s *SeriesIDSet) String() string
func (*SeriesIDSet) UnmarshalBinary ¶ added in v1.5.0
func (s *SeriesIDSet) UnmarshalBinary(data []byte) error
UnmarshalBinary unmarshals data into the set.
func (*SeriesIDSet) UnmarshalBinaryUnsafe ¶ added in v1.7.0
func (s *SeriesIDSet) UnmarshalBinaryUnsafe(data []byte) error
UnmarshalBinaryUnsafe unmarshals data into the set. References to the underlying data are retained, so the caller should not reuse data.
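A minimal sketch of the common set operations above:

package tsdb_test

import (
	"fmt"

	"github.com/influxdata/influxdb/tsdb"
)

func ExampleSeriesIDSet() {
	a := tsdb.NewSeriesIDSet(1, 2, 3)
	b := tsdb.NewSeriesIDSet(2, 3, 4)

	fmt.Println(a.Contains(2))          // true
	fmt.Println(a.And(b).Cardinality()) // 2 (ids 2 and 3)
	fmt.Println(a.AndNot(b).Slice())    // [1]

	a.Merge(b)                   // a now holds ids 1 through 4
	fmt.Println(a.Cardinality()) // 4
}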
type SeriesIDSetIterable ¶ added in v1.6.1
type SeriesIDSetIterator ¶ added in v1.6.1
type SeriesIDSetIterator interface {
	SeriesIDIterator
	SeriesIDSet() *SeriesIDSet
}
SeriesIDSetIterator represents an iterator that can produce a SeriesIDSet.
func NewSeriesIDSetIterator ¶ added in v1.6.1
func NewSeriesIDSetIterator(ss *SeriesIDSet) SeriesIDSetIterator
func NewSeriesIDSetIteratorWithCloser ¶ added in v1.7.11
func NewSeriesIDSetIteratorWithCloser(ss *SeriesIDSet, closer io.Closer) SeriesIDSetIterator
func NewSeriesIDSetIterators ¶ added in v1.6.1
func NewSeriesIDSetIterators(itrs []SeriesIDIterator) []SeriesIDSetIterator
NewSeriesIDSetIterators returns a slice of SeriesIDSetIterator if all itrs can be type-asserted to SeriesIDSetIterator. Otherwise it returns nil.
type SeriesIDSetIterators ¶ added in v1.7.11
type SeriesIDSetIterators []SeriesIDSetIterator
func (SeriesIDSetIterators) Close ¶ added in v1.7.11
func (a SeriesIDSetIterators) Close() (err error)
type SeriesIDSets ¶ added in v1.5.0
type SeriesIDSets interface {
ForEach(f func(ids *SeriesIDSet)) error
}
SeriesIDSets provides access to the total set of series IDs.
type SeriesIDSliceIterator ¶ added in v1.5.0
type SeriesIDSliceIterator struct {
// contains filtered or unexported fields
}
SeriesIDSliceIterator iterates over a slice of series ids.
func NewSeriesIDSliceIterator ¶ added in v1.5.0
func NewSeriesIDSliceIterator(ids []uint64) *SeriesIDSliceIterator
NewSeriesIDSliceIterator returns a SeriesIDIterator that iterates over a slice.
func (*SeriesIDSliceIterator) Close ¶ added in v1.5.0
func (itr *SeriesIDSliceIterator) Close() error
func (*SeriesIDSliceIterator) Next ¶ added in v1.5.0
func (itr *SeriesIDSliceIterator) Next() (SeriesIDElem, error)
Next returns the next series id in the slice.
func (*SeriesIDSliceIterator) SeriesIDSet ¶ added in v1.6.1
func (itr *SeriesIDSliceIterator) SeriesIDSet() *SeriesIDSet
SeriesIDSet returns a set of all remaining ids.
type SeriesIndex ¶ added in v1.5.0
type SeriesIndex struct {
// contains filtered or unexported fields
}
SeriesIndex represents an index of key-to-id & id-to-offset mappings.
func NewSeriesIndex ¶ added in v1.5.0
func NewSeriesIndex(path string) *SeriesIndex
func (*SeriesIndex) Clone ¶ added in v1.5.0
func (idx *SeriesIndex) Clone() *SeriesIndex
Clone returns a copy of idx for use during compaction. In-memory maps are not cloned.
func (*SeriesIndex) Close ¶ added in v1.5.0
func (idx *SeriesIndex) Close() (err error)
Close unmaps the index file.
func (*SeriesIndex) Count ¶ added in v1.5.0
func (idx *SeriesIndex) Count() uint64
Count returns the number of series in the index.
func (*SeriesIndex) Delete ¶ added in v1.5.0
func (idx *SeriesIndex) Delete(id uint64)
Delete marks the series id as deleted.
func (*SeriesIndex) FindIDByNameTags ¶ added in v1.5.0
func (idx *SeriesIndex) FindIDByNameTags(segments []*SeriesSegment, name []byte, tags models.Tags, buf []byte) uint64
func (*SeriesIndex) FindIDBySeriesKey ¶ added in v1.5.0
func (idx *SeriesIndex) FindIDBySeriesKey(segments []*SeriesSegment, key []byte) uint64
func (*SeriesIndex) FindIDListByNameTags ¶ added in v1.5.0
func (idx *SeriesIndex) FindIDListByNameTags(segments []*SeriesSegment, names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, ok bool)
func (*SeriesIndex) FindOffsetByID ¶ added in v1.5.0
func (idx *SeriesIndex) FindOffsetByID(id uint64) int64
func (*SeriesIndex) InMemCount ¶ added in v1.5.0
func (idx *SeriesIndex) InMemCount() uint64
InMemCount returns the number of series in the in-memory index.
func (*SeriesIndex) Insert ¶ added in v1.5.0
func (idx *SeriesIndex) Insert(key []byte, id uint64, offset int64)
func (*SeriesIndex) IsDeleted ¶ added in v1.5.0
func (idx *SeriesIndex) IsDeleted(id uint64) bool
IsDeleted returns true if series id has been deleted.
func (*SeriesIndex) OnDiskCount ¶ added in v1.5.0
func (idx *SeriesIndex) OnDiskCount() uint64
OnDiskCount returns the number of series in the on-disk index.
func (*SeriesIndex) Open ¶ added in v1.5.0
func (idx *SeriesIndex) Open() (err error)
Open memory-maps the index file.
func (*SeriesIndex) Recover ¶ added in v1.5.0
func (idx *SeriesIndex) Recover(segments []*SeriesSegment) error
Recover rebuilds the in-memory index for all new entries.
type SeriesIndexHeader ¶ added in v1.5.0
type SeriesIndexHeader struct {
	Version uint8

	MaxSeriesID uint64
	MaxOffset   int64

	Count    uint64
	Capacity int64

	KeyIDMap struct {
		Offset int64
		Size   int64
	}

	IDOffsetMap struct {
		Offset int64
		Size   int64
	}
}
SeriesIndexHeader represents the header of a series index.
func NewSeriesIndexHeader ¶ added in v1.5.0
func NewSeriesIndexHeader() SeriesIndexHeader
NewSeriesIndexHeader returns a new instance of SeriesIndexHeader.
func ReadSeriesIndexHeader ¶ added in v1.5.0
func ReadSeriesIndexHeader(data []byte) (hdr SeriesIndexHeader, err error)
ReadSeriesIndexHeader returns the header from data.
type SeriesIterator ¶ added in v1.5.0
type SeriesIterator interface {
	Close() error
	Next() (SeriesElem, error)
}
SeriesIterator represents an iterator over a list of series.
func NewSeriesIteratorAdapter ¶ added in v1.5.0
func NewSeriesIteratorAdapter(sfile *SeriesFile, itr SeriesIDIterator) SeriesIterator
NewSeriesIteratorAdapter returns an adapter for converting series ids to series.
type SeriesKeyIterator ¶ added in v1.9.0
SeriesKeyIterator represents an iterator over a list of SeriesKeys.
type SeriesPartition ¶ added in v1.5.0
type SeriesPartition struct {
	CompactThreshold int

	Logger *zap.Logger
	// contains filtered or unexported fields
}
SeriesPartition represents a subset of series file data.
func NewSeriesPartition ¶ added in v1.5.0
func NewSeriesPartition(id int, path string, compactionLimiter limiter.Fixed) *SeriesPartition
NewSeriesPartition returns a new instance of SeriesPartition.
func (*SeriesPartition) AppendSeriesIDs ¶ added in v1.5.0
func (p *SeriesPartition) AppendSeriesIDs(a []uint64) []uint64
AppendSeriesIDs returns a list of all series ids.
func (*SeriesPartition) Close ¶ added in v1.5.0
func (p *SeriesPartition) Close() (err error)
Close unmaps the data files.
func (*SeriesPartition) Compacting ¶ added in v1.6.0
func (p *SeriesPartition) Compacting() bool
Compacting returns if the SeriesPartition is currently compacting.
func (*SeriesPartition) CreateSeriesListIfNotExists ¶ added in v1.5.0
func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitionIDs []int, ids []uint64, tracker StatsTracker) error
CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist. The ids parameter is modified to contain series IDs for all keys belonging to this partition.
func (*SeriesPartition) DeleteSeriesID ¶ added in v1.5.0
func (p *SeriesPartition) DeleteSeriesID(id uint64) error
DeleteSeriesID flags a series as permanently deleted. If the series is reintroduced later then it must create a new id.
func (*SeriesPartition) DisableCompactions ¶ added in v1.5.0
func (p *SeriesPartition) DisableCompactions()
func (*SeriesPartition) EnableCompactions ¶ added in v1.5.0
func (p *SeriesPartition) EnableCompactions()
func (*SeriesPartition) FileSize ¶ added in v1.8.0
func (p *SeriesPartition) FileSize() (n int64, err error)
FileSize returns the size of the partition, in bytes.
func (*SeriesPartition) FindIDBySeriesKey ¶ added in v1.5.0
func (p *SeriesPartition) FindIDBySeriesKey(key []byte) uint64
FindIDBySeriesKey returns the series id for the series key.
func (*SeriesPartition) ID ¶ added in v1.5.0
func (p *SeriesPartition) ID() int
ID returns the partition id.
func (*SeriesPartition) Index ¶ added in v1.8.0
func (p *SeriesPartition) Index() *SeriesIndex
Index returns the partition's index.
func (*SeriesPartition) IndexPath ¶ added in v1.5.0
func (p *SeriesPartition) IndexPath() string
IndexPath returns the path to the series index.
func (*SeriesPartition) IsDeleted ¶ added in v1.5.0
func (p *SeriesPartition) IsDeleted(id uint64) bool
IsDeleted returns true if the ID has been deleted before.
func (*SeriesPartition) Open ¶ added in v1.5.0
func (p *SeriesPartition) Open() error
Open memory maps the data file at the partition's path.
func (*SeriesPartition) Path ¶ added in v1.5.0
func (p *SeriesPartition) Path() string
Path returns the path to the partition.
func (*SeriesPartition) Segments ¶ added in v1.8.0
func (p *SeriesPartition) Segments() []*SeriesSegment
Segments returns a list of partition segments. Used for testing.
func (*SeriesPartition) Series ¶ added in v1.5.0
func (p *SeriesPartition) Series(id uint64) ([]byte, models.Tags)
Series returns the parsed series name and tags for an offset.
func (*SeriesPartition) SeriesCount ¶ added in v1.5.0
func (p *SeriesPartition) SeriesCount() uint64
SeriesCount returns the number of series.
func (*SeriesPartition) SeriesKey ¶ added in v1.5.0
func (p *SeriesPartition) SeriesKey(id uint64) []byte
SeriesKey returns the series key for a given id.
type SeriesPartitionCompactor ¶ added in v1.5.0
type SeriesPartitionCompactor struct {
// contains filtered or unexported fields
}
SeriesPartitionCompactor represents an object that reindexes a series partition and optionally compacts segments.
func NewSeriesPartitionCompactor ¶ added in v1.5.0
func NewSeriesPartitionCompactor() *SeriesPartitionCompactor
NewSeriesPartitionCompactor returns a new instance of SeriesPartitionCompactor.
func (*SeriesPartitionCompactor) Compact ¶ added in v1.5.0
func (c *SeriesPartitionCompactor) Compact(p *SeriesPartition) error
Compact rebuilds the series partition index.
type SeriesSegment ¶ added in v1.5.0
type SeriesSegment struct {
// contains filtered or unexported fields
}
SeriesSegment represents a log of series entries.
func CloneSeriesSegments ¶ added in v1.5.0
func CloneSeriesSegments(a []*SeriesSegment) []*SeriesSegment
CloneSeriesSegments returns a copy of a slice of segments.
func CreateSeriesSegment ¶ added in v1.5.0
func CreateSeriesSegment(id uint16, path string) (*SeriesSegment, error)
CreateSeriesSegment generates an empty segment at path.
func FindSegment ¶ added in v1.5.0
func FindSegment(a []*SeriesSegment, id uint16) *SeriesSegment
FindSegment returns a segment by id.
func NewSeriesSegment ¶ added in v1.5.0
func NewSeriesSegment(id uint16, path string) *SeriesSegment
NewSeriesSegment returns a new instance of SeriesSegment.
func (*SeriesSegment) AppendSeriesIDs ¶ added in v1.5.0
func (s *SeriesSegment) AppendSeriesIDs(a []uint64) []uint64
AppendSeriesIDs appends all of the segment's series ids to a slice. Returns the new slice.
func (*SeriesSegment) CanWrite ¶ added in v1.5.0
func (s *SeriesSegment) CanWrite(data []byte) bool
CanWrite returns true if segment has space to write entry data.
func (*SeriesSegment) Clone ¶ added in v1.5.0
func (s *SeriesSegment) Clone() *SeriesSegment
Clone returns a copy of the segment. Excludes the write handler, if set.
func (*SeriesSegment) Close ¶ added in v1.5.0
func (s *SeriesSegment) Close() (err error)
Close unmaps the segment.
func (*SeriesSegment) CloseForWrite ¶ added in v1.5.0
func (s *SeriesSegment) CloseForWrite() (err error)
func (*SeriesSegment) CompactToPath ¶ added in v1.8.0
func (s *SeriesSegment) CompactToPath(path string, index *SeriesIndex) error
CompactToPath rewrites the segment to a new file and removes tombstoned entries.
func (*SeriesSegment) Data ¶ added in v1.6.0
func (s *SeriesSegment) Data() []byte
Data returns the raw data.
func (*SeriesSegment) Flush ¶ added in v1.5.0
func (s *SeriesSegment) Flush() error
Flush flushes the buffer to disk.
func (*SeriesSegment) ForEachEntry ¶ added in v1.5.0
func (s *SeriesSegment) ForEachEntry(fn func(flag uint8, id uint64, offset int64, key []byte) error) error
ForEachEntry executes fn for every entry in the segment.
func (*SeriesSegment) ID ¶ added in v1.5.0
func (s *SeriesSegment) ID() uint16
ID returns the id the segment was initialized with.
func (*SeriesSegment) InitForWrite ¶ added in v1.5.0
func (s *SeriesSegment) InitForWrite() (err error)
InitForWrite initializes a write handle for the segment. This is only used for the last segment in the series file.
func (*SeriesSegment) MaxSeriesID ¶ added in v1.5.0
func (s *SeriesSegment) MaxSeriesID() uint64
MaxSeriesID returns the highest series id in the segment.
func (*SeriesSegment) Open ¶ added in v1.5.0
func (s *SeriesSegment) Open() error
Open memory maps the data file at the file's path.
func (*SeriesSegment) Path ¶ added in v1.8.0
func (s *SeriesSegment) Path() string
Path returns the file path to the segment.
func (*SeriesSegment) Size ¶ added in v1.5.0
func (s *SeriesSegment) Size() int64
Size returns the size of the data in the segment. This is only populated once InitForWrite() is called.
func (*SeriesSegment) Slice ¶ added in v1.5.0
func (s *SeriesSegment) Slice(pos uint32) []byte
Slice returns a byte slice starting at pos.
func (*SeriesSegment) WriteLogEntry ¶ added in v1.5.0
func (s *SeriesSegment) WriteLogEntry(data []byte) (offset int64, err error)
WriteLogEntry writes entry data into the segment. Returns the offset of the beginning of the entry.
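A minimal write-path sketch. It assumes the package's insert-entry flag constant is named SeriesEntryInsertFlag (the constants are listed in the index but not shown here), and the segment path is illustrative. The returned offset encodes segment id and position, so SplitSeriesOffset can decompose it.

package tsdb_test

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb"
)

func ExampleSeriesSegment_WriteLogEntry() {
	seg, err := tsdb.CreateSeriesSegment(0, "/tmp/_series/0000") // illustrative path
	if err != nil {
		panic(err)
	}
	defer seg.Close()

	// Only the active (last) segment is initialized for writing.
	if err := seg.InitForWrite(); err != nil {
		panic(err)
	}

	key := tsdb.AppendSeriesKey(nil, []byte("cpu"),
		models.NewTags(map[string]string{"host": "serverA"}))
	// SeriesEntryInsertFlag is assumed to be the insert flag constant.
	entry := tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 1, key)

	offset, err := seg.WriteLogEntry(entry)
	if err != nil {
		panic(err)
	}
	segID, pos := tsdb.SplitSeriesOffset(offset)
	fmt.Println(segID, pos)
}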
type SeriesSegmentHeader ¶ added in v1.5.0
type SeriesSegmentHeader struct {
Version uint8
}
SeriesSegmentHeader represents the header of a series segment.
func NewSeriesSegmentHeader ¶ added in v1.5.0
func NewSeriesSegmentHeader() SeriesSegmentHeader
NewSeriesSegmentHeader returns a new instance of SeriesSegmentHeader.
func ReadSeriesSegmentHeader ¶ added in v1.5.0
func ReadSeriesSegmentHeader(data []byte) (hdr SeriesSegmentHeader, err error)
ReadSeriesSegmentHeader returns the header from data.
type Shard ¶
type Shard struct {
	EnableOnOpen bool

	// CompactionDisabled specifies the shard should not schedule compactions.
	// This option is intended for offline tooling.
	CompactionDisabled bool
	// contains filtered or unexported fields
}
Shard represents a self-contained time series database. An inverted index of the measurement and tag data is kept along with the raw time series data. Data can be split across many shards. The query engine in TSDB is responsible for combining the output of many shards into a single query result.
func NewShard ¶
func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard
NewShard returns a new initialized Shard. walPath does not apply to the b1 index type.
func (*Shard) Backup ¶ added in v1.3.6
Backup backs up the shard by creating a tar archive of all TSM files that have been modified since the provided time. See Engine.Backup for more details.
func (*Shard) CreateCursorIterator ¶ added in v1.6.0
func (s *Shard) CreateCursorIterator(ctx context.Context) (CursorIterator, error)
func (*Shard) CreateIterator ¶ added in v0.11.0
func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
CreateIterator returns an iterator for the data in the shard.
func (*Shard) CreateSeriesCursor ¶ added in v1.6.0
func (s *Shard) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error)
func (*Shard) CreateSnapshot ¶ added in v1.0.0
CreateSnapshot will return a path to a temp directory containing hard links to the underlying shard files.
func (*Shard) DeleteMeasurement ¶ added in v0.9.3
DeleteMeasurement deletes a measurement and all underlying series.
func (*Shard) DeleteSeriesRange ¶ added in v0.13.0
func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64) error
DeleteSeriesRange deletes all values for seriesKeys between min and max (inclusive).
func (*Shard) DeleteSeriesRangeWithPredicate ¶ added in v1.6.0
func (s *Shard) DeleteSeriesRangeWithPredicate(itr SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error
DeleteSeriesRangeWithPredicate deletes all values for seriesKeys between min and max (inclusive) for which predicate() returns true. If predicate() is nil, then all values in range are deleted.
func (*Shard) Engine ¶ added in v1.5.5
engine safely (under an RLock) returns a reference to the shard's Engine, or an error if the Engine is closed, or the shard is currently disabled.
The shard's Engine should always be accessed via a call to engine(), rather than directly referencing Shard.engine.
If a caller needs an Engine reference but is already under a lock, then they should use engineNoLock().
func (*Shard) FieldDimensions ¶ added in v0.11.0
func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
FieldDimensions returns unique sets of fields and dimensions across a list of sources.
func (*Shard) ForEachMeasurementName ¶ added in v1.3.6
ForEachMeasurementName iterates over each measurement in the shard.
func (*Shard) Import ¶ added in v1.3.0
Import imports data to the underlying engine for the shard. r should be a reader from a backup created by Backup.
func (*Shard) Index ¶ added in v1.4.0
Index returns a reference to the underlying index. It returns an error if the index is nil.
func (*Shard) IndexType ¶ added in v1.3.0
IndexType returns the index version being used for this shard.
IndexType returns the empty string if it is called before the shard is opened, since it is only at that point that the underlying index type is known.
func (*Shard) IsIdle ¶ added in v1.3.0
IsIdle returns true if the shard is not receiving writes and is fully compacted.
func (*Shard) LastModified ¶ added in v1.2.0
LastModified returns the time when this shard was last modified.
func (*Shard) MeasurementExists ¶ added in v1.3.0
MeasurementExists returns true if the shard contains name. TODO(edd): This method is currently only being called from tests; do we really need it?
func (*Shard) MeasurementFields ¶ added in v1.3.0
func (s *Shard) MeasurementFields(name []byte) *MeasurementFields
MeasurementFields returns fields for a measurement.
func (*Shard) MeasurementNamesByPredicate ¶ added in v1.8.3
MeasurementNamesByPredicate returns fields for a measurement filtered by an expression.
func (*Shard) MeasurementNamesByRegex ¶ added in v1.3.6
MeasurementNamesByRegex returns names of measurements matching the regular expression.
func (*Shard) MeasurementsSketches ¶ added in v1.3.0
MeasurementsSketches returns the measurement sketches for the shard.
func (*Shard) Restore ¶ added in v1.0.0
Restore restores data to the underlying engine for the shard. The shard is reopened after restore.
func (*Shard) RetentionPolicy ¶ added in v1.3.0
RetentionPolicy returns the retention policy of the shard.
func (*Shard) ScheduleFullCompaction ¶ added in v1.5.0
ScheduleFullCompaction forces a full compaction to be schedule on the shard.
func (*Shard) SeriesFile ¶ added in v1.5.5
func (s *Shard) SeriesFile() (*SeriesFile, error)
SeriesFile returns a reference to the underlying series file. It returns an error if the series file is nil.
func (*Shard) SeriesSketches ¶ added in v1.3.0
SeriesSketches returns the measurement sketches for the shard.
func (*Shard) SetCompactionsEnabled ¶ added in v1.3.0
SetCompactionsEnabled enables or disable shard background compactions.
func (*Shard) SetEnabled ¶ added in v1.0.0
SetEnabled enables the shard for queries and writes. When disabled, all writes and queries return an error and compactions are stopped for the shard.
func (*Shard) SetNewReadersBlocked ¶ added in v1.11.6
SetNewReadersBlocked sets if new readers can access the shard. If blocked is true, the number of reader blocks is incremented and new readers will receive an error instead of shard access. If blocked is false, the number of reader blocks is decremented. If the reader blocks drops to 0, then new readers will be granted access to the shard.
func (*Shard) Statistics ¶ added in v1.0.0
Statistics returns statistics for periodic monitoring.
func (*Shard) TagKeyCardinality ¶ added in v1.3.0
func (*Shard) WithLogger ¶ added in v1.2.0
WithLogger sets the logger on the shard. It must be called before Open.
func (*Shard) WritePoints ¶
func (s *Shard) WritePoints(points []models.Point, tracker StatsTracker) error
WritePoints writes the raw data points and any new metadata to the index in the shard.
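A minimal sketch of standing up a shard and writing points. NewEngineOptions, Shard.Open, and Shard.Close are assumed to exist with these shapes (they belong to this package but are not shown in this excerpt), and all paths are illustrative.

package tsdb_test

import (
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb"
)

func ExampleShard_WritePoints() {
	sfile := tsdb.NewSeriesFile("/tmp/db/_series") // illustrative paths throughout
	if err := sfile.Open(); err != nil {
		panic(err)
	}
	defer sfile.Close()

	opt := tsdb.NewEngineOptions() // assumed default-options constructor
	sh := tsdb.NewShard(1, "/tmp/db/rp/1", "/tmp/wal/db/rp/1", sfile, opt)
	if err := sh.Open(); err != nil { // assumed no-argument Open
		panic(err)
	}
	defer sh.Close()

	pts, err := models.ParsePointsString("cpu,host=serverA value=0.5")
	if err != nil {
		panic(err)
	}
	if err := sh.WritePoints(pts, tsdb.NoopStatsTracker()); err != nil {
		panic(err)
	}
}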
type ShardError ¶ added in v0.11.0
type ShardError struct {
	Err error
	// contains filtered or unexported fields
}
A ShardError implements the error interface, and contains extra context about the shard that generated the error.
func (ShardError) Error ¶ added in v0.11.0
func (e ShardError) Error() string
Error returns the string representation of the error, to satisfy the error interface.
func (ShardError) Unwrap ¶ added in v1.11.7
func (e ShardError) Unwrap() error
Unwrap returns the underlying error.
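Since Unwrap exposes the underlying error, sentinel checks pass through the wrapper. A minimal sketch with an illustrative sentinel error:

package tsdb_test

import (
	"errors"
	"fmt"

	"github.com/influxdata/influxdb/tsdb"
)

func ExampleShardError() {
	errDiskFull := errors.New("disk full") // illustrative sentinel
	err := tsdb.NewShardError(42, errDiskFull)

	fmt.Println(errors.Is(err, errDiskFull)) // true, via Unwrap
	fmt.Println(err)                         // message includes the shard id for context
}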
type ShardGroup ¶ added in v1.2.0
type ShardGroup interface {
	MeasurementsByRegex(re *regexp.Regexp) []string
	FieldKeysByMeasurement(name []byte) []string
	FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)
	MapType(measurement, field string) influxql.DataType
	CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
	IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
	ExpandSources(sources influxql.Sources) (influxql.Sources, error)
}
type ShardStatistics ¶ added in v1.0.0
type ShardStatistics struct {
	WriteReq           int64
	WriteReqOK         int64
	WriteReqErr        int64
	FieldsCreated      int64
	WritePointsErr     int64
	WritePointsDropped int64
	WritePointsOK      int64
	WriteValuesOK      int64
	BytesWritten       int64
	DiskBytes          int64
}
ShardStatistics maintains statistics for a shard.
type Shards ¶ added in v0.11.0
type Shards []*Shard
Shards represents a sortable list of shards.
func (Shards) CreateIterator ¶ added in v1.2.0
func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error)
func (Shards) CreateSeriesCursor ¶ added in v1.6.0
func (a Shards) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (_ SeriesCursor, err error)
func (Shards) ExpandSources ¶ added in v1.2.0
func (Shards) FieldDimensions ¶ added in v1.2.0
func (Shards) FieldKeysByMeasurement ¶ added in v1.6.1
FieldKeysByMeasurement returns a de-duplicated, sorted, set of field keys for the provided measurement name.
func (Shards) FieldKeysByPredicate ¶ added in v1.8.3
FieldKeysByPredicate returns the field keys for series that match the given predicate.
func (Shards) IteratorCost ¶ added in v1.4.0
func (a Shards) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error)
func (Shards) MeasurementNamesByPredicate ¶ added in v1.8.3
MeasurementNamesByPredicate returns the measurements that match the given predicate.
func (Shards) MeasurementsByRegex ¶ added in v1.2.0
MeasurementsByRegex returns the unique set of measurements matching the provided regex, for all the shards.
type StatsTracker ¶ added in v1.9.0
type StatsTracker struct {
	AddedPoints            func(points, values int64)
	AddedMeasurementPoints func(measurement []byte, points, values int64)
	AddedSeries            func(newSeries int64)
	AddedMeasurementSeries func(measurement []byte, newSeries int64)
}
func NoopStatsTracker ¶ added in v1.9.0
func NoopStatsTracker() StatsTracker
type Store ¶
type Store struct {
	SeriesFileMaxSize int64 // Determines size of series file mmap. Can be altered in tests.

	EngineOptions EngineOptions
	Logger        *zap.Logger
	// contains filtered or unexported fields
}
Store manages shards and indexes for databases.
func NewStore ¶
NewStore returns a new store with the given path and a default configuration. The returned store must be initialized by calling Open before using it.
func (*Store) BackupShard ¶ added in v0.10.0
BackupShard will get the shard and have the engine backup since the passed in time to the writer.
func (*Store) Close ¶
Close closes the store and all associated shards. After calling Close accessing shards through the Store will result in ErrStoreClosed being returned.
func (*Store) CreateShard ¶
CreateShard creates a shard with the given id and retention policy on a database.
func (*Store) CreateShardSnapshot ¶ added in v1.0.0
CreateShardSnapshot will create a hard link to the underlying shard and return a path. The caller is responsible for cleaning up (removing) the file path returned.
func (*Store) Databases ¶ added in v0.9.4
Databases returns the names of all databases managed by the store.
func (*Store) DeleteDatabase ¶
DeleteDatabase will close all shards associated with a database and remove the directory and files from disk.
func (*Store) DeleteMeasurement ¶ added in v0.11.0
DeleteMeasurement removes a measurement and all associated series from a database.
func (*Store) DeleteRetentionPolicy ¶ added in v0.11.0
DeleteRetentionPolicy will close all shards associated with the provided retention policy, remove the retention policy directories on both the DB and WAL, and remove all shard files from disk.
func (*Store) DeleteSeries ¶ added in v0.11.0
func (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error
DeleteSeries loops through the local shards and deletes the series data for the passed in series keys.
func (*Store) DeleteShard ¶
DeleteShard removes a shard from disk.
func (*Store) DiskSize ¶ added in v0.9.4
DiskSize returns the size of all the shard files in bytes. This size does not include the WAL size.
func (*Store) ExpandSources ¶ added in v0.11.0
ExpandSources expands sources against all local shards.
func (*Store) ExportShard ¶ added in v1.5.0
func (*Store) ImportShard ¶ added in v1.3.0
ImportShard imports the contents of r to a given shard. All files in the backup are added as new files which may cause duplicated data to occur requiring more expensive compactions.
func (*Store) IndexBytes ¶ added in v1.6.0
func (*Store) MeasurementNames ¶ added in v1.3.0
func (s *Store) MeasurementNames(ctx context.Context, auth query.FineAuthorizer, database string, retentionPolicy string, cond influxql.Expr) ([][]byte, error)
MeasurementNames returns a slice of all measurements. MeasurementNames accepts an optional condition expression. If cond is nil, then all measurements for the database will be returned. retentionPolicy is only valid for tsi databases; inmem will error.
func (*Store) MeasurementSeriesCounts ¶ added in v1.3.0
MeasurementSeriesCounts returns the number of measurements and series in all the shards' indices.
func (*Store) MeasurementsCardinality ¶ added in v1.3.0
MeasurementsCardinality returns an estimation of the measurement cardinality for the provided database.
Cardinality is calculated using a sketch-based estimation. The result of this method cannot be combined with any other results.
func (*Store) MeasurementsSketches ¶ added in v1.5.0
func (s *Store) MeasurementsSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error)
MeasurementsSketches returns the sketches associated with the measurement data in all the shards in the provided database.
The returned sketches can be combined with other sketches to provide an estimation across distributed databases.
func (*Store) Open ¶
Open initializes the store, creating all necessary directories, loading all shards as well as initializing periodic maintenance of them.
func (*Store) RestoreShard ¶ added in v1.0.0
RestoreShard restores a backup from r to a given shard. This will only overwrite files included in the backup.
func (*Store) SeriesCardinality ¶ added in v1.3.0
SeriesCardinality returns the exact series cardinality for the provided database.
Cardinality is calculated exactly by unioning all shards' bitsets of series IDs. The result of this method cannot be combined with any other results.
func (*Store) SeriesSketches ¶ added in v1.5.0
func (s *Store) SeriesSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error)
SeriesSketches returns the sketches associated with the series data in all the shards in the provided database.
The returned sketches can be combined with other sketches to provide an estimation across distributed databases.
func (*Store) SetShardEnabled ¶ added in v1.0.0
SetShardEnabled enables or disables a shard for read and writes.
func (*Store) SetShardNewReadersBlocked ¶ added in v1.11.6
SetShardNewReadersBlocked sets if new readers can access the shard. If blocked is true, the number of reader blocks is incremented and new readers will receive an error instead of shard access. If blocked is false, the number of reader blocks is decremented. If the reader blocks drops to 0, then new readers will be granted access to the shard.
func (*Store) SetShardOpenErrorForTest ¶ added in v1.10.0
func (*Store) ShardDigest ¶ added in v1.5.0
ShardDigest returns a digest of the shard with the specified ID.
func (*Store) ShardGroup ¶ added in v1.2.0
func (s *Store) ShardGroup(ids []uint64) ShardGroup
ShardGroup returns a ShardGroup with a list of shards by id.
func (*Store) ShardInUse ¶ added in v1.11.6
ShardInUse returns true if a shard is in-use (e.g. has active readers). SetShardNewReadersBlocked(id, true) should be called before checking ShardInUse to prevent race conditions where a reader could gain access to the shard immediately after ShardInUse is called.
func (*Store) ShardRelativePath ¶ added in v0.10.0
ShardRelativePath will return the relative path to the shard, i.e., <database>/<retention>/<id>.
func (*Store) Statistics ¶ added in v1.0.0
Statistics returns statistics for periodic monitoring.
func (*Store) TagKeys ¶ added in v1.4.0
func (s *Store) TagKeys(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error)
TagKeys returns the tag keys in the given database, matching the condition.
func (*Store) TagValues ¶ added in v1.0.0
func (s *Store) TagValues(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]TagValues, error)
TagValues returns the tag keys and values for the provided shards, where the tag values satisfy the provided condition.
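A minimal metadata-query sketch against an already-open Store. It assumes query.OpenAuthorizer satisfies query.FineAuthorizer and uses influxql.ParseExpr for the condition (both from this project's sibling packages); the helper, shard ids, and condition are illustrative.

package tsdb_test

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/query"
	"github.com/influxdata/influxdb/tsdb"
	"github.com/influxdata/influxql"
)

// printHostValues is an illustrative helper; st is an already-open *tsdb.Store
// and shardIDs identifies the shards to query.
func printHostValues(st *tsdb.Store, shardIDs []uint64) error {
	cond, err := influxql.ParseExpr(`"host" = 'serverA'`)
	if err != nil {
		return err
	}
	vals, err := st.TagValues(context.Background(), query.OpenAuthorizer, shardIDs, cond)
	if err != nil {
		return err
	}
	for _, tv := range vals {
		fmt.Println(tv)
	}
	return nil
}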
func (*Store) WithLogger ¶ added in v1.2.0
WithLogger sets the logger for the store.
func (*Store) WriteToShard ¶
WriteToShard writes a list of points to a shard identified by its ID.
type StoreStatistics ¶ added in v1.9.0
type StringArray ¶ added in v1.7.0
type StringArray = cursors.StringArray
func NewStringArrayLen ¶ added in v1.7.0
func NewStringArrayLen(sz int) *StringArray
type StringArrayCursor ¶ added in v1.7.0
type StringArrayCursor = cursors.StringArrayCursor
type TagKeyIterator ¶ added in v1.5.0
TagKeyIterator represents a iterator over a list of tag keys.
func MergeTagKeyIterators ¶ added in v1.5.0
func MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator
MergeTagKeyIterators returns an iterator that merges a set of iterators.
type TagKeyIterators ¶ added in v1.5.0
type TagKeyIterators []TagKeyIterator
func (TagKeyIterators) Close ¶ added in v1.5.0
func (a TagKeyIterators) Close() (err error)
type TagKeysSlice ¶ added in v1.4.0
type TagKeysSlice []TagKeys
func (TagKeysSlice) Len ¶ added in v1.4.0
func (a TagKeysSlice) Len() int
func (TagKeysSlice) Less ¶ added in v1.4.0
func (a TagKeysSlice) Less(i, j int) bool
func (TagKeysSlice) Swap ¶ added in v1.4.0
func (a TagKeysSlice) Swap(i, j int)
type TagValueIterator ¶ added in v1.5.0
TagValueIterator represents a iterator over a list of tag values.
func MergeTagValueIterators ¶ added in v1.5.0
func MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator
MergeTagValueIterators returns an iterator that merges a set of iterators.
type TagValueIterators ¶ added in v1.5.0
type TagValueIterators []TagValueIterator
func (TagValueIterators) Close ¶ added in v1.5.0
func (a TagValueIterators) Close() (err error)
type TagValuesSlice ¶ added in v1.3.0
type TagValuesSlice []TagValues
func (TagValuesSlice) Len ¶ added in v1.3.0
func (a TagValuesSlice) Len() int
func (TagValuesSlice) Less ¶ added in v1.3.0
func (a TagValuesSlice) Less(i, j int) bool
func (TagValuesSlice) Swap ¶ added in v1.3.0
func (a TagValuesSlice) Swap(i, j int)
type UnsignedArray ¶ added in v1.7.0
type UnsignedArray = cursors.UnsignedArray
func NewUnsignedArrayLen ¶ added in v1.7.0
func NewUnsignedArrayLen(sz int) *UnsignedArray
type UnsignedArrayCursor ¶ added in v1.7.0
type UnsignedArrayCursor = cursors.UnsignedArrayCursor
type WriteContext ¶ added in v1.9.0
type WriteContext struct {
	// Could be a system UserId, e.g. HintedHandoffUser
	UserId string
}
WriteContext holds some request-scoped details about the write, for metrics and logging. Currently just the UserId login name. Eventually it could also hold a context.Context for cancellation.
Source Files ¶
Directories ¶
Path | Synopsis
---|---
mock | Package mock is a generated GoMock package.
engine | Package engine can be imported to initialize and register all available TSDB engines.
tsm1 | Package tsm1 provides a TSDB in the Time Structured Merge tree format.
inmem | Package inmem implements a shared, in-memory index for each database.
tsi1 | Package tsi1 provides a memory-mapped index implementation that supports high cardinality series.