Documentation ¶
Index ¶
- Constants
- Variables
- func BlockCount(block []byte) int
- func BlockType(block []byte) (byte, error)
- func CountTimestamps(b []byte) int
- func NewEngine(path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine
- func NewMultiFieldCursor(fields []string, cursors []tsdb.Cursor, ascending bool) tsdb.Cursor
- func ParseTSMFileName(name string) (int, int, error)
- func SeriesFieldKey(seriesKey, field string) string
- func ZigZagDecode(v uint64) int64
- func ZigZagEncode(x int64) uint64
- type BlockIterator
- type BoolDecoder
- type BoolEncoder
- type BoolValue
- type Cache
- func (c *Cache) ClearSnapshot(snapshot *Cache)
- func (c *Cache) Deduplicate()
- func (c *Cache) Delete(keys []string)
- func (c *Cache) Keys() []string
- func (c *Cache) Lock()
- func (c *Cache) MaxSize() uint64
- func (c *Cache) Size() uint64
- func (c *Cache) Snapshot() *Cache
- func (c *Cache) Store() map[string]*entry
- func (c *Cache) Unlock()
- func (c *Cache) Values(key string) Values
- func (c *Cache) Write(key string, values []Value) error
- func (c *Cache) WriteMulti(values map[string][]Value) error
- type CacheLoader
- type CompactionGroup
- type CompactionPlanner
- type Compactor
- type DefaultPlanner
- type DeleteWALEntry
- type EmptyValue
- type Engine
- func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error
- func (e *Engine) Begin(writable bool) (tsdb.Tx, error)
- func (e *Engine) Close() error
- func (e *Engine) DeleteMeasurement(name string, seriesKeys []string) error
- func (e *Engine) DeleteSeries(seriesKeys []string) error
- func (e *Engine) Format() tsdb.EngineFormat
- func (e *Engine) KeyCursor(key string) *KeyCursor
- func (e *Engine) LoadMetadataIndex(_ *tsdb.Shard, index *tsdb.DatabaseIndex, ...) error
- func (e *Engine) Open() error
- func (e *Engine) Path() string
- func (e *Engine) PerformMaintenance()
- func (e *Engine) SeriesCount() (n int, err error)
- func (e *Engine) SetLogOutput(w io.Writer)
- func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool
- func (e *Engine) WritePoints(points []models.Point, ...) error
- func (e *Engine) WriteSnapshot() error
- func (e *Engine) WriteTo(w io.Writer) (n int64, err error)
- type FileStat
- type FileStore
- func (f *FileStore) Add(files ...TSMFile)
- func (f *FileStore) BlockCount(path string, idx int) int
- func (f *FileStore) Close() error
- func (f *FileStore) Count() int
- func (f *FileStore) CurrentGeneration() int
- func (f *FileStore) Delete(keys []string) error
- func (f *FileStore) KeyCursor(key string) *KeyCursor
- func (f *FileStore) Keys() []string
- func (f *FileStore) LastModified() time.Time
- func (f *FileStore) NextGeneration() int
- func (f *FileStore) Open() error
- func (f *FileStore) Read(key string, t time.Time) ([]Value, error)
- func (f *FileStore) Remove(paths ...string)
- func (f *FileStore) Replace(oldFiles, newFiles []string) error
- func (f *FileStore) Stats() []FileStat
- func (f *FileStore) Type(key string) (byte, error)
- type FloatDecoder
- type FloatEncoder
- type FloatValue
- type IndexEntry
- type Int64Decoder
- type Int64Encoder
- type Int64Value
- type KeyCursor
- type KeyIterator
- type SegmentInfo
- type StringDecoder
- type StringEncoder
- type StringValue
- type TSMFile
- type TSMIndex
- type TSMReader
- func (t *TSMReader) BlockIterator() *BlockIterator
- func (t *TSMReader) Close() error
- func (t *TSMReader) Contains(key string) bool
- func (t *TSMReader) ContainsValue(key string, ts time.Time) bool
- func (t *TSMReader) Delete(keys []string) error
- func (t *TSMReader) Entries(key string) []*IndexEntry
- func (t *TSMReader) HasTombstones() bool
- func (t *TSMReader) IndexSize() uint32
- func (t *TSMReader) Key(index int) (string, []*IndexEntry)
- func (t *TSMReader) KeyAt(idx int) string
- func (t *TSMReader) KeyCount() int
- func (t *TSMReader) KeyRange() (string, string)
- func (t *TSMReader) Keys() []string
- func (t *TSMReader) LastModified() time.Time
- func (t *TSMReader) Path() string
- func (t *TSMReader) Read(key string, timestamp time.Time) ([]Value, error)
- func (t *TSMReader) ReadAll(key string) ([]Value, error)
- func (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error)
- func (t *TSMReader) Remove() error
- func (t *TSMReader) Size() uint32
- func (t *TSMReader) Stats() FileStat
- func (t *TSMReader) TimeRange() (time.Time, time.Time)
- func (t *TSMReader) TombstoneFiles() []FileStat
- func (t *TSMReader) Type(key string) (byte, error)
- type TSMReaderOptions
- type TSMWriter
- type TimeDecoder
- type TimeEncoder
- type Tombstoner
- type Value
- type Values
- func (a Values) Deduplicate() Values
- func (a Values) Encode(buf []byte) ([]byte, error)
- func (a Values) InfluxQLType() (influxql.DataType, error)
- func (a Values) Len() int
- func (a Values) Less(i, j int) bool
- func (a Values) MaxTime() int64
- func (a Values) MinTime() int64
- func (a Values) Size() int
- func (a Values) Swap(i, j int)
- type WAL
- func (l *WAL) Close() error
- func (l *WAL) CloseSegment() error
- func (l *WAL) ClosedSegments() ([]string, error)
- func (l *WAL) Delete(keys []string) (int, error)
- func (l *WAL) LastWriteTime() time.Time
- func (l *WAL) Open() error
- func (l *WAL) Path() string
- func (l *WAL) Remove(files []string) error
- func (l *WAL) WritePoints(values map[string][]Value) (int, error)
- type WALEntry
- type WALSegmentReader
- type WALSegmentWriter
- type WalEntryType
- type WriteWALEntry
Constants ¶
const (
    CompactionTempExtension = "tmp"
    TSMFileExtension        = "tsm"
)
const (
    // BlockFloat64 designates a block that encodes float64 values
    BlockFloat64 = byte(0)

    // BlockInt64 designates a block that encodes int64 values
    BlockInt64 = byte(1)

    // BlockBool designates a block that encodes bool values
    BlockBool = byte(2)

    // BlockString designates a block that encodes string values
    BlockString = byte(3)
)
const (
    // DefaultSegmentSize of 10MB is the size at which segment files will be rolled over
    DefaultSegmentSize = 10 * 1024 * 1024

    // WALFileExtension is the file extension we expect for wal segments
    WALFileExtension = "wal"

    WALFilePrefix = "_"
)
const (
    // MagicNumber is written as the first 4 bytes of a data file to
    // identify the file as a tsm1 formatted file
    MagicNumber uint32 = 0x16D116D1

    Version byte = 1
)
Variables ¶
var (
    ErrNoValues  = fmt.Errorf("no values written")
    ErrTSMClosed = fmt.Errorf("tsm file closed")
)
var ErrCacheInvalidCheckpoint = fmt.Errorf("invalid checkpoint")
var ErrCacheMemoryExceeded = fmt.Errorf("cache maximum memory size exceeded")
var ErrWALClosed = fmt.Errorf("WAL closed")
Functions ¶
func BlockCount ¶ added in v0.10.0
func BlockType ¶ added in v0.9.6
BlockType returns the type of value encoded in a block or an error if the block type is unknown.
func CountTimestamps ¶ added in v0.10.0
func NewMultiFieldCursor ¶
NewMultiFieldCursor returns an instance of Cursor that joins the results of cursors.
func ParseTSMFileName ¶ added in v0.9.6
ParseTSMFileName parses the generation and sequence from a TSM file name.
func SeriesFieldKey ¶
SeriesFieldKey combines a series key and field name into a unique string that can be hashed to a numeric ID.
func ZigZagDecode ¶
ZigZagDecode converts a previously zigzag-encoded uint64 back to an int64.
func ZigZagEncode ¶
ZigZagEncode converts an int64 to a uint64 by zigzagging negative and positive values across even and odd numbers. For example, [0, -1, 1, -2] becomes [0, 1, 2, 3].
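For illustration, a minimal standalone sketch of the zigzag mapping described above (not the package's own source, but the same transform):

// zigZagEncode interleaves negative and positive values: 0, -1, 1, -2 -> 0, 1, 2, 3.
func zigZagEncode(x int64) uint64 {
    return uint64(x<<1) ^ uint64(x>>63)
}

// zigZagDecode reverses the transform back to a signed value.
func zigZagDecode(v uint64) int64 {
    return int64(v>>1) ^ -int64(v&1)
}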
Types ¶
type BlockIterator ¶ added in v0.10.0
type BlockIterator struct {
// contains filtered or unexported fields
}
BlockIterator allows iterating over each block in a TSM file in order. It provides raw access to the block bytes without decoding them.
func (*BlockIterator) Next ¶ added in v0.10.0
func (b *BlockIterator) Next() bool
func (*BlockIterator) PeekNext ¶ added in v0.10.0
func (b *BlockIterator) PeekNext() string
type BoolDecoder ¶
BoolDecoder decodes a series of bools from an in-memory buffer.
func NewBoolDecoder ¶
func NewBoolDecoder(b []byte) BoolDecoder
NewBoolDecoder returns a new instance of BoolDecoder.
type BoolEncoder ¶
BoolEncoder encodes a series of bools to an in-memory buffer.
func NewBoolEncoder ¶
func NewBoolEncoder() BoolEncoder
NewBoolEncoder returns a new instance of BoolEncoder.
type BoolValue ¶
type BoolValue struct {
// contains filtered or unexported fields
}
func DecodeBoolBlock ¶ added in v0.9.6
type Cache ¶ added in v0.9.6
type Cache struct {
// contains filtered or unexported fields
}
Cache maintains an in-memory store of Values for a set of keys.
func NewCache ¶ added in v0.9.6
NewCache returns an instance of a cache which will use a maximum of maxSize bytes of memory.
func (*Cache) ClearSnapshot ¶ added in v0.9.6
ClearSnapshot will remove the snapshot cache from the list of flushing caches and adjust the size.
func (*Cache) Deduplicate ¶ added in v0.10.0
func (c *Cache) Deduplicate()
Deduplicate sorts the snapshot before returning it. The compactor and any queries coming in while it writes will need the values sorted.
func (*Cache) Keys ¶ added in v0.9.6
Keys returns a sorted slice of all keys under management by the cache.
func (*Cache) MaxSize ¶ added in v0.9.6
MaxSize returns the maximum number of bytes the cache may consume.
func (*Cache) Size ¶ added in v0.9.6
Size returns the number of point-calculated bytes the cache currently uses.
func (*Cache) Snapshot ¶ added in v0.9.6
Snapshot will take a snapshot of the current cache, add it to the slice of caches that are being flushed, and reset the current cache with new values.
func (*Cache) Store ¶ added in v0.9.6
Store returns the underlying cache store. This is not goroutine safe! Protect access by using the Lock and Unlock functions on Cache.
func (*Cache) Values ¶ added in v0.9.6
Values returns a copy of all values, deduped and sorted, for the given key.
type CacheLoader ¶ added in v0.9.6
CacheLoader processes a set of WAL segment files, and loads a cache with the data contained within those files. Processing of the supplied files takes place in the order they exist in the files slice.
func NewCacheLoader ¶ added in v0.9.6
func NewCacheLoader(files []string) *CacheLoader
NewCacheLoader returns a new instance of a CacheLoader.
func (*CacheLoader) Load ¶ added in v0.9.6
func (cl *CacheLoader) Load(cache *Cache) error
Load returns a cache loaded with the data contained within the segment files. If, during reading of a segment file, corruption is encountered, that segment file is truncated up to and including the last valid byte, and processing continues with the next segment file.
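A minimal sketch of recovering a cache from closed WAL segments. The import path and the assumption that NewCache takes a maximum size in bytes reflect this release but are not confirmed by the signatures above; the segment path is a placeholder.

package main

import (
    "log"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    // Segment paths would normally come from WAL.ClosedSegments(); this is a placeholder.
    segments := []string{"/var/lib/influxdb/wal/db/rp/1/_00001.wal"}

    // Build an empty cache bounded at 100 MB and replay the segment files into it.
    cache := tsm1.NewCache(100 * 1024 * 1024)
    loader := tsm1.NewCacheLoader(segments)
    if err := loader.Load(cache); err != nil {
        log.Fatal(err)
    }

    // The cache now holds the recovered values, keyed by series and field.
    for _, key := range cache.Keys() {
        log.Printf("%s: %d values", key, len(cache.Values(key)))
    }
}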
type CompactionGroup ¶ added in v0.10.0
type CompactionGroup []string
type CompactionPlanner ¶ added in v0.9.6
type CompactionPlanner interface {
    Plan(lastWrite time.Time) []CompactionGroup
    PlanLevel(level int) []CompactionGroup
}
CompactionPlanner determines what TSM files and WAL segments to include in a given compaction run.
type Compactor ¶ added in v0.9.6
type Compactor struct {
    Dir    string
    Cancel chan struct{}
    Size   int

    FileStore interface {
        NextGeneration() int
    }
}
Compactor merges multiple TSM files into new files, or writes a Cache into one or more TSM files.
func (*Compactor) Clone ¶ added in v0.9.6
Clone will return a new compactor that can be used even if the engine is closed.
func (*Compactor) CompactFast ¶ added in v0.10.0
CompactFast will write multiple smaller TSM files into one or more larger files.
func (*Compactor) CompactFull ¶ added in v0.10.0
CompactFull will write multiple smaller TSM files into one or more larger files.
type DefaultPlanner ¶ added in v0.9.6
type DefaultPlanner struct {
    FileStore interface {
        Stats() []FileStat
        LastModified() time.Time
        BlockCount(path string, idx int) int
    }

    // CompactFullWriteColdDuration specifies the length of time after
    // which if no writes have been committed to the WAL, the engine will
    // do a full compaction of the TSM files in this shard. This duration
    // should always be greater than the CacheFlushWriteColdDuration
    CompactFullWriteColdDuration time.Duration
    // contains filtered or unexported fields
}
DefaultPlanner implements CompactionPlanner using a strategy to roll up multiple generations of TSM files into larger files in stages. It attempts to minimize the number of TSM files on disk while rolling up a bounded number of files.
func (*DefaultPlanner) Plan ¶ added in v0.9.6
func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup
Plan returns a set of TSM files to rewrite for level 4 or higher. The planning returns multiple groups if possible to allow compactions to run concurrently.
func (*DefaultPlanner) PlanLevel ¶ added in v0.10.0
func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup
PlanLevel returns a set of TSM files to rewrite for a specific level.
type DeleteWALEntry ¶ added in v0.9.6
type DeleteWALEntry struct {
Keys []string
}
DeleteWALEntry represents the deletion of multiple series.
func (*DeleteWALEntry) Encode ¶ added in v0.9.6
func (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error)
func (*DeleteWALEntry) MarshalBinary ¶ added in v0.9.6
func (w *DeleteWALEntry) MarshalBinary() ([]byte, error)
func (*DeleteWALEntry) Type ¶ added in v0.9.6
func (w *DeleteWALEntry) Type() WalEntryType
func (*DeleteWALEntry) UnmarshalBinary ¶ added in v0.9.6
func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error
type EmptyValue ¶
type EmptyValue struct { }
func (*EmptyValue) Size ¶
func (e *EmptyValue) Size() int
func (*EmptyValue) String ¶ added in v0.9.6
func (e *EmptyValue) String() string
func (*EmptyValue) Time ¶
func (e *EmptyValue) Time() time.Time
func (*EmptyValue) UnixNano ¶
func (e *EmptyValue) UnixNano() int64
func (*EmptyValue) Value ¶
func (e *EmptyValue) Value() interface{}
type Engine ¶
type Engine struct {
    WAL   *WAL
    Cache *Cache

    Compactor      *Compactor
    CompactionPlan CompactionPlanner

    FileStore *FileStore

    MaxPointsPerBlock int

    // CacheFlushMemorySizeThreshold specifies the minimum size threshold for
    // the cache when the engine should write a snapshot to a TSM file
    CacheFlushMemorySizeThreshold uint64

    // CacheFlushWriteColdDuration specifies the length of time after which if
    // no writes have been committed to the WAL, the engine will write
    // a snapshot of the cache to a TSM file
    CacheFlushWriteColdDuration time.Duration
    // contains filtered or unexported fields
}
Engine represents a storage engine with compressed blocks.
func (*Engine) Backup ¶ added in v0.10.0
Backup will write a tar archive of any TSM files modified since the passed in time to the passed in writer. The basePath will be prepended to the names of the files in the archive. It will force a snapshot of the WAL first, then perform the backup with a read lock against the file store. This means that new TSM files will not be able to be created in this shard while the backup is running. For shards that are still actively getting writes, this could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (*Engine) DeleteMeasurement ¶
DeleteMeasurement deletes a measurement and all related series.
func (*Engine) DeleteSeries ¶
DeleteSeries deletes the series from the engine.
func (*Engine) Format ¶
func (e *Engine) Format() tsdb.EngineFormat
Format returns the format type of this engine.
func (*Engine) LoadMetadataIndex ¶
func (e *Engine) LoadMetadataIndex(_ *tsdb.Shard, index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error
LoadMetadataIndex loads the shard metadata into memory.
func (*Engine) PerformMaintenance ¶
func (e *Engine) PerformMaintenance()
PerformMaintenance is for periodic maintenance of the store. It is a no-op for this engine.
func (*Engine) SeriesCount ¶
SeriesCount returns the number of series buckets on the shard.
func (*Engine) ShouldCompactCache ¶ added in v0.10.0
ShouldCompactCache returns true if the Cache is over its flush threshold, or if the passed in lastWriteTime is older than the write cold threshold.
func (*Engine) WritePoints ¶
func (e *Engine) WritePoints(points []models.Point, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error
WritePoints writes metadata and point data into the engine. Returns an error if new points are added to an existing key.
func (*Engine) WriteSnapshot ¶ added in v0.10.0
WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.
type FileStat ¶ added in v0.9.6
type FileStat struct {
    Path             string
    HasTombstone     bool
    Size             uint32
    LastModified     time.Time
    MinTime, MaxTime time.Time
    MinKey, MaxKey   string
}
func (FileStat) ContainsKey ¶ added in v0.9.6
func (FileStat) OverlapsKeyRange ¶ added in v0.9.6
type FileStore ¶ added in v0.9.6
func NewFileStore ¶ added in v0.9.6
func (*FileStore) BlockCount ¶ added in v0.10.0
BlockCount returns the number of values stored in the block at location idx in the file at path. If path does not match any file in the store, 0 is returned. If idx is out of range for the number of blocks in the file, 0 is returned.
func (*FileStore) CurrentGeneration ¶ added in v0.9.6
CurrentGeneration returns the current generation of the TSM files.
func (*FileStore) LastModified ¶ added in v0.9.6
LastModified returns the last time the file store was updated with new TSM files or a delete.
func (*FileStore) NextGeneration ¶ added in v0.9.6
NextGeneration returns the max file ID + 1.
func (*FileStore) Remove ¶ added in v0.9.6
Remove removes the files with matching paths from the set of active files. It does not remove the paths from disk.
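A sketch of opening a FileStore over a shard directory and listing what it holds. The import path, the placeholder directory, and the assumption that NewFileStore takes the shard's data directory are not confirmed by the signatures above.

package main

import (
    "log"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    // NewFileStore is assumed to take the shard's data directory.
    fs := tsm1.NewFileStore("/var/lib/influxdb/data/db/rp/1")
    if err := fs.Open(); err != nil {
        log.Fatal(err)
    }
    defer fs.Close()

    log.Printf("%d TSM files, current generation %d", fs.Count(), fs.CurrentGeneration())

    // Each FileStat summarizes one TSM file in the store.
    for _, stat := range fs.Stats() {
        log.Printf("%s: %d bytes, keys %q-%q, tombstones=%v",
            stat.Path, stat.Size, stat.MinKey, stat.MaxKey, stat.HasTombstone)
    }
}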
type FloatDecoder ¶
type FloatDecoder struct {
// contains filtered or unexported fields
}
FloatDecoder decodes a byte slice into multiple float64 values.
func NewFloatDecoder ¶
func NewFloatDecoder(b []byte) (*FloatDecoder, error)
func (*FloatDecoder) Error ¶
func (it *FloatDecoder) Error() error
func (*FloatDecoder) Next ¶
func (it *FloatDecoder) Next() bool
func (*FloatDecoder) Values ¶
func (it *FloatDecoder) Values() float64
type FloatEncoder ¶
type FloatEncoder struct {
// contains filtered or unexported fields
}
FloatEncoder encodes multiple float64s into a byte slice.
func NewFloatEncoder ¶
func NewFloatEncoder() *FloatEncoder
func (*FloatEncoder) Bytes ¶
func (s *FloatEncoder) Bytes() ([]byte, error)
func (*FloatEncoder) Finish ¶
func (s *FloatEncoder) Finish()
func (*FloatEncoder) Push ¶
func (s *FloatEncoder) Push(v float64)
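A round-trip sketch using the FloatEncoder and FloatDecoder documented above; the import path is assumed for this release.

package main

import (
    "log"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    // Encode a few float64 samples into a compressed byte slice.
    enc := tsm1.NewFloatEncoder()
    for _, v := range []float64{1.5, 2.5, 2.5, 3.0} {
        enc.Push(v)
    }
    enc.Finish()

    b, err := enc.Bytes()
    if err != nil {
        log.Fatal(err)
    }

    // Decode the values back out in order.
    dec, err := tsm1.NewFloatDecoder(b)
    if err != nil {
        log.Fatal(err)
    }
    for dec.Next() {
        log.Println(dec.Values())
    }
    if err := dec.Error(); err != nil {
        log.Fatal(err)
    }
}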
type FloatValue ¶
type FloatValue struct {
// contains filtered or unexported fields
}
func DecodeFloatBlock ¶ added in v0.9.6
func DecodeFloatBlock(block []byte, a []FloatValue) ([]FloatValue, error)
func (*FloatValue) Size ¶
func (f *FloatValue) Size() int
func (*FloatValue) String ¶ added in v0.9.6
func (f *FloatValue) String() string
func (*FloatValue) Time ¶
func (f *FloatValue) Time() time.Time
func (*FloatValue) UnixNano ¶
func (f *FloatValue) UnixNano() int64
func (*FloatValue) Value ¶
func (f *FloatValue) Value() interface{}
type IndexEntry ¶
type IndexEntry struct {
// The min and max time of all points stored in the block.
MinTime, MaxTime time.Time
// The absolute position in the file where this block is located.
Offset int64
// The size in bytes of the block in the file.
Size uint32
}
IndexEntry is the index information for a given block in a TSM file.
func (*IndexEntry) Contains ¶
func (e *IndexEntry) Contains(t time.Time) bool
Contains returns true if this IndexEntry may contain values for the given time. The min and max times are inclusive.
func (*IndexEntry) OverlapsTimeRange ¶ added in v0.9.6
func (e *IndexEntry) OverlapsTimeRange(min, max time.Time) bool
func (*IndexEntry) String ¶ added in v0.9.6
func (e *IndexEntry) String() string
func (*IndexEntry) UnmarshalBinary ¶
func (e *IndexEntry) UnmarshalBinary(b []byte) error
type Int64Decoder ¶
Int64Decoder decodes a byte slice into int64 values.
func NewInt64Decoder ¶
func NewInt64Decoder(b []byte) Int64Decoder
type Int64Encoder ¶
Int64Encoder encodes int64 values into byte slices.
func NewInt64Encoder ¶
func NewInt64Encoder() Int64Encoder
type Int64Value ¶
type Int64Value struct {
// contains filtered or unexported fields
}
func DecodeInt64Block ¶ added in v0.9.6
func DecodeInt64Block(block []byte, a []Int64Value) ([]Int64Value, error)
func (*Int64Value) Size ¶
func (v *Int64Value) Size() int
func (*Int64Value) String ¶
func (f *Int64Value) String() string
func (*Int64Value) Time ¶
func (v *Int64Value) Time() time.Time
func (*Int64Value) UnixNano ¶
func (v *Int64Value) UnixNano() int64
func (*Int64Value) Value ¶
func (v *Int64Value) Value() interface{}
type KeyCursor ¶ added in v0.9.6
type KeyCursor struct {
// contains filtered or unexported fields
}
type KeyIterator ¶ added in v0.9.6
type KeyIterator interface {
    Next() bool
    Read() (string, time.Time, time.Time, []byte, error)
    Close() error
}
KeyIterator allows iteration over set of keys and values in sorted order.
func NewCacheKeyIterator ¶ added in v0.9.6
func NewCacheKeyIterator(cache *Cache, size int) KeyIterator
func NewTSMKeyIterator ¶ added in v0.9.6
func NewTSMKeyIterator(size int, fast bool, readers ...*TSMReader) (KeyIterator, error)
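A sketch of walking the sorted keys and raw blocks of an existing TSM file through a KeyIterator, using the NewTSMReader and NewTSMKeyIterator signatures listed here; the file path and import path are placeholders.

package main

import (
    "log"
    "os"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    f, err := os.Open("/var/lib/influxdb/data/db/rp/1/000000001-000000001.tsm") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    r, err := tsm1.NewTSMReader(f)
    if err != nil {
        log.Fatal(err)
    }
    defer r.Close()

    // Iterate each key's blocks in sorted order, up to 1000 values per block.
    iter, err := tsm1.NewTSMKeyIterator(1000, false, r)
    if err != nil {
        log.Fatal(err)
    }
    defer iter.Close()

    for iter.Next() {
        key, minTime, maxTime, block, err := iter.Read()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("%s: %d bytes, %v - %v", key, len(block), minTime, maxTime)
    }
}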
type SegmentInfo ¶ added in v0.9.6
type SegmentInfo struct {
// contains filtered or unexported fields
}
SegmentInfo represents metadata about a segment.
type StringDecoder ¶
func NewStringDecoder ¶
func NewStringDecoder(b []byte) (StringDecoder, error)
type StringEncoder ¶
func NewStringEncoder ¶
func NewStringEncoder() StringEncoder
type StringValue ¶
type StringValue struct {
// contains filtered or unexported fields
}
func DecodeStringBlock ¶ added in v0.9.6
func DecodeStringBlock(block []byte, a []StringValue) ([]StringValue, error)
func (*StringValue) Size ¶
func (v *StringValue) Size() int
func (*StringValue) String ¶
func (f *StringValue) String() string
func (*StringValue) Time ¶
func (v *StringValue) Time() time.Time
func (*StringValue) UnixNano ¶
func (v *StringValue) UnixNano() int64
func (*StringValue) Value ¶
func (v *StringValue) Value() interface{}
type TSMFile ¶ added in v0.9.6
type TSMFile interface {
    // Path returns the underlying file path for the TSMFile. If the file
    // has not been written or loaded from disk, the zero value is returned.
    Path() string

    // Read returns all the values in the block where time t resides.
    Read(key string, t time.Time) ([]Value, error)

    // ReadAt returns all the values in the block identified by entry.
    ReadAt(entry *IndexEntry, values []Value) ([]Value, error)

    // Entries returns the index entries for all blocks for the given key.
    Entries(key string) []*IndexEntry

    // ContainsValue returns true if the TSMFile may contain a value with the
    // specified key and time.
    ContainsValue(key string, t time.Time) bool

    // Contains returns true if the file contains any values for the given
    // key.
    Contains(key string) bool

    // TimeRange returns the min and max time across all keys in the file.
    TimeRange() (time.Time, time.Time)

    // KeyRange returns the min and max keys in the file.
    KeyRange() (string, string)

    // Keys returns all keys contained in the file.
    Keys() []string

    // Type returns the block type of the values stored for the key. Returns one of
    // BlockFloat64, BlockInt64, BlockBool, or BlockString. If the key does not exist,
    // an error is returned.
    Type(key string) (byte, error)

    // Delete removes the keys from the set of keys available in this file.
    Delete(keys []string) error

    // HasTombstones returns true if the file contains values that have been deleted.
    HasTombstones() bool

    // TombstoneFiles returns the tombstone filestats if there are any tombstones
    // written for this file.
    TombstoneFiles() []FileStat

    // Close closes the underlying file resources.
    Close() error

    // Size returns the size of the file on disk in bytes.
    Size() uint32

    // Remove deletes the file from the filesystem.
    Remove() error

    // Stats returns summary information about the TSM file.
    Stats() FileStat

    // BlockIterator returns an iterator pointing to the first block in the file and
    // allows sequential iteration over every block.
    BlockIterator() *BlockIterator
}
type TSMIndex ¶
type TSMIndex interface {
    // Add records a new block entry for a key in the index.
    Add(key string, blockType byte, minTime, maxTime time.Time, offset int64, size uint32)

    // Delete removes the given keys from the index.
    Delete(keys []string)

    // Contains returns true if the given key exists in the index.
    Contains(key string) bool

    // ContainsValue returns true if the key and time might exist in this file. This
    // function could return true even though the actual point does not exist. For
    // example, the key may exist in this file, but not have a point exactly at time t.
    ContainsValue(key string, timestamp time.Time) bool

    // Entries returns all index entries for a key.
    Entries(key string) []*IndexEntry

    // Entry returns the index entry for the specified key and timestamp. If no entry
    // matches the key and timestamp, nil is returned.
    Entry(key string, timestamp time.Time) *IndexEntry

    // Keys returns the unique set of keys in the index.
    Keys() []string

    // Key returns the key in the index at the given position.
    Key(index int) (string, []*IndexEntry)

    // KeyAt returns the key in the index at the given position.
    KeyAt(index int) string

    // KeyCount returns the count of unique keys in the index.
    KeyCount() int

    // Size returns the size of the current index in bytes.
    Size() uint32

    // TimeRange returns the min and max time across all keys in the file.
    TimeRange() (time.Time, time.Time)

    // KeyRange returns the min and max keys in the file.
    KeyRange() (string, string)

    // Type returns the block type of the values stored for the key. Returns one of
    // BlockFloat64, BlockInt64, BlockBool, or BlockString. If the key does not exist,
    // an error is returned.
    Type(key string) (byte, error)

    // MarshalBinary returns a byte slice encoded version of the index.
    MarshalBinary() ([]byte, error)

    // UnmarshalBinary populates an index from an encoded byte slice
    // representation of an index.
    UnmarshalBinary(b []byte) error

    // Write writes the index contents to a writer.
    Write(w io.Writer) error
}
TSMIndex represents the index section of a TSM file. The index records all blocks, their locations, sizes, and min and max times.
func NewDirectIndex ¶
func NewDirectIndex() TSMIndex
func NewIndirectIndex ¶
func NewIndirectIndex() TSMIndex
type TSMReader ¶ added in v0.9.6
type TSMReader struct {
// contains filtered or unexported fields
}
func NewTSMReader ¶
func NewTSMReader(r io.ReadSeeker) (*TSMReader, error)
func NewTSMReaderWithOptions ¶ added in v0.9.6
func NewTSMReaderWithOptions(opt TSMReaderOptions) (*TSMReader, error)
func (*TSMReader) BlockIterator ¶ added in v0.10.0
func (t *TSMReader) BlockIterator() *BlockIterator
func (*TSMReader) ContainsValue ¶ added in v0.9.6
ContainsValue returns true if the key and time might exist in this file. This function could return true even though the actual point does not exist. For example, the key may exist in this file, but not have a point exactly at time t.
func (*TSMReader) Entries ¶ added in v0.9.6
func (t *TSMReader) Entries(key string) []*IndexEntry
func (*TSMReader) HasTombstones ¶ added in v0.9.6
HasTombstones returns true if there are any tombstone entries recorded.
func (*TSMReader) KeyRange ¶ added in v0.9.6
KeyRange returns the min and max key across all keys in the file.
func (*TSMReader) LastModified ¶ added in v0.9.6
func (*TSMReader) ReadAt ¶ added in v0.9.6
func (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error)
func (*TSMReader) Remove ¶ added in v0.9.6
Remove removes any underlying files stored on disk for this reader.
func (*TSMReader) TimeRange ¶ added in v0.9.6
TimeRange returns the min and max time across all keys in the file.
func (*TSMReader) TombstoneFiles ¶ added in v0.10.0
TombstoneFiles returns any tombstone files associated with this TSM file.
type TSMReaderOptions ¶ added in v0.9.6
type TSMReaderOptions struct {
    // Reader is used to create a file IO based reader.
    Reader io.ReadSeeker

    // MMAPFile is used to create an MMAP based reader.
    MMAPFile *os.File
}
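A sketch of opening a TSM file with the mmap-backed reader via TSMReaderOptions and scanning its index; the file path and import path are placeholders.

package main

import (
    "log"
    "os"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    f, err := os.Open("/var/lib/influxdb/data/db/rp/1/000000001-000000001.tsm") // placeholder path
    if err != nil {
        log.Fatal(err)
    }

    // MMAPFile selects the mmap-based reader; setting Reader instead selects the file IO based one.
    r, err := tsm1.NewTSMReaderWithOptions(tsm1.TSMReaderOptions{MMAPFile: f})
    if err != nil {
        log.Fatal(err)
    }
    defer r.Close()

    minT, maxT := r.TimeRange()
    log.Printf("%d keys, %v - %v", r.KeyCount(), minT, maxT)

    // ReadAll decodes every block stored for a key.
    for _, key := range r.Keys() {
        vals, err := r.ReadAll(key)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("%s: %d values", key, len(vals))
    }
}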
type TSMWriter ¶
type TSMWriter interface {
    // Write writes a new block for key containing values. Writes append
    // blocks in the order that the Write function is called. The caller is
    // responsible for ensuring keys and blocks are sorted appropriately.
    // Values are encoded as a full block. The caller is responsible for
    // ensuring a fixed number of values are encoded in each block as well as
    // ensuring the Values are sorted. The first and last timestamp values are
    // used as the minimum and maximum values for the index entry.
    Write(key string, values Values) error

    // WriteBlock writes a new block for key containing the bytes in block. WriteBlock appends
    // blocks in the order that the WriteBlock function is called. The caller is
    // responsible for ensuring keys and blocks are sorted appropriately, and that the
    // block and index information is correct for the block. The minTime and maxTime
    // timestamp values are used as the minimum and maximum values for the index entry.
    WriteBlock(key string, minTime, maxTime time.Time, block []byte) error

    // WriteIndex finishes the TSM write streams and writes the index.
    WriteIndex() error

    // Close closes any underlying file resources.
    Close() error

    // Size returns the current size in bytes of the file.
    Size() uint32
}
TSMWriter writes TSM formatted key and values.
type TimeDecoder ¶
TimeDecoder decodes byte slices to time.Time values.
func NewTimeDecoder ¶
func NewTimeDecoder(b []byte) TimeDecoder
type TimeEncoder ¶
TimeEncoder encodes time.Time to byte slices.
type Tombstoner ¶ added in v0.9.6
type Tombstoner struct {
    // Path is the location of the file to record tombstones. This should be the
    // full path to a TSM file.
    Path string
    // contains filtered or unexported fields
}
func (*Tombstoner) Add ¶ added in v0.9.6
func (t *Tombstoner) Add(keys []string) error
func (*Tombstoner) Delete ¶ added in v0.9.6
func (t *Tombstoner) Delete() error
func (*Tombstoner) HasTombstones ¶ added in v0.9.6
func (t *Tombstoner) HasTombstones() bool
HasTombstones returns true if there are any tombstone entries recorded.
func (*Tombstoner) ReadAll ¶ added in v0.9.6
func (t *Tombstoner) ReadAll() ([]string, error)
func (*Tombstoner) TombstoneFiles ¶ added in v0.10.0
func (t *Tombstoner) TombstoneFiles() []FileStat
TombstoneFiles returns any tombstone files associated with this TSM file.
type Value ¶
type Value interface {
    Time() time.Time
    UnixNano() int64
    Value() interface{}
    Size() int
    String() string
}
func DecodeBlock ¶
DecodeBlock takes a byte slice and decodes it into values of the appropriate type based on the block type.
type Values ¶
type Values []Value
Values represents a collection of Value types sorted in ascending time order. The underlying type should be the same across all values, but the interface makes the code cleaner.
func (Values) Deduplicate ¶
Deduplicate returns a new Values slice with any values that have the same timestamp removed. The Value that appears last in the slice is the one that is kept.
func (Values) Encode ¶
Encode converts the values to a byte slice. If there are no values, this function panics.
func (Values) InfluxQLType ¶ added in v0.9.6
InfluxQLType returns the influxql.DataType the values map to.
type WAL ¶ added in v0.9.6
type WAL struct {
    // LogOutput is the writer used by the logger.
    LogOutput io.Writer

    // SegmentSize is the file size at which a segment file will be rotated.
    SegmentSize int

    // LoggingEnabled specifies if detailed logs should be output.
    LoggingEnabled bool
    // contains filtered or unexported fields
}
func (*WAL) Close ¶ added in v0.9.6
Close will finish any flush that is currently in progress and close the file handles.
func (*WAL) CloseSegment ¶ added in v0.9.6
CloseSegment closes the current segment if it is non-empty and opens a new one.
func (*WAL) ClosedSegments ¶ added in v0.9.6
func (*WAL) Delete ¶ added in v0.9.6
Delete deletes the given keys, returning the segment ID for the operation.
func (*WAL) LastWriteTime ¶ added in v0.9.6
LastWriteTime is the last time anything was written to the WAL.
func (*WAL) Open ¶ added in v0.9.6
Open opens and initializes the WAL. It will recover from previous unclean shutdowns.
type WALEntry ¶ added in v0.9.6
type WALEntry interface {
    Type() WalEntryType
    Encode(dst []byte) ([]byte, error)
    MarshalBinary() ([]byte, error)
    UnmarshalBinary(b []byte) error
}
WALEntry is a record stored in each WAL segment. Each entry has a type and an opaque, type-dependent byte slice data attribute.
type WALSegmentReader ¶ added in v0.9.6
type WALSegmentReader struct {
// contains filtered or unexported fields
}
WALSegmentReader reads WAL segments.
func NewWALSegmentReader ¶ added in v0.9.6
func NewWALSegmentReader(r io.ReadCloser) *WALSegmentReader
func (*WALSegmentReader) Close ¶ added in v0.9.6
func (r *WALSegmentReader) Close() error
func (*WALSegmentReader) Count ¶ added in v0.9.6
func (r *WALSegmentReader) Count() int64
Count returns the total number of bytes read successfully from the segment, as of the last call to Read(). The segment is guaranteed to be valid up to and including this number of bytes.
func (*WALSegmentReader) Error ¶ added in v0.9.6
func (r *WALSegmentReader) Error() error
func (*WALSegmentReader) Next ¶ added in v0.9.6
func (r *WALSegmentReader) Next() bool
Next indicates whether there is a value to read.
func (*WALSegmentReader) Read ¶ added in v0.9.6
func (r *WALSegmentReader) Read() (WALEntry, error)
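A sketch of scanning a closed WAL segment and distinguishing write and delete entries with the reader documented above; the segment path and import path are placeholders.

package main

import (
    "log"
    "os"

    "github.com/influxdb/influxdb/tsdb/engine/tsm1" // import path assumed
)

func main() {
    f, err := os.Open("/var/lib/influxdb/wal/db/rp/1/_00001.wal") // placeholder path
    if err != nil {
        log.Fatal(err)
    }

    r := tsm1.NewWALSegmentReader(f)
    defer r.Close()

    for r.Next() {
        entry, err := r.Read()
        if err != nil {
            // A read error usually means the remainder of the segment is corrupt;
            // the CacheLoader truncates at this point and moves on.
            log.Printf("stopping at byte %d: %v", r.Count(), err)
            break
        }
        switch e := entry.(type) {
        case *tsm1.WriteWALEntry:
            log.Printf("write entry of type %v", e.Type())
        case *tsm1.DeleteWALEntry:
            log.Printf("delete entry for %d keys", len(e.Keys))
        }
    }
}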
type WALSegmentWriter ¶ added in v0.9.6
type WALSegmentWriter struct {
// contains filtered or unexported fields
}
WALSegmentWriter writes WAL segments.
func NewWALSegmentWriter ¶ added in v0.9.6
func NewWALSegmentWriter(w io.WriteCloser) *WALSegmentWriter
func (*WALSegmentWriter) Write ¶ added in v0.9.6
func (w *WALSegmentWriter) Write(entryType WalEntryType, compressed []byte) error
type WalEntryType ¶ added in v0.9.6
type WalEntryType byte
WalEntryType is a byte written to a wal segment file that indicates what the following compressed block contains.
const (
    WriteWALEntryType  WalEntryType = 0x01
    DeleteWALEntryType WalEntryType = 0x02
)
type WriteWALEntry ¶ added in v0.9.6
WriteWALEntry represents a write of points.
func (*WriteWALEntry) Encode ¶ added in v0.9.6
func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error)
Encode converts the WriteWALEntry into a byte stream using dst if it is large enough. If dst is too small, the slice will be grown to fit the encoded entry.
func (*WriteWALEntry) MarshalBinary ¶ added in v0.9.6
func (w *WriteWALEntry) MarshalBinary() ([]byte, error)
func (*WriteWALEntry) Type ¶ added in v0.9.6
func (w *WriteWALEntry) Type() WalEntryType
func (*WriteWALEntry) UnmarshalBinary ¶ added in v0.9.6
func (w *WriteWALEntry) UnmarshalBinary(b []byte) error