Documentation ¶
Index ¶
- Constants
- Variables
- func NewFacade(c Chunk, blockSize, targetSize int) encoding.Chunk
- func SupportedEncoding() string
- func UncompressedSize(c encoding.Chunk) (int, bool)
- type Block
- type BufioReaderPool
- func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader
- func (bufPool *BufioReaderPool) Put(b *bufio.Reader)
- type Chunk
- func NewDumbChunk() Chunk
- type Encoding
- func ParseEncoding(enc string) (Encoding, error)
- type Facade
- func (f *Facade) UnmarshalFromBuf(buf []byte) error
- func (f Facade) Utilization() float64
- type GzipPool
- func (pool *GzipPool) GetReader(src io.Reader) io.Reader
- func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser
- func (pool *GzipPool) PutWriter(writer io.WriteCloser)
- type LZ4Pool
- func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader
- func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser
- func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)
- type MemChunk
- type MemChunk
- func (c *MemChunk) Append(entry *logproto.Entry) error
- func (c *MemChunk) BlockCount() int
- func (c *MemChunk) Blocks(mintT, maxtT time.Time) []Block
- func (c *MemChunk) Bounds() (fromT, toT time.Time)
- func (c *MemChunk) Bytes() ([]byte, error)
- func (c *MemChunk) Close() error
- func (c *MemChunk) CompressedSize() int
- func (c *MemChunk) Encoding() Encoding
- func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, ...) (iter.EntryIterator, error)
- func (c *MemChunk) SampleIterator(ctx context.Context, mintT, maxtT time.Time, filter logql.LineFilter, ...) iter.SampleIterator
- func (c *MemChunk) Size() int
- func (c *MemChunk) SpaceFor(e *logproto.Entry) bool
- func (c *MemChunk) UncompressedSize() int
- func (c *MemChunk) Utilization() float64
- type NoopPool
- func (pool *NoopPool) GetReader(src io.Reader) io.Reader
- func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser
- func (pool *NoopPool) PutWriter(writer io.WriteCloser)
- type ReaderPool
- type SnappyPool
- func (pool *SnappyPool) GetReader(src io.Reader) io.Reader
- func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser
- func (pool *SnappyPool) PutReader(reader io.Reader)
- func (pool *SnappyPool) PutWriter(writer io.WriteCloser)
- type WriterPool
Constants ¶
const GzipLogChunk = encoding.Encoding(128)
GzipLogChunk is a Cortex encoding type for our chunks. Deprecated: the chunk encoding/compression format is inside the chunk data.
const LogChunk = encoding.Encoding(129)
LogChunk is a Cortex encoding type for our chunks.
Variables ¶
var (
	ErrChunkFull       = errors.New("chunk full")
	ErrOutOfOrder      = errors.New("entry out of order")
	ErrInvalidSize     = errors.New("invalid size")
	ErrInvalidFlag     = errors.New("invalid flag")
	ErrInvalidChecksum = errors.New("invalid chunk checksum")
)
Errors returned by the chunk interface.
var (
	// Gzip is the gnu zip compression pool
	Gzip = GzipPool{/* contains filtered or unexported fields */}
	// Lz4_64k is the LZ4 compression pool, with a 64k buffer size
	Lz4_64k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_256k uses a 256k buffer
	Lz4_256k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_1M uses a 1M buffer
	Lz4_1M = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_4M uses a 4M buffer
	Lz4_4M = LZ4Pool{/* contains filtered or unexported fields */}
	// Snappy is the snappy compression pool
	Snappy SnappyPool
	// Noop is the no-compression pool
	Noop NoopPool

	// BufReaderPool is a bufio.Reader pool
	BufReaderPool = &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReader(nil) },
		},
	}
	// BytesBufferPool is a pool of byte buffers used for decompressed lines.
	// Buckets: [0.5KB, 1KB, 2KB, 4KB, 8KB]
	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
)
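These pools are interchangeable behind the WriterPool and ReaderPool interfaces described later on this page. A hedged sketch of selecting one per encoding; the helper name pickPool, the package name, and the import path are illustrative, not part of this package:

package chunkdemo

import "github.com/grafana/loki/pkg/chunkenc"

// pickPool maps an Encoding (see the constants below) to the package-level
// pool that compresses for it, falling back to no compression.
func pickPool(enc chunkenc.Encoding) chunkenc.WriterPool {
	switch enc {
	case chunkenc.EncGZIP:
		return &chunkenc.Gzip
	case chunkenc.EncSnappy:
		return &chunkenc.Snappy
	case chunkenc.EncLZ4_64k:
		return &chunkenc.Lz4_64k
	default:
		return &chunkenc.Noop
	}
}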
Functions ¶
func NewFacade ¶
func NewFacade(c Chunk, blockSize, targetSize int) encoding.Chunk
NewFacade makes a new Facade.
func SupportedEncoding ¶ added in v1.3.0
func SupportedEncoding() string
SupportedEncoding returns the list of supported Encodings.
func UncompressedSize ¶
func UncompressedSize(c encoding.Chunk) (int, bool)
UncompressedSize returns the uncompressed size of the chunk, if it can be determined.
Types ¶
type Block ¶ added in v1.6.0
type Block interface {
	// MinTime is the minimum time of entries in the block.
	MinTime() int64
	// MaxTime is the maximum time of entries in the block.
	MaxTime() int64
	// Offset is the offset/position of the block in the chunk. Offset is unique for a given block per chunk.
	Offset() int
	// Entries is the number of entries in the block.
	Entries() int
	// Iterator returns an entry iterator for the block.
	Iterator(context.Context, logql.LineFilter) iter.EntryIterator
	// SampleIterator returns a sample iterator for the block.
	SampleIterator(context.Context, logql.LineFilter, logql.SampleExtractor) iter.SampleIterator
}
Block is a chunk block.
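As a hedged sketch of walking a chunk block by block, assuming a nil logql.LineFilter means no filtering and the usual Next/Entry/Error/Close contract on iter.EntryIterator:

package chunkdemo

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/chunkenc"
)

// dumpBlocks prints a summary of every block overlapping [from, through],
// then streams the entries of each block.
func dumpBlocks(ctx context.Context, c chunkenc.Chunk, from, through time.Time) error {
	for _, b := range c.Blocks(from, through) {
		fmt.Printf("block at offset %d: %d entries in [%d, %d]\n",
			b.Offset(), b.Entries(), b.MinTime(), b.MaxTime())

		it := b.Iterator(ctx, nil) // nil filter: keep every line (assumed)
		for it.Next() {
			e := it.Entry()
			fmt.Println(e.Timestamp, e.Line)
		}
		if err := it.Error(); err != nil {
			it.Close()
			return err
		}
		if err := it.Close(); err != nil {
			return err
		}
	}
	return nil
}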
type BufioReaderPool ¶ added in v0.2.0
type BufioReaderPool struct {
// contains filtered or unexported fields
}
BufioReaderPool is a pool of bufio.Readers backed by a sync.Pool.
func (*BufioReaderPool) Get ¶ added in v0.2.0
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader
Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (*BufioReaderPool) Put ¶ added in v0.2.0
func (bufPool *BufioReaderPool) Put(b *bufio.Reader)
Put puts the bufio.Reader back into the pool.
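A typical borrow/use/return cycle, sketched; the helper readFirstLine and the import path are illustrative:

package chunkdemo

import (
	"io"

	"github.com/grafana/loki/pkg/chunkenc"
)

// readFirstLine borrows a pooled bufio.Reader over src, reads one line,
// and returns the reader to the pool for reuse.
func readFirstLine(src io.Reader) (string, error) {
	br := chunkenc.BufReaderPool.Get(src)
	defer chunkenc.BufReaderPool.Put(br)
	return br.ReadString('\n')
}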
type Chunk ¶
type Chunk interface {
	Bounds() (time.Time, time.Time)
	SpaceFor(*logproto.Entry) bool
	Append(*logproto.Entry) error
	Iterator(ctx context.Context, from, through time.Time, direction logproto.Direction, filter logql.LineFilter) (iter.EntryIterator, error)
	SampleIterator(ctx context.Context, from, through time.Time, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator
	// Blocks returns the list of blocks in the chunk.
	Blocks(mintT, maxtT time.Time) []Block
	Size() int
	Bytes() ([]byte, error)
	BlockCount() int
	Utilization() float64
	UncompressedSize() int
	CompressedSize() int
	Close() error
}
Chunk is the interface for the compressed logs chunk format.
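A hedged sketch of the write path: callers check SpaceFor before Append, close a full chunk, and cut over to a new one. The newChunk factory parameter is illustrative (NewDumbChunk below would satisfy it):

package chunkdemo

import (
	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/logproto"
)

// appendEntry writes e to c, cutting over to a fresh chunk when c is full.
// Entries must arrive in timestamp order or Append returns ErrOutOfOrder.
func appendEntry(c chunkenc.Chunk, newChunk func() chunkenc.Chunk, e *logproto.Entry) (chunkenc.Chunk, error) {
	if !c.SpaceFor(e) {
		if err := c.Close(); err != nil {
			return nil, err
		}
		c = newChunk()
	}
	if err := c.Append(e); err != nil {
		return c, err // e.g. chunkenc.ErrOutOfOrder
	}
	return c, nil
}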
func NewDumbChunk ¶
func NewDumbChunk() Chunk
NewDumbChunk returns a new chunk that isn't very good.
type Encoding ¶
type Encoding byte
Encoding is the identifier for a chunk encoding.
const (
	EncNone Encoding = iota
	EncGZIP
	EncDumb
	EncLZ4_64k
	EncSnappy

	// Added for testing.
	EncLZ4_256k
	EncLZ4_1M
	EncLZ4_4M
)
The different available encodings. Make sure to preserve the order, as these numeric values are written to the chunks!
func ParseEncoding ¶ added in v1.3.0
func ParseEncoding(enc string) (Encoding, error)
ParseEncoding parses a chunk encoding (compression algorithm) by its name.
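For instance, assuming names match the Encoding's String() form (e.g. "gzip" or "snappy") and the (Encoding, error) return shape shown above:

package chunkdemo

import "github.com/grafana/loki/pkg/chunkenc"

// parseOrDefault resolves an encoding name, falling back to EncNone;
// SupportedEncoding() lists the names that will parse.
func parseOrDefault(name string) chunkenc.Encoding {
	enc, err := chunkenc.ParseEncoding(name)
	if err != nil {
		return chunkenc.EncNone
	}
	return enc
}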
type Facade ¶
Facade for compatibility with the Cortex chunk type, so we can use its chunk store.
func (*Facade) UnmarshalFromBuf ¶
func (f *Facade) UnmarshalFromBuf(buf []byte) error
UnmarshalFromBuf implements encoding.Chunk.
func (Facade) Utilization ¶ added in v0.4.0
func (f Facade) Utilization() float64
Utilization implements encoding.Chunk.
type GzipPool ¶ added in v0.2.0
type GzipPool struct {
// contains filtered or unexported fields
}
GzipPool is a gzip compression pool.
func (*GzipPool) GetReader ¶ added in v0.2.0
func (pool *GzipPool) GetReader(src io.Reader) io.Reader
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*GzipPool) GetWriter ¶ added in v0.2.0
func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*GzipPool) PutWriter ¶ added in v0.2.0
func (pool *GzipPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
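A sketched compress/decompress round trip through the pool, using the GetReader shape shown above; note the writer must be closed to flush before the bytes can be read back:

package chunkdemo

import (
	"bytes"
	"io/ioutil"

	"github.com/grafana/loki/pkg/chunkenc"
)

// gzipRoundTrip compresses line through the pooled writer, then
// decompresses it back through a pooled reader.
func gzipRoundTrip(line []byte) ([]byte, error) {
	var buf bytes.Buffer

	w := chunkenc.Gzip.GetWriter(&buf)
	if _, err := w.Write(line); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // flush the remaining compressed bytes
		return nil, err
	}
	chunkenc.Gzip.PutWriter(w)

	r := chunkenc.Gzip.GetReader(&buf)
	return ioutil.ReadAll(r)
}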
type LZ4Pool ¶ added in v1.3.0
type LZ4Pool struct {
// contains filtered or unexported fields
}
func (*LZ4Pool) GetReader ¶ added in v1.3.0
func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*LZ4Pool) GetWriter ¶ added in v1.3.0
func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*LZ4Pool) PutWriter ¶ added in v1.3.0
func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
type MemChunk ¶
type MemChunk struct {
// contains filtered or unexported fields
}
MemChunk implements compressed log chunks.
func NewByteChunk ¶
NewByteChunk returns a MemChunk on the passed bytes.
func NewMemChunk ¶
NewMemChunk returns a new in-mem chunk.
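A hedged end-to-end sketch. The constructor signatures are not shown on this page, so the (enc, blockSize, targetSize) parameters below, mirroring NewFacade's, are an assumption to verify against the source:

package chunkdemo

import (
	"log"
	"time"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/logproto"
)

func roundTrip() {
	// Assumed signature: NewMemChunk(enc Encoding, blockSize, targetSize int).
	c := chunkenc.NewMemChunk(chunkenc.EncSnappy, 256*1024, 0)
	if err := c.Append(&logproto.Entry{Timestamp: time.Now(), Line: "hello"}); err != nil {
		log.Fatal(err)
	}
	if err := c.Close(); err != nil { // cut the head block so it is serialized (assumed behavior)
		log.Fatal(err)
	}
	b, err := c.Bytes()
	if err != nil {
		log.Fatal(err)
	}
	// Assumed signature: NewByteChunk(b []byte, blockSize, targetSize int).
	c2, err := chunkenc.NewByteChunk(b, 256*1024, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(c2.Encoding())
}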
func (*MemChunk) BlockCount ¶ added in v1.6.0
func (c *MemChunk) BlockCount() int
BlockCount implements Chunk.
func (*MemChunk) CompressedSize ¶ added in v1.3.0
func (c *MemChunk) CompressedSize() int
CompressedSize implements Chunk.
func (*MemChunk) Iterator ¶
func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, filter logql.LineFilter) (iter.EntryIterator, error)
Iterator implements Chunk.
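For example, streaming a window of entries forward (a nil filter is again assumed to mean no filtering):

package chunkdemo

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/logproto"
)

// printEntries streams entries between from and through in ascending time order.
func printEntries(ctx context.Context, c *chunkenc.MemChunk, from, through time.Time) error {
	it, err := c.Iterator(ctx, from, through, logproto.FORWARD, nil)
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		e := it.Entry()
		fmt.Println(e.Timestamp.UnixNano(), e.Line)
	}
	return it.Error()
}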
func (*MemChunk) SampleIterator ¶ added in v1.6.0
func (c *MemChunk) SampleIterator(ctx context.Context, mintT, maxtT time.Time, filter logql.LineFilter, extractor logql.SampleExtractor) iter.SampleIterator
SampleIterator implements Chunk.
func (*MemChunk) UncompressedSize ¶ added in v0.4.0
func (c *MemChunk) UncompressedSize() int
UncompressedSize implements Chunk.
func (*MemChunk) Utilization ¶ added in v0.4.0
func (c *MemChunk) Utilization() float64
Utilization implements Chunk.
type NoopPool ¶ added in v1.3.0
type NoopPool struct{}
func (*NoopPool) GetReader ¶ added in v1.3.0
func (pool *NoopPool) GetReader(src io.Reader) io.Reader
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*NoopPool) GetWriter ¶ added in v1.3.0
func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*NoopPool) PutWriter ¶ added in v1.3.0
func (pool *NoopPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
type ReaderPool ¶ added in v1.3.0
type ReaderPool interface {
	GetReader(io.Reader) io.Reader
	PutReader(io.Reader)
}
ReaderPool is similar to WriterPool, but for reading chunks.
type SnappyPool ¶ added in v1.3.0
type SnappyPool struct {
// contains filtered or unexported fields
}
func (*SnappyPool) GetReader ¶ added in v1.3.0
func (pool *SnappyPool) GetReader(src io.Reader) io.Reader
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*SnappyPool) GetWriter ¶ added in v1.3.0
func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*SnappyPool) PutReader ¶ added in v1.3.0
func (pool *SnappyPool) PutReader(reader io.Reader)
PutReader places a CompressionReader back in the pool.
func (*SnappyPool) PutWriter ¶ added in v1.3.0
func (pool *SnappyPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
type WriterPool ¶ added in v1.3.0
type WriterPool interface {
	GetWriter(io.Writer) io.WriteCloser
	PutWriter(io.WriteCloser)
}
WriterPool is a pool of io.Writers. It is used by every chunk to avoid unnecessary allocations.
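The interface lets callers compress without knowing which algorithm backs the pool; a minimal sketch (compressTo is illustrative):

package chunkdemo

import (
	"io"

	"github.com/grafana/loki/pkg/chunkenc"
)

// compressTo writes p through a pooled compression writer into dst,
// returning the writer to whichever pool produced it.
func compressTo(wp chunkenc.WriterPool, dst io.Writer, p []byte) error {
	w := wp.GetWriter(dst)
	if _, err := w.Write(p); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	wp.PutWriter(w)
	return nil
}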