Documentation ¶
Index ¶
- Constants
- Variables
- func NewFacade(c Chunk) encoding.Chunk
- type BufioReaderPool
- type Chunk
- type CompressionPool
- type CompressionReader
- type CompressionWriter
- type Encoding
- type Facade
- type GzipPool
- type LazyChunk
- type MemChunk
- func (c *MemChunk) Append(entry *logproto.Entry) error
- func (c *MemChunk) Bounds() (fromT, toT time.Time)
- func (c *MemChunk) Bytes() ([]byte, error)
- func (c *MemChunk) Close() error
- func (c *MemChunk) Encoding() Encoding
- func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction, filter logql.Filter) (iter.EntryIterator, error)
- func (c *MemChunk) Size() int
- func (c *MemChunk) SpaceFor(*logproto.Entry) bool
Constants ¶
const GzipLogChunk = encoding.Encoding(128)
GzipLogChunk is a cortex encoding type for our chunks.
Variables ¶
var ( ErrChunkFull = errors.New("chunk full") ErrOutOfOrder = errors.New("entry out of order") ErrInvalidSize = errors.New("invalid size") ErrInvalidFlag = errors.New("invalid flag") ErrInvalidChecksum = errors.New("invalid checksum") )
Errors returned by the chunk interface.
var ( // Gzip is the gzip compression pool Gzip GzipPool // BufReaderPool is bufio.Reader pool BufReaderPool = &BufioReaderPool{ pool: sync.Pool{ New: func() interface{} { return bufio.NewReader(nil) }, }, } // BytesBufferPool is a bytes buffer used for lines decompressed. // Buckets [0.5KB,1KB,2KB,4KB,8KB] BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) }) )
Functions ¶
Types ¶
type BufioReaderPool ¶ added in v0.2.0
type BufioReaderPool struct {
// contains filtered or unexported fields
}
BufioReaderPool is a bufio reader that uses sync.Pool.
func (*BufioReaderPool) Get ¶ added in v0.2.0
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader
Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (*BufioReaderPool) Put ¶ added in v0.2.0
func (bufPool *BufioReaderPool) Put(b *bufio.Reader)
Put puts the bufio.Reader back into the pool.
type Chunk ¶
type Chunk interface { Bounds() (time.Time, time.Time) SpaceFor(*logproto.Entry) bool Append(*logproto.Entry) error Iterator(from, through time.Time, direction logproto.Direction, filter logql.Filter) (iter.EntryIterator, error) Size() int Bytes() ([]byte, error) }
Chunk is the interface for the compressed logs chunk format.
func NewDumbChunk ¶
func NewDumbChunk() Chunk
NewDumbChunk returns a new chunk that isn't very good.
type CompressionPool ¶ added in v0.2.0
type CompressionPool interface { GetWriter(io.Writer) CompressionWriter PutWriter(CompressionWriter) GetReader(io.Reader) CompressionReader PutReader(CompressionReader) }
CompressionPool is a pool of CompressionWriter and CompressionReader This is used by every chunk to avoid unnecessary allocations.
type CompressionReader ¶
CompressionReader reads the compressed data.
type CompressionWriter ¶
type CompressionWriter interface { Write(p []byte) (int, error) Close() error Flush() error Reset(w io.Writer) }
CompressionWriter is the writer that compresses the data passed to it.
type Facade ¶
Facade for compatibility with cortex chunk type, so we can use its chunk store.
func (*Facade) UnmarshalFromBuf ¶
UnmarshalFromBuf implements encoding.Chunk.
type GzipPool ¶ added in v0.2.0
type GzipPool struct {
// contains filtered or unexported fields
}
GzipPool is a gzip compression pool.
func (*GzipPool) GetReader ¶ added in v0.2.0
func (pool *GzipPool) GetReader(src io.Reader) (reader CompressionReader)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*GzipPool) GetWriter ¶ added in v0.2.0
func (pool *GzipPool) GetWriter(dst io.Writer) (writer CompressionWriter)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*GzipPool) PutReader ¶ added in v0.2.0
func (pool *GzipPool) PutReader(reader CompressionReader)
PutReader places a CompressionReader back in the pool.
func (*GzipPool) PutWriter ¶ added in v0.2.0
func (pool *GzipPool) PutWriter(writer CompressionWriter)
PutWriter places a CompressionWriter back in the pool.
type MemChunk ¶
type MemChunk struct {
// contains filtered or unexported fields
}
MemChunk implements compressed log chunks.
func NewByteChunk ¶
NewByteChunk returns a MemChunk on the passed bytes.
func NewMemChunk ¶
NewMemChunk returns a new in-mem chunk for query.
func NewMemChunkSize ¶
NewMemChunkSize returns a new in-mem chunk. Mainly for config push size.