Documentation ¶
Index ¶
- Constants
- Variables
- func CopyBlock(ctx context.Context, meta *backend.BlockMeta, src backend.Reader, ...) error
- func CreateBlock(ctx context.Context, cfg *common.BlockConfig, meta *backend.BlockMeta, ...) (*backend.BlockMeta, error)
- func NewDataReader(r backend.ContextReader, encoding backend.Encoding) (common.DataReader, error)
- func NewDataWriter(writer io.Writer, encoding backend.Encoding) (common.DataWriter, error)
- func NewDedupingIterator(iter common.Iterator, combiner model.ObjectCombiner, dataEncoding string) (common.Iterator, error)
- func NewIndexReader(r backend.ContextReader, pageSizeBytes int, totalRecords int) (common.IndexReader, error)
- func NewIndexWriter(pageSizeBytes int) common.IndexWriter
- func NewIterator(reader io.Reader, o common.ObjectReaderWriter) common.Iterator
- func NewMultiblockIterator(ctx context.Context, inputs []common.Iterator, bufferSize int, ...) common.Iterator
- func NewObjectReaderWriter() common.ObjectReaderWriter
- func NewPrefetchIterator(ctx context.Context, iter common.Iterator, bufferSize int) common.Iterator
- func NewRecordIterator(r []common.Record, dataR common.DataReader, objectRW common.ObjectReaderWriter) common.Iterator
- func NewRecordReaderWriter() common.RecordReaderWriter
- type Appender
- type BackendBlock
- func (b *BackendBlock) BlockMeta() *backend.BlockMeta
- func (b *BackendBlock) FindTraceByID(ctx context.Context, id common.ID) (*tempopb.Trace, error)
- func (b *BackendBlock) Iterator(chunkSizeBytes uint32) (common.Iterator, error)
- func (b *BackendBlock) NewIndexReader() (common.IndexReader, error)
- func (b *BackendBlock) Search(ctx context.Context, req *tempopb.SearchRequest, opt common.SearchOptions) (resp *tempopb.SearchResponse, err error)
- type BufferedAppenderGeneric
- type Compactor
- type Encoding
- func (v Encoding) CopyBlock(ctx context.Context, meta *backend.BlockMeta, from backend.Reader, ...) error
- func (v Encoding) CreateBlock(ctx context.Context, cfg *common.BlockConfig, meta *backend.BlockMeta, ...) (*backend.BlockMeta, error)
- func (v Encoding) NewCompactor(opts common.CompactionOptions) common.Compactor
- func (v Encoding) OpenBlock(meta *backend.BlockMeta, r backend.Reader) (common.BackendBlock, error)
- func (v Encoding) Version() string
- type GzipPool
- func (pool *GzipPool) Encoding() backend.Encoding
- func (pool *GzipPool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *GzipPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *GzipPool) PutReader(reader io.Reader)
- func (pool *GzipPool) PutWriter(writer io.WriteCloser)
- func (pool *GzipPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *GzipPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
- type LZ4Pool
- func (pool *LZ4Pool) Encoding() backend.Encoding
- func (pool *LZ4Pool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *LZ4Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *LZ4Pool) PutReader(reader io.Reader)
- func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)
- func (pool *LZ4Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *LZ4Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
- type NoopPool
- func (pool *NoopPool) Encoding() backend.Encoding
- func (pool *NoopPool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *NoopPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *NoopPool) PutReader(reader io.Reader)
- func (pool *NoopPool) PutWriter(writer io.WriteCloser)
- func (pool *NoopPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *NoopPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
- type PagedFinder
- type ReaderPool
- type S2Pool
- func (pool *S2Pool) Encoding() backend.Encoding
- func (pool *S2Pool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *S2Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *S2Pool) PutReader(reader io.Reader)
- func (pool *S2Pool) PutWriter(writer io.WriteCloser)
- func (pool *S2Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *S2Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
- type SnappyPool
- func (pool *SnappyPool) Encoding() backend.Encoding
- func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *SnappyPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *SnappyPool) PutReader(reader io.Reader)
- func (pool *SnappyPool) PutWriter(writer io.WriteCloser)
- func (pool *SnappyPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *SnappyPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
- type StreamingBlock
- func (c *StreamingBlock) AddObject(id common.ID, object []byte) error
- func (c *StreamingBlock) BlockMeta() *backend.BlockMeta
- func (c *StreamingBlock) Complete(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (int, error)
- func (c *StreamingBlock) CurrentBufferLength() int
- func (c *StreamingBlock) CurrentBufferedObjects() int
- func (c *StreamingBlock) FlushBuffer(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (backend.AppendTracker, int, error)
- func (c *StreamingBlock) Length() int
- type WriterPool
- type ZstdPool
- func (pool *ZstdPool) Encoding() backend.Encoding
- func (pool *ZstdPool) GetReader(src io.Reader) (io.Reader, error)
- func (pool *ZstdPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
- func (pool *ZstdPool) PutReader(reader io.Reader)
- func (pool *ZstdPool) PutWriter(writer io.WriteCloser)
- func (pool *ZstdPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
- func (pool *ZstdPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
Constants ¶
const DataHeaderLength = 0
DataHeaderLength is the length in bytes for the data header
const DefaultFlushSizeBytes int = 30 * 1024 * 1024 // 30 MiB
const IndexHeaderLength = int(uint64Size) // 64bit checksum (xxhash)
IndexHeaderLength is the length in bytes for the index header
const VersionString = "v2"
Variables ¶
var (
	// Gzip is the GNU zip compression pool
	Gzip = GzipPool{/* contains filtered or unexported fields */}
	// Lz4_64k is the lz4 compression pool, with 64k buffer size
	Lz4_64k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_256k uses 256k buffer
	Lz4_256k = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_1M uses 1M buffer
	Lz4_1M = LZ4Pool{/* contains filtered or unexported fields */}
	// Lz4_4M uses 4M buffer
	Lz4_4M = LZ4Pool{/* contains filtered or unexported fields */}
	// Snappy is the snappy compression pool
	Snappy SnappyPool
	// Noop is the no compression pool
	Noop NoopPool
	// Zstd is the zstd compression pool
	Zstd = ZstdPool{}
	// S2 is the s2 compression pool
	S2 = S2Pool{}
	// BytesBufferPool is a bytes buffer pool used for decompressed data.
	// Buckets: [0.5KB, 1KB, 2KB, 4KB, 8KB]
	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
)
Functions ¶
func CopyBlock ¶ added in v1.4.0
func CopyBlock(ctx context.Context, meta *backend.BlockMeta, src backend.Reader, dest backend.Writer) error
CopyBlock copies a block from one backend to another. The copy is performed at a low level, so all encoding/formatting is preserved.
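A sketch of driving CopyBlock, e.g. during a backend migration. Only the CopyBlock signature above comes from this page; the import path, the BlockID field name, and how src/dest are constructed are assumptions.

// Assumed imports: "context", "fmt",
// "github.com/grafana/tempo/tempodb/backend",
// v2 "github.com/grafana/tempo/tempodb/encoding/v2"
func copyBlocks(ctx context.Context, metas []*backend.BlockMeta, src backend.Reader, dest backend.Writer) error {
	for _, meta := range metas {
		// CopyBlock streams each block at a low level, so the destination
		// keeps the exact encoding and format of the source.
		if err := v2.CopyBlock(ctx, meta, src, dest); err != nil {
			return fmt.Errorf("copying block %s: %w", meta.BlockID, err) // BlockID field assumed
		}
	}
	return nil
}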
func CreateBlock ¶ added in v1.5.0
func CreateBlock(ctx context.Context, cfg *common.BlockConfig, meta *backend.BlockMeta, ...) (*backend.BlockMeta, error)
func NewDataReader ¶
func NewDataReader(r backend.ContextReader, encoding backend.Encoding) (common.DataReader, error)
NewDataReader constructs a v2 DataReader that handles paged reading.
func NewDataWriter ¶
func NewDataWriter(writer io.Writer, encoding backend.Encoding) (common.DataWriter, error)
NewDataWriter creates a paged data writer.
func NewDedupingIterator ¶ added in v1.4.0
func NewDedupingIterator(iter common.Iterator, combiner model.ObjectCombiner, dataEncoding string) (common.Iterator, error)
NewDedupingIterator returns a dedupingIterator. This iterator wraps another iterator and dedupes consecutive objects with the same ID using the ObjectCombiner.
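As a hedged sketch, here is how the two iterator constructors on this page compose: a raw iterator over a stream of objects, wrapped so consecutive duplicates are combined. The empty dataEncoding string is only a placeholder.

// Assumed imports: "io", plus the common, model, and v2 packages.
func dedupedIterator(r io.Reader, combiner model.ObjectCombiner) (common.Iterator, error) {
	// Most basic iterator over the raw objects in r.
	inner := v2.NewIterator(r, v2.NewObjectReaderWriter())
	// Wrap it so consecutive objects sharing an ID are combined.
	return v2.NewDedupingIterator(inner, combiner, "" /* dataEncoding placeholder */)
}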
func NewIndexReader ¶
func NewIndexReader(r backend.ContextReader, pageSizeBytes int, totalRecords int) (common.IndexReader, error)
NewIndexReader returns an index reader for a byte slice of marshalled ordered records. The index has not changed between v0 and v1.
func NewIndexWriter ¶
func NewIndexWriter(pageSizeBytes int) common.IndexWriter
NewIndexWriter returns an index writer that writes to the provided io.Writer. The index has not changed between v0 and v1.
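A sketch of how the index writer and reader pair up: the writer marshals a sorted record slice to bytes, and the reader serves lookups over those bytes. The Write and At method names on common.IndexWriter/common.IndexReader and the backend.NewContextReaderWithAllReader helper are assumptions; check the common and backend packages for the exact interfaces.

func indexRoundTrip(ctx context.Context, records []common.Record) error {
	const pageSize = 4 * 1024
	iw := v2.NewIndexWriter(pageSize)
	indexBytes, err := iw.Write(records) // Write: assumed common.IndexWriter method
	if err != nil {
		return err
	}
	// Adapt the marshalled bytes to a backend.ContextReader (helper name assumed).
	cr := backend.NewContextReaderWithAllReader(bytes.NewReader(indexBytes))
	ir, err := v2.NewIndexReader(cr, pageSize, len(records))
	if err != nil {
		return err
	}
	rec, err := ir.At(ctx, 0) // At: assumed common.IndexReader method
	if err != nil {
		return err
	}
	fmt.Printf("first record: %+v\n", rec)
	return nil
}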
func NewIterator ¶ added in v1.4.0
NewIterator returns the most basic iterator. It iterates over raw objects.
func NewMultiblockIterator ¶ added in v1.4.0
func NewMultiblockIterator(ctx context.Context, inputs []common.Iterator, bufferSize int, combiner model.ObjectCombiner, dataEncoding string, logger log.Logger) common.Iterator
NewMultiblockIterator creates a new multiblock iterator. It iterates concurrently in a separate goroutine, and results are buffered. Traces are deduped and combined using the object combiner.
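A compaction-style merge is the typical use. The sketch below assumes common.Iterator exposes Next(ctx) (common.ID, []byte, error) returning io.EOF at end of stream plus Close(), and that log.Logger is the logger type named in the signature above; both come from outside this page.

func mergeBlocks(ctx context.Context, inputs []common.Iterator, combiner model.ObjectCombiner, logger log.Logger) error {
	// Read ahead in a goroutine, buffering up to 1000 combined objects.
	iter := v2.NewMultiblockIterator(ctx, inputs, 1000, combiner, "" /* dataEncoding placeholder */, logger)
	defer iter.Close()

	for {
		id, obj, err := iter.Next(ctx) // Next: assumed common.Iterator method
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		_, _ = id, obj // hand the deduped object to an appender or block writer
	}
}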
func NewObjectReaderWriter ¶
func NewObjectReaderWriter() common.ObjectReaderWriter
func NewPrefetchIterator ¶ added in v1.4.0
func NewPrefetchIterator(ctx context.Context, iter common.Iterator, bufferSize int) common.Iterator
NewPrefetchIterator creates a new prefetch iterator that wraps the given iterator. It iterates concurrently in a separate goroutine, and results are buffered.
func NewRecordIterator ¶ added in v1.4.0
func NewRecordIterator(r []common.Record, dataR common.DataReader, objectRW common.ObjectReaderWriter) common.Iterator
NewRecordIterator returns a recordIterator. This iterator is used for iterating through
a series of objects by reading them one at a time from Records.
func NewRecordReaderWriter ¶
func NewRecordReaderWriter() common.RecordReaderWriter
Types ¶
type Appender ¶ added in v1.4.0
type Appender interface {
	Append(common.ID, []byte) error
	Complete() error
	Records() []common.Record
	RecordsForID(common.ID) []common.Record
	Length() int
	DataLength() uint64
}
Appender is capable of tracking objects and IDs that are added to it.
func NewAppender ¶ added in v1.4.0
func NewAppender(dataWriter common.DataWriter) Appender
NewAppender returns an appender. This appender simply appends new objects
to the provided dataWriter.
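To make the Appender/DataWriter relationship concrete, a sketch that appends objects and collects the resulting records. backend.EncNone as the uncompressed encoding constant is an assumption, as is the expectation that callers append IDs in sorted order.

func appendObjects(ids []common.ID, objs [][]byte) ([]common.Record, error) {
	buf := &bytes.Buffer{}
	dw, err := v2.NewDataWriter(buf, backend.EncNone) // EncNone: assumed constant
	if err != nil {
		return nil, err
	}
	app := v2.NewAppender(dw)
	for i, id := range ids { // ids assumed pre-sorted
		if err := app.Append(id, objs[i]); err != nil {
			return nil, err
		}
	}
	if err := app.Complete(); err != nil {
		return nil, err
	}
	// Records now describe where each object landed in buf.
	return app.Records(), nil
}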
func NewBufferedAppender ¶ added in v1.4.0
func NewBufferedAppender(writer common.DataWriter, indexDownsample int, totalObjectsEstimate int) (Appender, error)
NewBufferedAppender returns a bufferedAppender. This appender writes to the provided writer and also builds a downsampled records slice.
func NewRecordAppender ¶ added in v1.4.0
NewRecordAppender returns an appender that stores records only.
type BackendBlock ¶ added in v1.4.0
type BackendBlock struct {
// contains filtered or unexported fields
}
BackendBlock represents a block already in the backend.
func NewBackendBlock ¶ added in v1.4.0
NewBackendBlock returns a BackendBlock for the given backend.BlockMeta
func (*BackendBlock) BlockMeta ¶ added in v1.4.0
func (b *BackendBlock) BlockMeta() *backend.BlockMeta
func (*BackendBlock) FindTraceByID ¶ added in v1.4.0
func (b *BackendBlock) FindTraceByID(ctx context.Context, id common.ID) (*tempopb.Trace, error)
func (*BackendBlock) Iterator ¶ added in v1.4.0
func (b *BackendBlock) Iterator(chunkSizeBytes uint32) (common.Iterator, error)
Iterator returns an Iterator that iterates over the objects in the block from the backend
func (*BackendBlock) NewIndexReader ¶ added in v1.4.0
func (b *BackendBlock) NewIndexReader() (common.IndexReader, error)
func (*BackendBlock) Search ¶ added in v1.4.0
func (b *BackendBlock) Search(ctx context.Context, req *tempopb.SearchRequest, opt common.SearchOptions) (resp *tempopb.SearchResponse, err error)
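A sketch of fetching one trace from a completed block. FindTraceByID is as documented above; the NewBackendBlock parameter list is an assumption inferred from its description.

func findTrace(ctx context.Context, meta *backend.BlockMeta, r backend.Reader, id common.ID) (*tempopb.Trace, error) {
	b, err := v2.NewBackendBlock(meta, r) // parameter list assumed
	if err != nil {
		return nil, err
	}
	return b.FindTraceByID(ctx, id)
}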
type BufferedAppenderGeneric ¶ added in v1.4.0
type BufferedAppenderGeneric struct {
// contains filtered or unexported fields
}
BufferedAppenderGeneric buffers objects into pages and builds a downsampled index.
func NewBufferedAppenderGeneric ¶ added in v1.4.0
func NewBufferedAppenderGeneric(writer common.DataWriterGeneric, maxPageSize int) *BufferedAppenderGeneric
NewBufferedAppenderGeneric returns a BufferedAppenderGeneric. This appender writes to the provided writer and also builds a downsampled records slice.
func (*BufferedAppenderGeneric) Append ¶ added in v1.4.0
Append appends the id/object to the writer. Note that the caller gives up ownership of the byte arrays backing the two slices; pass in copies if this is a problem.
func (*BufferedAppenderGeneric) Complete ¶ added in v1.4.0
func (a *BufferedAppenderGeneric) Complete(ctx context.Context) error
Complete flushes all buffers and releases resources
func (*BufferedAppenderGeneric) Records ¶ added in v1.4.0
func (a *BufferedAppenderGeneric) Records() []common.Record
Records returns a slice of the current records
type Compactor ¶ added in v1.4.0
type Compactor struct {
// contains filtered or unexported fields
}
func NewCompactor ¶ added in v1.4.0
func NewCompactor(opts common.CompactionOptions) *Compactor
type Encoding ¶ added in v1.4.0
type Encoding struct{}
Encoding is the v2 encoding.
func (Encoding) CreateBlock ¶ added in v1.5.0
func (v Encoding) CreateBlock(ctx context.Context, cfg *common.BlockConfig, meta *backend.BlockMeta, ...) (*backend.BlockMeta, error)
func (Encoding) NewCompactor ¶ added in v1.4.0
func (v Encoding) NewCompactor(opts common.CompactionOptions) common.Compactor
type GzipPool ¶ added in v1.2.0
type GzipPool struct {
// contains filtered or unexported fields
}
GzipPool is a GNU zip compression pool.
func (*GzipPool) GetReader ¶ added in v1.2.0
func (pool *GzipPool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*GzipPool) GetWriter ¶ added in v1.2.0
func (pool *GzipPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*GzipPool) PutWriter ¶ added in v1.2.0
func (pool *GzipPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*GzipPool) ResetReader ¶ added in v1.2.0
func (pool *GzipPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*GzipPool) ResetWriter ¶ added in v1.2.0
func (pool *GzipPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
type LZ4Pool ¶ added in v1.2.0
type LZ4Pool struct {
// contains filtered or unexported fields
}
LZ4Pool is a pool...of lz4s...
func (*LZ4Pool) GetReader ¶ added in v1.2.0
func (pool *LZ4Pool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*LZ4Pool) GetWriter ¶ added in v1.2.0
func (pool *LZ4Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*LZ4Pool) PutWriter ¶ added in v1.2.0
func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*LZ4Pool) ResetReader ¶ added in v1.2.0
func (pool *LZ4Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*LZ4Pool) ResetWriter ¶ added in v1.2.0
func (pool *LZ4Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
type NoopPool ¶ added in v1.2.0
type NoopPool struct{}
NoopPool is for people who think compression is for the weak
func (*NoopPool) GetReader ¶ added in v1.2.0
func (pool *NoopPool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*NoopPool) GetWriter ¶ added in v1.2.0
func (pool *NoopPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*NoopPool) PutWriter ¶ added in v1.2.0
func (pool *NoopPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*NoopPool) ResetReader ¶ added in v1.2.0
func (pool *NoopPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*NoopPool) ResetWriter ¶ added in v1.2.0
func (pool *NoopPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
type PagedFinder ¶ added in v1.4.0
type PagedFinder struct {
// contains filtered or unexported fields
}
func NewPagedFinder ¶ added in v1.4.0
func NewPagedFinder(index common.IndexReader, r common.DataReader, combiner model.ObjectCombiner, objectRW common.ObjectReaderWriter, dataEncoding string) *PagedFinder
NewPagedFinder returns a PagedFinder. This finder is used for searching a set of records and returning an object. If a set of consecutive records has matching IDs, they will be combined using the ObjectCombiner.
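A sketch of wiring a PagedFinder from the pieces above; the constructor signature is as documented, while the Find method on the result is an assumption (it likely satisfies common.Finder).

func findByID(ctx context.Context, index common.IndexReader, dr common.DataReader, combiner model.ObjectCombiner, id common.ID) ([]byte, error) {
	finder := v2.NewPagedFinder(index, dr, combiner, v2.NewObjectReaderWriter(), "" /* dataEncoding placeholder */)
	return finder.Find(ctx, id) // Find: assumed method
}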
type ReaderPool ¶ added in v1.2.0
type ReaderPool interface {
	GetReader(io.Reader) (io.Reader, error)
	PutReader(io.Reader)
	ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
	Encoding() backend.Encoding
}
ReaderPool is similar to WriterPool but for reading chunks.
type S2Pool ¶ added in v1.2.0
type S2Pool struct {
// contains filtered or unexported fields
}
S2Pool is one s short of s3
func (*S2Pool) GetReader ¶ added in v1.2.0
func (pool *S2Pool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*S2Pool) GetWriter ¶ added in v1.2.0
func (pool *S2Pool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*S2Pool) PutWriter ¶ added in v1.2.0
func (pool *S2Pool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*S2Pool) ResetReader ¶ added in v1.2.0
func (pool *S2Pool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*S2Pool) ResetWriter ¶ added in v1.2.0
func (pool *S2Pool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
type SnappyPool ¶ added in v1.2.0
type SnappyPool struct {
// contains filtered or unexported fields
}
SnappyPool is a really cool looking pool. Dang that pool is _snappy_.
func (*SnappyPool) Encoding ¶ added in v1.2.0
func (pool *SnappyPool) Encoding() backend.Encoding
Encoding implements WriterPool and ReaderPool
func (*SnappyPool) GetReader ¶ added in v1.2.0
func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*SnappyPool) GetWriter ¶ added in v1.2.0
func (pool *SnappyPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*SnappyPool) PutReader ¶ added in v1.2.0
func (pool *SnappyPool) PutReader(reader io.Reader)
PutReader places a CompressionReader back in the pool.
func (*SnappyPool) PutWriter ¶ added in v1.2.0
func (pool *SnappyPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*SnappyPool) ResetReader ¶ added in v1.2.0
func (pool *SnappyPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*SnappyPool) ResetWriter ¶ added in v1.2.0
func (pool *SnappyPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
type StreamingBlock ¶ added in v1.4.0
type StreamingBlock struct {
// contains filtered or unexported fields
}
func NewStreamingBlock ¶ added in v1.4.0
func NewStreamingBlock(cfg *common.BlockConfig, id uuid.UUID, tenantID string, metas []*backend.BlockMeta, estimatedObjects int) (*StreamingBlock, error)
NewStreamingBlock creates a new streaming block. Objects are appended one at a time to the backend.
func (*StreamingBlock) AddObject ¶ added in v1.4.0
func (c *StreamingBlock) AddObject(id common.ID, object []byte) error
func (*StreamingBlock) BlockMeta ¶ added in v1.4.0
func (c *StreamingBlock) BlockMeta() *backend.BlockMeta
func (*StreamingBlock) Complete ¶ added in v1.4.0
func (c *StreamingBlock) Complete(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (int, error)
Complete writes the compactor metadata and closes all buffers and appenders.
func (*StreamingBlock) CurrentBufferLength ¶ added in v1.4.0
func (c *StreamingBlock) CurrentBufferLength() int
func (*StreamingBlock) CurrentBufferedObjects ¶ added in v1.4.0
func (c *StreamingBlock) CurrentBufferedObjects() int
func (*StreamingBlock) FlushBuffer ¶ added in v1.4.0
func (c *StreamingBlock) FlushBuffer(ctx context.Context, tracker backend.AppendTracker, w backend.Writer) (backend.AppendTracker, int, error)
FlushBuffer flushes any existing objects to the backend
func (*StreamingBlock) Length ¶ added in v1.4.0
func (c *StreamingBlock) Length() int
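Putting the StreamingBlock methods together, a hedged sketch of the append/flush/complete lifecycle using only the signatures documented above. uuid.New() is from github.com/google/uuid; passing nil for metas and "single-tenant" for the tenant ID are illustrative assumptions.

func writeBlock(ctx context.Context, cfg *common.BlockConfig, w backend.Writer, ids []common.ID, objs [][]byte) error {
	block, err := v2.NewStreamingBlock(cfg, uuid.New(), "single-tenant", nil, len(objs))
	if err != nil {
		return err
	}
	var tracker backend.AppendTracker
	for i, id := range ids {
		if err := block.AddObject(id, objs[i]); err != nil {
			return err
		}
		// Flush to the backend once the in-memory buffer grows past the default.
		if block.CurrentBufferLength() > v2.DefaultFlushSizeBytes {
			if tracker, _, err = block.FlushBuffer(ctx, tracker, w); err != nil {
				return err
			}
		}
	}
	// Complete flushes what remains and writes the block's metadata.
	_, err = block.Complete(ctx, tracker, w)
	return err
}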
type WriterPool ¶ added in v1.2.0
type WriterPool interface {
	GetWriter(io.Writer) (io.WriteCloser, error)
	PutWriter(io.WriteCloser)
	ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
	Encoding() backend.Encoding
}
WriterPool is a pool of io.Writer. This is used by every chunk to avoid unnecessary allocations.
func GetWriterPool ¶ added in v1.2.0
func GetWriterPool(enc backend.Encoding) (WriterPool, error)
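Because the two pool interfaces are fully spelled out above, a compression round trip is easy to sketch. backend.EncGZIP as the gzip encoding constant is an assumption; the package-level Gzip variable doubles as the matching ReaderPool.

func roundTrip(data []byte) ([]byte, error) {
	var buf bytes.Buffer

	wp, err := v2.GetWriterPool(backend.EncGZIP) // EncGZIP: assumed constant
	if err != nil {
		return nil, err
	}
	w, err := wp.GetWriter(&buf)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	wp.PutWriter(w) // return the writer for reuse

	r, err := v2.Gzip.GetReader(&buf)
	if err != nil {
		return nil, err
	}
	defer v2.Gzip.PutReader(r)
	return io.ReadAll(r)
}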
type ZstdPool ¶ added in v1.2.0
type ZstdPool struct{}
ZstdPool is a zstd compression pool
func (*ZstdPool) GetReader ¶ added in v1.2.0
func (pool *ZstdPool) GetReader(src io.Reader) (io.Reader, error)
GetReader gets or creates a new CompressionReader and resets it to read from src.
func (*ZstdPool) GetWriter ¶ added in v1.2.0
func (pool *ZstdPool) GetWriter(dst io.Writer) (io.WriteCloser, error)
GetWriter gets or creates a new CompressionWriter and resets it to write to dst.
func (*ZstdPool) PutWriter ¶ added in v1.2.0
func (pool *ZstdPool) PutWriter(writer io.WriteCloser)
PutWriter places a CompressionWriter back in the pool.
func (*ZstdPool) ResetReader ¶ added in v1.2.0
func (pool *ZstdPool) ResetReader(src io.Reader, resetReader io.Reader) (io.Reader, error)
ResetReader implements ReaderPool.
func (*ZstdPool) ResetWriter ¶ added in v1.2.0
func (pool *ZstdPool) ResetWriter(dst io.Writer, resetWriter io.WriteCloser) (io.WriteCloser, error)
ResetWriter implements WriterPool
Source Files ¶
- appender.go
- appender_buffered.go
- appender_buffered_generic.go
- appender_record.go
- backend_block.go
- block.go
- compactor.go
- create_block.go
- data_reader.go
- data_writer.go
- encoding.go
- finder_paged.go
- index_reader.go
- index_writer.go
- iterator.go
- iterator_deduping.go
- iterator_multiblock.go
- iterator_paged.go
- iterator_prefetch.go
- iterator_record.go
- object.go
- page.go
- page_header.go
- pool.go
- record.go
- streaming_block.go