Documentation ¶
Index ¶
- Constants
- func BloomName(shard int) string
- func ShardKeyForTraceID(traceID []byte, shardCount int) int
- func SortRecords(records []Record)
- func ValidateConfig(b *BlockConfig) error
- func ValidateShardCount(shardCount int) int
- type BackendBlock
- type BlockConfig
- type CacheControl
- type CompactionOptions
- type Compactor
- type DataReader
- type DataWriter
- type DataWriterGeneric
- type Finder
- type ID
- type IndexReader
- type IndexWriter
- type Iterator
- type ObjectReaderWriter
- type Record
- type RecordReaderWriter
- type Records
- type SearchOptions
- type Searcher
- type ShardedBloomFilter
Constants ¶
const (
	// NameObjects names the backend data object
	NameObjects = "data"
	// NameIndex names the backend index object
	NameIndex = "index"
)
Variables ¶
This section is empty.
Functions ¶
func ShardKeyForTraceID ¶
func ShardKeyForTraceID(traceID []byte, shardCount int) int
func SortRecords ¶ added in v1.0.0
func SortRecords(records []Record)
SortRecords sorts a slice of record pointers
func ValidateConfig ¶ added in v1.4.0
func ValidateConfig(b *BlockConfig) error
ValidateConfig returns nil if the config is valid, and a non-nil error otherwise
func ValidateShardCount ¶ added in v1.0.0
func ValidateShardCount(shardCount int) int
Retained for backward compatibility
Types ¶
type BackendBlock ¶ added in v1.5.0
type BlockConfig ¶ added in v1.4.0
type BlockConfig struct {
	IndexDownsampleBytes int              `yaml:"index_downsample_bytes"`
	IndexPageSizeBytes   int              `yaml:"index_page_size_bytes"`
	BloomFP              float64          `yaml:"bloom_filter_false_positive"`
	BloomShardSizeBytes  int              `yaml:"bloom_filter_shard_size_bytes"`
	Version              string           `yaml:"version"`
	Encoding             backend.Encoding `yaml:"encoding"`
	SearchEncoding       backend.Encoding `yaml:"search_encoding"`
	SearchPageSizeBytes  int              `yaml:"search_page_size_bytes"`

	// parquet fields
	RowGroupSizeBytes int `yaml:"row_group_size_bytes"`
}
BlockConfig holds configuration options for newly created blocks
type CacheControl ¶ added in v1.5.0
type CompactionOptions ¶ added in v1.4.0
type CompactionOptions struct {
	ChunkSizeBytes     uint32
	FlushSizeBytes     uint32
	IteratorBufferSize int // How many traces to prefetch async.
	MaxBytesPerTrace   int
	OutputBlocks       uint8
	BlockConfig        BlockConfig
	Combiner           model.ObjectCombiner

	ObjectsCombined func(compactionLevel, objects int)
	ObjectsWritten  func(compactionLevel, objects int)
	BytesWritten    func(compactionLevel, bytes int)
	SpansDiscarded  func(spans int)
}
type DataReader ¶ added in v0.7.0
type DataReader interface {
	Read(context.Context, []Record, [][]byte, []byte) ([][]byte, []byte, error)
	Close()

	// NextPage can be used to iterate at a page at a time. May return ErrUnsupported for older formats
	// NextPage takes a reusable buffer to read the page into and returns it in case it needs to resize
	// NextPage returns the uncompressed page buffer ready for object iteration and the length of the
	// original page from the page header. len(page) might not equal page len!
	NextPage([]byte) ([]byte, uint32, error)
}
DataReader returns a slice of pages in the encoding/v0 format referenced by the slice of Records passed in. The length of the returned slice is guaranteed to be equal to the length of the provided records unless error is non-nil. DataReader is the primary abstraction point for supporting multiple data formats.
type DataWriter ¶ added in v0.7.0
type DataWriter interface {
	// Write writes the passed ID/byte to the current page
	Write(ID, []byte) (int, error)

	// CutPage completes the current page and start a new one. It
	// returns the length in bytes of the cut page.
	CutPage() (int, error)

	// Complete must be called when the operation DataWriter is done.
	Complete() error
}
DataWriter is used to write paged data to the backend
type DataWriterGeneric ¶ added in v1.2.0
type DataWriterGeneric interface {
	// Write writes the passed ID/obj to the current page
	Write(context.Context, ID, interface{}) (int, error)

	// CutPage completes the current page and start a new one. It
	// returns the length in bytes of the cut page.
	CutPage(context.Context) (int, error)

	// Complete must be called when the operation DataWriter is done.
	Complete(context.Context) error
}
DataWriterGeneric writes objects instead of byte slices
type IndexReader ¶
type IndexReader interface {
	At(ctx context.Context, i int) (*Record, error)
	Find(ctx context.Context, id ID) (*Record, int, error)
}
IndexReader is used to abstract away the details of an index. Currently only used in the paged finder, it could eventually provide a way to support multiple index formats. IndexReader is the primary abstraction point for supporting multiple index formats.
type IndexWriter ¶ added in v0.7.0
type IndexWriter interface {
	// Write returns a byte representation of the provided Records
	Write([]Record) ([]byte, error)
}
IndexWriter is used to write paged indexes
type ObjectReaderWriter ¶ added in v0.7.0
type ObjectReaderWriter interface {
	MarshalObjectToWriter(id ID, b []byte, w io.Writer) (int, error)
	UnmarshalObjectFromReader(r io.Reader) (ID, []byte, error)
	UnmarshalAndAdvanceBuffer(buffer []byte) ([]byte, ID, []byte, error)
}
ObjectReaderWriter represents a library of methods to read and write at the object level
type RecordReaderWriter ¶ added in v0.7.0
type RecordReaderWriter interface {
	MarshalRecords(records []Record) ([]byte, error)
	MarshalRecordsToBuffer(records []Record, buffer []byte) error
	RecordCount(b []byte) int
	UnmarshalRecord(buff []byte) Record
	RecordLength() int
}
RecordReaderWriter represents a library of methods to read and write records
type Records ¶
type Records []Record
Records is a slice of Record
type SearchOptions ¶ added in v1.4.0
type SearchOptions struct {
	ChunkSizeBytes     uint32 // Buffer size to read from backend storage.
	StartPage          int    // Controls searching only a subset of the block. Which page to begin searching at.
	TotalPages         int    // Controls searching only a subset of the block. How many pages to search.
	MaxBytes           int    // Max allowable trace size in bytes. Traces exceeding this are not searched.
	PrefetchTraceCount int    // How many traces to prefetch async.
	ReadBufferCount    int
	ReadBufferSize     int
	CacheControl       CacheControl
}
type Searcher ¶ added in v1.4.0
type Searcher interface {
Search(ctx context.Context, req *tempopb.SearchRequest, opts SearchOptions) (*tempopb.SearchResponse, error)
}
type ShardedBloomFilter ¶
type ShardedBloomFilter struct {
// contains filtered or unexported fields
}
func NewBloom ¶ added in v1.0.0
func NewBloom(fp float64, shardSize, estimatedObjects uint) *ShardedBloomFilter
NewBloom creates a ShardedBloomFilter
func (*ShardedBloomFilter) Add ¶
func (b *ShardedBloomFilter) Add(traceID []byte)
func (*ShardedBloomFilter) GetShardCount ¶ added in v1.0.0
func (b *ShardedBloomFilter) GetShardCount() int
func (*ShardedBloomFilter) Marshal ¶ added in v1.0.0
func (b *ShardedBloomFilter) Marshal() ([][]byte, error)
Marshal is a wrapper around bloom.WriteTo
func (*ShardedBloomFilter) Test ¶
func (b *ShardedBloomFilter) Test(traceID []byte) bool
Test implements bloom.Test -> required only for testing