Documentation ¶
Index ¶
- Constants
- Variables
- func Compact(ctx context.Context, blocks []*metastorev1.BlockMeta, storage objstore.Bucket, ...) (m []*metastorev1.BlockMeta, err error)
- func NewMergeRowProfileIterator(src []*Dataset) (iter.Iterator[ProfileEntry], error)
- func NewProfileRowIterator(s *Dataset) (iter.Iterator[ProfileEntry], error)
- func ObjectPath(md *metastorev1.BlockMeta) string
- type CompactionOption
- type CompactionPlan
- type Dataset
- func (s *Dataset) Close() error
- func (s *Dataset) CloseWithError(err error) (closeErr error)
- func (s *Dataset) Index() phlaredb.IndexReader
- func (s *Dataset) Meta() *metastorev1.Dataset
- func (s *Dataset) Open(ctx context.Context, sections ...Section) error
- func (s *Dataset) ProfileRowReader() parquet.RowReader
- func (s *Dataset) Profiles() *ParquetFile
- func (s *Dataset) Symbols() symdb.SymbolsReader
- type DatasetOption
- type DedupeProfileRowIterator
- type Object
- type ObjectOption
- type Objects
- type ParquetFile
- type ProfileEntry
- type Section
- type Writer
Constants ¶
const ( DirPathSegment = "segments/" DirPathBlock = "blocks/" DirNameAnonTenant = tenant.DefaultTenantID FileNameProfilesParquet = "profiles.parquet" FileNameDataObject = "block.bin" )
Variables ¶
var ( ErrNoBlocksToMerge = fmt.Errorf("no blocks to merge") ErrShardMergeMismatch = fmt.Errorf("only blocks from the same shard can be merged") )
Functions ¶
func Compact ¶
func Compact( ctx context.Context, blocks []*metastorev1.BlockMeta, storage objstore.Bucket, options ...CompactionOption, ) (m []*metastorev1.BlockMeta, err error)
func NewMergeRowProfileIterator ¶
func NewMergeRowProfileIterator(src []*Dataset) (iter.Iterator[ProfileEntry], error)
func NewProfileRowIterator ¶
func NewProfileRowIterator(s *Dataset) (iter.Iterator[ProfileEntry], error)
func ObjectPath ¶
func ObjectPath(md *metastorev1.BlockMeta) string
Types ¶
type CompactionOption ¶
type CompactionOption func(*compactionConfig)
func WithCompactionDestination ¶
func WithCompactionDestination(storage objstore.Bucket) CompactionOption
func WithCompactionObjectOptions ¶
func WithCompactionObjectOptions(options ...ObjectOption) CompactionOption
func WithCompactionTempDir ¶
func WithCompactionTempDir(tempdir string) CompactionOption
type CompactionPlan ¶
type CompactionPlan struct {
// contains filtered or unexported fields
}
func PlanCompaction ¶
func PlanCompaction(objects Objects) ([]*CompactionPlan, error)
func (*CompactionPlan) Compact ¶
func (b *CompactionPlan) Compact(ctx context.Context, dst objstore.Bucket, tmpdir string) (m *metastorev1.BlockMeta, err error)
func (*CompactionPlan) Estimate ¶
func (b *CompactionPlan) Estimate()
type Dataset ¶
type Dataset struct {
// contains filtered or unexported fields
}
func NewDataset ¶
func NewDataset(meta *metastorev1.Dataset, obj *Object) *Dataset
func (*Dataset) CloseWithError ¶
CloseWithError closes the tenant dataset and disposes all the resources associated with it.
Any further attempts to open the dataset will return the provided error.
func (*Dataset) Index ¶
func (s *Dataset) Index() phlaredb.IndexReader
func (*Dataset) Meta ¶
func (s *Dataset) Meta() *metastorev1.Dataset
func (*Dataset) Open ¶
Open opens the dataset, initializing the sections specified.
Open may be called multiple times concurrently, but the dataset is only initialized once. While it is possible to open the dataset repeatedly after close, the caller must pass the failure reason to the CloseWithError call, preventing further use, if applicable.
func (*Dataset) ProfileRowReader ¶
func (s *Dataset) ProfileRowReader() parquet.RowReader
func (*Dataset) Profiles ¶
func (s *Dataset) Profiles() *ParquetFile
func (*Dataset) Symbols ¶
func (s *Dataset) Symbols() symdb.SymbolsReader
type DatasetOption ¶
type DatasetOption func(*Dataset)
func WithDatasetMaxSizeLoadInMemory ¶
func WithDatasetMaxSizeLoadInMemory(size int) DatasetOption
type DedupeProfileRowIterator ¶
type DedupeProfileRowIterator struct { iter.Iterator[ProfileEntry] // contains filtered or unexported fields }
func (*DedupeProfileRowIterator) Next ¶
func (it *DedupeProfileRowIterator) Next() bool
type Object ¶
type Object struct {
// contains filtered or unexported fields
}
Object represents a block or a segment in object storage.
func NewObject ¶
func NewObject(storage objstore.Bucket, meta *metastorev1.BlockMeta, opts ...ObjectOption) *Object
func (*Object) CloseWithError ¶
CloseWithError closes the object, releasing all the acquired resources, once the last reference is released. If the provided error is not nil, the object will be marked as failed, preventing any further use.
func (*Object) Meta ¶
func (obj *Object) Meta() *metastorev1.BlockMeta
func (*Object) Open ¶
Open opens the object, loading the data into memory if it's small enough.
Open may be called multiple times concurrently, but the object is only initialized once. While it is possible to open the object repeatedly after close, the caller must pass the failure reason to the "CloseWithError" call, preventing further use, if applicable.
type ObjectOption ¶
type ObjectOption func(*Object)
func WithObjectDownload ¶
func WithObjectDownload(dir string) ObjectOption
func WithObjectMaxSizeLoadInMemory ¶
func WithObjectMaxSizeLoadInMemory(size int) ObjectOption
func WithObjectPath ¶
func WithObjectPath(path string) ObjectOption
type Objects ¶
type Objects []*Object
func ObjectsFromMetas ¶
func ObjectsFromMetas(storage objstore.Bucket, blocks []*metastorev1.BlockMeta, options ...ObjectOption) Objects
ObjectsFromMetas binds block metas to corresponding objects in the storage.
type ParquetFile ¶
type ParquetFile struct { *parquet.File // contains filtered or unexported fields }
func (*ParquetFile) Close ¶
func (f *ParquetFile) Close() error
func (*ParquetFile) RowReader ¶
func (f *ParquetFile) RowReader() *parquet.Reader
type ProfileEntry ¶
type ProfileEntry struct { Dataset *Dataset Timestamp int64 Fingerprint model.Fingerprint Labels phlaremodel.Labels Row schemav1.ProfileRow }
type Writer ¶
type Writer struct {
// contains filtered or unexported fields
}
func (*Writer) ReadFromFile ¶
ReadFromFile reads the block from the file located in the directory Dir.