symdb

package
v1.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 16, 2023 License: AGPL-3.0 Imports: 40 Imported by: 0

Documentation

Index

Constants

View Source
const (
	DefaultDirName = "symbols"

	IndexFileName       = "index.symdb"
	StacktracesFileName = "stacktraces.symdb"
)
View Source
const (
	FormatV1
	FormatV2
)
View Source
const HeaderSize = int(unsafe.Sizeof(Header{}))

Variables

View Source
var (
	ErrInvalidSize    = &FormatError{fmt.Errorf("invalid size")}
	ErrInvalidCRC     = &FormatError{fmt.Errorf("invalid CRC")}
	ErrInvalidMagic   = &FormatError{fmt.Errorf("invalid magic number")}
	ErrUnknownVersion = &FormatError{fmt.Errorf("unknown version")}
)
View Source
var ErrInvalidStacktraceRange = fmt.Errorf("invalid range: stack traces can't be resolved")
View Source
var ErrPartitionNotFound = fmt.Errorf("partition not found")

Functions

This section is empty.

Types

type ChunkEncoding

type ChunkEncoding byte
const (
	ChunkEncodingGroupVarint ChunkEncoding
)

type Config

type Config struct {
	Dir         string
	Stacktraces StacktracesConfig
	Parquet     ParquetConfig
}

func DefaultConfig

func DefaultConfig() *Config

func (*Config) WithDirectory

func (c *Config) WithDirectory(dir string) *Config

func (*Config) WithParquetConfig

func (c *Config) WithParquetConfig(pc ParquetConfig) *Config

type FormatError

type FormatError struct {
	// contains filtered or unexported fields
}

func (*FormatError) Error

func (e *FormatError) Error() string
type Header struct {
	Magic    [4]byte
	Version  uint32
	Reserved [8]byte // Reserved for future use.
}

func (*Header) MarshalBinary

func (h *Header) MarshalBinary() ([]byte, error)

func (*Header) UnmarshalBinary

func (h *Header) UnmarshalBinary(b []byte) error

type Helper

type Helper[M schemav1.Models, K comparable] interface {
	// contains filtered or unexported methods
}

type IndexFile

type IndexFile struct {
	Header Header
	TOC    TOC

	// Version-specific parts.
	PartitionHeaders PartitionHeaders

	CRC uint32
}

func ReadIndexFile

func ReadIndexFile(b []byte) (f IndexFile, err error)

func (*IndexFile) WriteTo

func (f *IndexFile) WriteTo(dst io.Writer) (n int64, err error)

type MemoryStats

type MemoryStats struct {
	StacktracesSize uint64
	LocationsSize   uint64
	MappingsSize    uint64
	FunctionsSize   uint64
	StringsSize     uint64
}

func (*MemoryStats) MemorySize

func (m *MemoryStats) MemorySize() uint64

type ParquetConfig

type ParquetConfig struct {
	MaxBufferRowCount int
}

type PartitionHeader

type PartitionHeader struct {
	Partition uint64

	StacktraceChunks []StacktraceChunkHeader
	Locations        []RowRangeReference
	Mappings         []RowRangeReference
	Functions        []RowRangeReference
	Strings          []RowRangeReference
}

func (*PartitionHeader) Size

func (h *PartitionHeader) Size() int64

type PartitionHeaders

type PartitionHeaders []*PartitionHeader

func (*PartitionHeaders) Size

func (h *PartitionHeaders) Size() int64

func (*PartitionHeaders) Unmarshal

func (h *PartitionHeaders) Unmarshal(b []byte) error

func (*PartitionHeaders) WriteTo

func (h *PartitionHeaders) WriteTo(dst io.Writer) (_ int64, err error)

type PartitionReader

type PartitionReader interface {
	WriteStats(s *PartitionStats)
	Symbols() *Symbols
	Release()
}

type PartitionStats

type PartitionStats struct {
	StacktracesTotal int
	MaxStacktraceID  int
	LocationsTotal   int
	MappingsTotal    int
	FunctionsTotal   int
	StringsTotal     int
}

type PartitionWriter

type PartitionWriter struct {
	// contains filtered or unexported fields
}

func (*PartitionWriter) AppendFunctions

func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []*schemav1.InMemoryFunction)

func (*PartitionWriter) AppendLocations

func (p *PartitionWriter) AppendLocations(dst []uint32, locations []*schemav1.InMemoryLocation)

func (*PartitionWriter) AppendMappings

func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []*schemav1.InMemoryMapping)

func (*PartitionWriter) AppendStacktraces

func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)

func (*PartitionWriter) AppendStrings

func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)

func (*PartitionWriter) Release

func (p *PartitionWriter) Release()

func (*PartitionWriter) ResolveChunk

func (p *PartitionWriter) ResolveChunk(dst StacktraceInserter, sr StacktracesRange) error

func (*PartitionWriter) ResolveStacktraceLocations

func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error

func (*PartitionWriter) Symbols

func (p *PartitionWriter) Symbols() *Symbols

func (*PartitionWriter) WriteProfileSymbols

func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile

func (*PartitionWriter) WriteStats

func (p *PartitionWriter) WriteStats(s *PartitionStats)

type Reader

type Reader struct {
	// contains filtered or unexported fields
}

func Open

func (*Reader) Close

func (r *Reader) Close() error

func (*Reader) Load

func (r *Reader) Load(ctx context.Context) error

Load loads all the partitions into memory. Partitions are kept in memory during the whole lifetime of the Reader object.

The main user of the function is Rewriter: since it is not known in advance which partitions will be fetched, but it is known that all (or the majority) of them will be requested, preloading all of them is more efficient, though it consumes more memory.

func (*Reader) Partition

func (r *Reader) Partition(ctx context.Context, partition uint64) (PartitionReader, error)

type Resolver

type Resolver struct {
	// contains filtered or unexported fields
}

Resolver converts stack trace samples to one of the profile formats, such as tree or pprof.

Resolver asynchronously loads symbols for each partition as they are added with AddSamples or Partition calls.

A new Resolver must be created for each profile.

func NewResolver

func NewResolver(ctx context.Context, s SymbolsReader) *Resolver

func (*Resolver) AddSamples

func (r *Resolver) AddSamples(partition uint64, s schemav1.Samples)

AddSamples adds a collection of stack trace samples to the resolver. Samples can be added to different partitions concurrently, but modification of the same partition is not thread-safe.

func (*Resolver) AddSamplesWithSpanSelector added in v1.2.0

func (r *Resolver) AddSamplesWithSpanSelector(partition uint64, s schemav1.Samples, spanSelector model.SpanSelector)

func (*Resolver) Partition

func (r *Resolver) Partition(partition uint64) map[uint32]int64

Partition returns map of samples corresponding to the partition. The function initializes symbols of the partition on the first occurrence. The call is thread-safe, but access to the returned map is not.

func (*Resolver) Profile

func (r *Resolver) Profile() (*profile.Profile, error)

func (*Resolver) Release

func (r *Resolver) Release()

func (*Resolver) Tree

func (r *Resolver) Tree() (*model.Tree, error)

type ResolverOption

type ResolverOption func(*Resolver)

func WithMaxConcurrent

func WithMaxConcurrent(n int) ResolverOption

WithMaxConcurrent specifies how many partitions can be resolved concurrently.

type Rewriter

type Rewriter struct {
	// contains filtered or unexported fields
}

func NewRewriter

func NewRewriter(w *SymDB, r SymbolsReader) *Rewriter

func (*Rewriter) Rewrite

func (r *Rewriter) Rewrite(partition uint64, stacktraces []uint32) error

type RowRangeReference

type RowRangeReference struct {
	RowGroup uint32
	Index    uint32
	Rows     uint32
}

type StacktraceChunkHeader

type StacktraceChunkHeader struct {
	Offset int64
	Size   int64

	Partition     uint64
	ChunkIndex    uint16
	ChunkEncoding ChunkEncoding

	Stacktraces        uint32 // Number of unique stack traces in the chunk.
	StacktraceNodes    uint32 // Number of nodes in the stacktrace tree.
	StacktraceMaxDepth uint32 // Max stack trace depth in the tree.
	StacktraceMaxNodes uint32 // Max number of nodes at the time of the chunk creation.

	CRC uint32 // Checksum of the chunk data [Offset:Size).
	// contains filtered or unexported fields
}

type StacktraceInserter

type StacktraceInserter interface {
	InsertStacktrace(stacktraceID uint32, locations []int32)
}

StacktraceInserter accepts resolved locations for a given stack trace. The leaf is at locations[0].

Locations slice must not be retained by the implementation. It is guaranteed that, for a given stacktrace ID, InsertStacktrace is called no more than once.

type StacktraceResolver

type StacktraceResolver interface {
	// ResolveStacktraceLocations resolves locations for each stack
	// trace and inserts it to the StacktraceInserter provided.
	//
	// The stacktraces must be ordered in the ascending order.
	// If a stacktrace can't be resolved, dst receives an empty
	// array of locations.
	//
	// Stacktraces slice might be modified during the call.
	ResolveStacktraceLocations(ctx context.Context, dst StacktraceInserter, stacktraces []uint32) error
}

type StacktracesConfig

type StacktracesConfig struct {
	MaxNodesPerChunk uint32
}

type StacktracesRange

type StacktracesRange struct {
	// contains filtered or unexported fields
}

func SplitStacktraces

func SplitStacktraces(s []uint32, n uint32) []StacktracesRange

SplitStacktraces splits the range of stack trace IDs by limit n into sub-ranges matching the corresponding chunks and shifts the values accordingly. Note that the input s is modified in place.

Stack trace ID 0 is reserved and is not expected at the input. A stack trace ID where ID % max_nodes == 0 is not expected either.

type SymDB

type SymDB struct {
	// contains filtered or unexported fields
}

func NewSymDB

func NewSymDB(c *Config) *SymDB

func (*SymDB) Files

func (s *SymDB) Files() []block.File

func (*SymDB) Flush

func (s *SymDB) Flush() error

func (*SymDB) Load added in v1.2.0

func (s *SymDB) Load(context.Context) error

func (*SymDB) MemorySize

func (s *SymDB) MemorySize() uint64

func (*SymDB) Partition

func (s *SymDB) Partition(_ context.Context, partition uint64) (PartitionReader, error)

func (*SymDB) PartitionWriter

func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter

func (*SymDB) WriteMemoryStats

func (s *SymDB) WriteMemoryStats(m *MemoryStats)

func (*SymDB) WriteProfileSymbols

func (s *SymDB) WriteProfileSymbols(partition uint64, profile *profilev1.Profile) []schemav1.InMemoryProfile

type Symbols

type Symbols struct {
	Stacktraces StacktraceResolver
	Locations   []*schemav1.InMemoryLocation
	Mappings    []*schemav1.InMemoryMapping
	Functions   []*schemav1.InMemoryFunction
	Strings     []string
}

func (*Symbols) Profile

func (r *Symbols) Profile(ctx context.Context, samples schemav1.Samples) (*profile.Profile, error)

func (*Symbols) Tree

func (r *Symbols) Tree(ctx context.Context, samples schemav1.Samples) (*model.Tree, error)

type SymbolsReader

type SymbolsReader interface {
	Partition(ctx context.Context, partition uint64) (PartitionReader, error)
	Load(context.Context) error
}

SymbolsReader provides access to a symdb partition.

type TOC

type TOC struct {
	Entries []TOCEntry
}

func (*TOC) MarshalBinary

func (toc *TOC) MarshalBinary() ([]byte, error)

func (*TOC) Size

func (toc *TOC) Size() int

func (*TOC) UnmarshalBinary

func (toc *TOC) UnmarshalBinary(b []byte) error

type TOCEntry

type TOCEntry struct {
	Offset int64
	Size   int64
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL