Documentation ¶
Index ¶
- Constants
- Variables
- type ChunkEncoding
- type Config
- type FormatError
- type Header
- type Helper
- type IndexFile
- type MemoryStats
- type ParquetConfig
- type PartitionHeader
- type PartitionHeaders
- type PartitionReader
- type PartitionStats
- type PartitionWriter
- func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []*schemav1.InMemoryFunction)
- func (p *PartitionWriter) AppendLocations(dst []uint32, locations []*schemav1.InMemoryLocation)
- func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []*schemav1.InMemoryMapping)
- func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)
- func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)
- func (p *PartitionWriter) Release()
- func (p *PartitionWriter) ResolveChunk(dst StacktraceInserter, sr StacktracesRange) error
- func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error
- func (p *PartitionWriter) Symbols() *Symbols
- func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile
- func (p *PartitionWriter) WriteStats(s *PartitionStats)
- type Reader
- type Resolver
- type ResolverOption
- type Rewriter
- type RowRangeReference
- type StacktraceChunkHeader
- type StacktraceInserter
- type StacktraceResolver
- type StacktracesConfig
- type StacktracesRange
- type SymDB
- func (s *SymDB) Files() []block.File
- func (s *SymDB) Flush() error
- func (s *SymDB) MemorySize() uint64
- func (s *SymDB) Partition(_ context.Context, partition uint64) (PartitionReader, error)
- func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter
- func (s *SymDB) WriteMemoryStats(m *MemoryStats)
- func (s *SymDB) WriteProfileSymbols(partition uint64, profile *profilev1.Profile) []schemav1.InMemoryProfile
- type Symbols
- type SymbolsReader
- type TOC
- type TOCEntry
Constants ¶
const ( DefaultDirName = "symbols" IndexFileName = "index.symdb" StacktracesFileName = "stacktraces.symdb" )
const ( FormatV1 FormatV2 )
const HeaderSize = int(unsafe.Sizeof(Header{}))
Variables ¶
var ( ErrInvalidSize = &FormatError{fmt.Errorf("invalid size")} ErrInvalidCRC = &FormatError{fmt.Errorf("invalid CRC")} ErrInvalidMagic = &FormatError{fmt.Errorf("invalid magic number")} ErrUnknownVersion = &FormatError{fmt.Errorf("unknown version")} )
var ErrInvalidStacktraceRange = fmt.Errorf("invalid range: stack traces can't be resolved")
var ErrPartitionNotFound = fmt.Errorf("partition not found")
Functions ¶
This section is empty.
Types ¶
type Config ¶
type Config struct { Dir string Stacktraces StacktracesConfig Parquet ParquetConfig }
func DefaultConfig ¶
func DefaultConfig() *Config
func (*Config) WithDirectory ¶
func (*Config) WithParquetConfig ¶
func (c *Config) WithParquetConfig(pc ParquetConfig) *Config
type FormatError ¶
type FormatError struct {
// contains filtered or unexported fields
}
func (*FormatError) Error ¶
func (e *FormatError) Error() string
type Helper ¶
type Helper[M schemav1.Models, K comparable] interface { // contains filtered or unexported methods }
type IndexFile ¶
type IndexFile struct { Header Header TOC TOC // Version-specific parts. PartitionHeaders PartitionHeaders CRC uint32 }
func ReadIndexFile ¶
type MemoryStats ¶
type MemoryStats struct { StacktracesSize uint64 LocationsSize uint64 MappingsSize uint64 FunctionsSize uint64 StringsSize uint64 }
func (*MemoryStats) MemorySize ¶
func (m *MemoryStats) MemorySize() uint64
type ParquetConfig ¶
type ParquetConfig struct {
MaxBufferRowCount int
}
type PartitionHeader ¶
type PartitionHeader struct { Partition uint64 StacktraceChunks []StacktraceChunkHeader Locations []RowRangeReference Mappings []RowRangeReference Functions []RowRangeReference Strings []RowRangeReference }
func (*PartitionHeader) Size ¶
func (h *PartitionHeader) Size() int64
type PartitionHeaders ¶
type PartitionHeaders []*PartitionHeader
func (*PartitionHeaders) Size ¶
func (h *PartitionHeaders) Size() int64
func (*PartitionHeaders) Unmarshal ¶
func (h *PartitionHeaders) Unmarshal(b []byte) error
type PartitionReader ¶
type PartitionReader interface { WriteStats(s *PartitionStats) Symbols() *Symbols Release() }
type PartitionStats ¶
type PartitionWriter ¶
type PartitionWriter struct {
// contains filtered or unexported fields
}
func (*PartitionWriter) AppendFunctions ¶
func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []*schemav1.InMemoryFunction)
func (*PartitionWriter) AppendLocations ¶
func (p *PartitionWriter) AppendLocations(dst []uint32, locations []*schemav1.InMemoryLocation)
func (*PartitionWriter) AppendMappings ¶
func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []*schemav1.InMemoryMapping)
func (*PartitionWriter) AppendStacktraces ¶
func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)
func (*PartitionWriter) AppendStrings ¶
func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)
func (*PartitionWriter) Release ¶
func (p *PartitionWriter) Release()
func (*PartitionWriter) ResolveChunk ¶
func (p *PartitionWriter) ResolveChunk(dst StacktraceInserter, sr StacktracesRange) error
func (*PartitionWriter) ResolveStacktraceLocations ¶
func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error
func (*PartitionWriter) Symbols ¶
func (p *PartitionWriter) Symbols() *Symbols
func (*PartitionWriter) WriteProfileSymbols ¶
func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile
func (*PartitionWriter) WriteStats ¶
func (p *PartitionWriter) WriteStats(s *PartitionStats)
type Resolver ¶
type Resolver struct {
// contains filtered or unexported fields
}
Resolver converts stack trace samples to one of the profile formats, such as tree or pprof.
Resolver asynchronously loads symbols for each partition as they are added with AddSamples or Partition calls.
A new Resolver must be created for each profile.
func NewResolver ¶
func NewResolver(ctx context.Context, s SymbolsReader) *Resolver
func (*Resolver) AddSamples ¶
AddSamples adds a collection of stack trace samples to the resolver. Samples can be added to different partitions concurrently, but modification of the same partition is not thread-safe.
type ResolverOption ¶
type ResolverOption func(*Resolver)
func WithMaxConcurrent ¶
func WithMaxConcurrent(n int) ResolverOption
WithMaxConcurrent specifies how many partitions can be resolved concurrently.
type Rewriter ¶
type Rewriter struct {
// contains filtered or unexported fields
}
func NewRewriter ¶
func NewRewriter(w *SymDB, r SymbolsReader) *Rewriter
type RowRangeReference ¶
type StacktraceChunkHeader ¶
type StacktraceChunkHeader struct { Offset int64 Size int64 Partition uint64 ChunkIndex uint16 ChunkEncoding ChunkEncoding Stacktraces uint32 // Number of unique stack traces in the chunk. StacktraceNodes uint32 // Number of nodes in the stacktrace tree. StacktraceMaxDepth uint32 // Max stack trace depth in the tree. StacktraceMaxNodes uint32 // Max number of nodes at the time of the chunk creation. CRC uint32 // Checksum of the chunk data [Offset:Size). // contains filtered or unexported fields }
type StacktraceInserter ¶
StacktraceInserter accepts resolved locations for a given stack trace. The leaf is at locations[0].
Locations slice must not be retained by the implementation. It is guaranteed that, for a given stacktrace ID, InsertStacktrace is called no more than once.
type StacktraceResolver ¶
type StacktraceResolver interface { // ResolveStacktraceLocations resolves locations for each stack // trace and inserts it to the StacktraceInserter provided. // // The stacktraces must be ordered in the ascending order. // If a stacktrace can't be resolved, dst receives an empty // array of locations. // // Stacktraces slice might be modified during the call. ResolveStacktraceLocations(ctx context.Context, dst StacktraceInserter, stacktraces []uint32) error }
type StacktracesConfig ¶
type StacktracesConfig struct {
MaxNodesPerChunk uint32
}
type StacktracesRange ¶
type StacktracesRange struct {
// contains filtered or unexported fields
}
func SplitStacktraces ¶
func SplitStacktraces(s []uint32, n uint32) []StacktracesRange
SplitStacktraces splits the range of stack trace IDs by limit n into sub-ranges matching the corresponding chunks and shifts the values accordingly. Note that the input s is modified in place.
Stack trace ID 0 is reserved and is not expected in the input. A stack trace ID for which ID % max_nodes == 0 is not expected either.
type SymDB ¶
type SymDB struct {
// contains filtered or unexported fields
}
func (*SymDB) MemorySize ¶
func (*SymDB) PartitionWriter ¶
func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter
func (*SymDB) WriteMemoryStats ¶
func (s *SymDB) WriteMemoryStats(m *MemoryStats)
func (*SymDB) WriteProfileSymbols ¶
type Symbols ¶
type Symbols struct { Stacktraces StacktraceResolver Locations []*schemav1.InMemoryLocation Mappings []*schemav1.InMemoryMapping Functions []*schemav1.InMemoryFunction Strings []string }
type SymbolsReader ¶
type SymbolsReader interface {
Partition(ctx context.Context, partition uint64) (PartitionReader, error)
}
SymbolsReader provides access to a symdb partition.
type TOC ¶
type TOC struct {
Entries []TOCEntry
}