Documentation ¶
Index ¶
- Constants
- Variables
- func WritePartition(p *PartitionWriter, dst io.Writer) error
- type CallSiteValues
- type ChunkEncoding
- type Config
- type Footer
- type FormatError
- type FormatVersion
- type Helper
- type IndexFile
- type IndexHeader
- type MemoryStats
- type Node
- type Option
- type ParentPointerTree
- type ParquetConfig
- type PartitionHeader
- type PartitionHeaderV2
- type PartitionHeaderV3
- type PartitionHeaders
- func (h *PartitionHeaders) MarshalV2To(dst io.Writer) (_ int64, err error)
- func (h *PartitionHeaders) MarshalV3To(dst io.Writer) (_ int64, err error)
- func (h *PartitionHeaders) Size() int64
- func (h *PartitionHeaders) UnmarshalV1(b []byte) error
- func (h *PartitionHeaders) UnmarshalV2(b []byte) error
- func (h *PartitionHeaders) UnmarshalV3(b []byte) error
- type PartitionReader
- type PartitionStats
- type PartitionWriter
- func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []schemav1.InMemoryFunction)
- func (p *PartitionWriter) AppendLocations(dst []uint32, locations []schemav1.InMemoryLocation)
- func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []schemav1.InMemoryMapping)
- func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)
- func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)
- func (p *PartitionWriter) LookupLocations(dst []uint64, stacktraceID uint32) []uint64
- func (p *PartitionWriter) Release()
- func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error
- func (p *PartitionWriter) SplitStacktraceIDRanges(appender *SampleAppender) iter.Iterator[*StacktraceIDRange]
- func (p *PartitionWriter) Symbols() *Symbols
- func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile
- func (p *PartitionWriter) WriteStats(s *PartitionStats)
- type Reader
- type Resolver
- func (r *Resolver) AddSamples(partition uint64, s schemav1.Samples)
- func (r *Resolver) AddSamplesFromParquetRow(partition uint64, stacktraceIDs, values []parquet.Value)
- func (r *Resolver) AddSamplesWithSpanSelector(partition uint64, s schemav1.Samples, spanSelector model.SpanSelector)
- func (r *Resolver) AddSamplesWithSpanSelectorFromParquetRow(partition uint64, stacktraces, values, spans []parquet.Value, ...)
- func (r *Resolver) CallSiteValues(values *CallSiteValues, partition uint64, samples schemav1.Samples) error
- func (r *Resolver) CallSiteValuesParquet(values *CallSiteValues, partition uint64, stacktraceID, value []parquet.Value) error
- func (r *Resolver) Pprof() (*googlev1.Profile, error)
- func (r *Resolver) Release()
- func (r *Resolver) Tree() (*model.Tree, error)
- type ResolverOption
- type Rewriter
- type RowRangeReference
- type SampleAppender
- type SelectedStackTraces
- type StacktraceBlockHeader
- type StacktraceIDRange
- type StacktraceIDRangeIterator
- type StacktraceInserter
- type StacktraceResolver
- type StacktracesConfig
- type SymDB
- func (s *SymDB) Files() []block.File
- func (s *SymDB) Flush() error
- func (s *SymDB) FormatVersion() FormatVersion
- func (s *SymDB) MemorySize() uint64
- func (s *SymDB) Partition(_ context.Context, partition uint64) (PartitionReader, error)
- func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter
- func (s *SymDB) WriteMemoryStats(m *MemoryStats)
- func (s *SymDB) WriteProfileSymbols(partition uint64, profile *profilev1.Profile) []schemav1.InMemoryProfile
- type Symbols
- type SymbolsBlockFormat
- type SymbolsBlockHeader
- type SymbolsReader
- type TOC
- type TOCEntry
Constants ¶
const ( DefaultFileName = "symbols.symdb" // Added in v3. DefaultDirName = "symbols" IndexFileName = "index.symdb" StacktracesFileName = "stacktraces.symdb" )
const IndexHeaderSize = int(unsafe.Sizeof(IndexHeader{}))
Variables ¶
var ( ErrInvalidSize = &FormatError{fmt.Errorf("invalid size")} ErrInvalidCRC = &FormatError{fmt.Errorf("invalid CRC")} ErrInvalidMagic = &FormatError{fmt.Errorf("invalid magic number")} ErrUnknownVersion = &FormatError{fmt.Errorf("unknown version")} )
var ErrInvalidStacktraceRange = fmt.Errorf("invalid range: stack traces can't be resolved")
var ErrPartitionNotFound = fmt.Errorf("partition not found")
Functions ¶
func WritePartition ¶ added in v1.8.0
func WritePartition(p *PartitionWriter, dst io.Writer) error
Types ¶
type CallSiteValues ¶ added in v1.4.0
type CallSiteValues struct { // Flat is the sum of sample values directly attributed to the node. Flat uint64 // Total is the total sum of sample values attributed to the node and // its descendants. Total uint64 // LocationFlat is the sum of sample values directly attributed to the // node location, irrespective of the call chain. LocationFlat uint64 // LocationTotal is the total sum of sample values attributed to the // node location and its descendants, irrespective of the call chain. LocationTotal uint64 }
CallSiteValues represents statistics associated with a call tree node.
type Config ¶
type Config struct { Dir string Version FormatVersion Stacktraces StacktracesConfig Parquet ParquetConfig }
func DefaultConfig ¶
func DefaultConfig() *Config
func (*Config) WithDirectory ¶
func (*Config) WithParquetConfig ¶
func (c *Config) WithParquetConfig(pc ParquetConfig) *Config
func (*Config) WithVersion ¶ added in v1.7.0
func (c *Config) WithVersion(v FormatVersion) *Config
type Footer ¶ added in v1.7.0
type Footer struct { // contains filtered or unexported fields }
func (*Footer) MarshalBinary ¶ added in v1.7.0
func (*Footer) UnmarshalBinary ¶ added in v1.7.0
type FormatError ¶
type FormatError struct {
// contains filtered or unexported fields
}
func (*FormatError) Error ¶
func (e *FormatError) Error() string
type FormatVersion ¶ added in v1.7.0
type FormatVersion uint32
const ( FormatV1 FormatVersion FormatV2 FormatV3 )
type Helper ¶
type Helper[M schemav1.Models, K comparable] interface { // contains filtered or unexported methods }
type IndexFile ¶
type IndexFile struct { Header IndexHeader TOC TOC // Version-specific. PartitionHeaders PartitionHeaders CRC uint32 // Checksum of the index. }
type IndexHeader ¶ added in v1.7.0
type IndexHeader struct { Magic [4]byte Version FormatVersion // contains filtered or unexported fields }
func (*IndexHeader) MarshalBinary ¶ added in v1.7.0
func (h *IndexHeader) MarshalBinary() []byte
func (*IndexHeader) UnmarshalBinary ¶ added in v1.7.0
func (h *IndexHeader) UnmarshalBinary(b []byte) error
type MemoryStats ¶
type MemoryStats struct { StacktracesSize uint64 LocationsSize uint64 MappingsSize uint64 FunctionsSize uint64 StringsSize uint64 }
func (*MemoryStats) MemorySize ¶
func (m *MemoryStats) MemorySize() uint64
type ParentPointerTree ¶ added in v1.7.0
type ParentPointerTree interface {
Nodes() []Node
}
type ParquetConfig ¶
type ParquetConfig struct {
MaxBufferRowCount int
}
type PartitionHeader ¶
type PartitionHeader struct { Partition uint64 // TODO(kolesnikovae): Switch to SymbolsBlock encoding. Stacktraces []StacktraceBlockHeader V2 *PartitionHeaderV2 V3 *PartitionHeaderV3 }
func (*PartitionHeader) Size ¶
func (h *PartitionHeader) Size() int64
type PartitionHeaderV2 ¶ added in v1.7.0
type PartitionHeaderV2 struct { Locations []RowRangeReference Mappings []RowRangeReference Functions []RowRangeReference Strings []RowRangeReference }
type PartitionHeaderV3 ¶ added in v1.7.0
type PartitionHeaderV3 struct { Locations SymbolsBlockHeader Mappings SymbolsBlockHeader Functions SymbolsBlockHeader Strings SymbolsBlockHeader }
type PartitionHeaders ¶
type PartitionHeaders []*PartitionHeader
func (*PartitionHeaders) MarshalV2To ¶ added in v1.7.0
func (h *PartitionHeaders) MarshalV2To(dst io.Writer) (_ int64, err error)
func (*PartitionHeaders) MarshalV3To ¶ added in v1.7.0
func (h *PartitionHeaders) MarshalV3To(dst io.Writer) (_ int64, err error)
func (*PartitionHeaders) Size ¶
func (h *PartitionHeaders) Size() int64
func (*PartitionHeaders) UnmarshalV1 ¶ added in v1.7.0
func (h *PartitionHeaders) UnmarshalV1(b []byte) error
func (*PartitionHeaders) UnmarshalV2 ¶ added in v1.7.0
func (h *PartitionHeaders) UnmarshalV2(b []byte) error
func (*PartitionHeaders) UnmarshalV3 ¶ added in v1.7.0
func (h *PartitionHeaders) UnmarshalV3(b []byte) error
type PartitionReader ¶
type PartitionReader interface { WriteStats(s *PartitionStats) Symbols() *Symbols Release() }
type PartitionStats ¶
type PartitionWriter ¶
type PartitionWriter struct {
// contains filtered or unexported fields
}
func NewPartitionWriter ¶ added in v1.8.0
func NewPartitionWriter(partition uint64, config *Config) *PartitionWriter
func (*PartitionWriter) AppendFunctions ¶
func (p *PartitionWriter) AppendFunctions(dst []uint32, functions []schemav1.InMemoryFunction)
func (*PartitionWriter) AppendLocations ¶
func (p *PartitionWriter) AppendLocations(dst []uint32, locations []schemav1.InMemoryLocation)
func (*PartitionWriter) AppendMappings ¶
func (p *PartitionWriter) AppendMappings(dst []uint32, mappings []schemav1.InMemoryMapping)
func (*PartitionWriter) AppendStacktraces ¶
func (p *PartitionWriter) AppendStacktraces(dst []uint32, s []*schemav1.Stacktrace)
func (*PartitionWriter) AppendStrings ¶
func (p *PartitionWriter) AppendStrings(dst []uint32, strings []string)
func (*PartitionWriter) LookupLocations ¶ added in v1.2.1
func (p *PartitionWriter) LookupLocations(dst []uint64, stacktraceID uint32) []uint64
func (*PartitionWriter) Release ¶
func (p *PartitionWriter) Release()
func (*PartitionWriter) ResolveStacktraceLocations ¶
func (p *PartitionWriter) ResolveStacktraceLocations(_ context.Context, dst StacktraceInserter, stacktraces []uint32) error
func (*PartitionWriter) SplitStacktraceIDRanges ¶ added in v1.7.0
func (p *PartitionWriter) SplitStacktraceIDRanges(appender *SampleAppender) iter.Iterator[*StacktraceIDRange]
func (*PartitionWriter) Symbols ¶
func (p *PartitionWriter) Symbols() *Symbols
func (*PartitionWriter) WriteProfileSymbols ¶
func (p *PartitionWriter) WriteProfileSymbols(profile *profilev1.Profile) []schemav1.InMemoryProfile
func (*PartitionWriter) WriteStats ¶
func (p *PartitionWriter) WriteStats(s *PartitionStats)
type Reader ¶
type Reader struct {
// contains filtered or unexported fields
}
func OpenObject ¶ added in v1.8.0
type Resolver ¶
type Resolver struct {
// contains filtered or unexported fields
}
Resolver converts stack trace samples to one of the profile formats, such as tree or pprof.
Resolver asynchronously loads symbols for each partition as they are added with AddSamples or Partition calls.
A new Resolver must be created for each profile.
func NewResolver ¶
func NewResolver(ctx context.Context, s SymbolsReader, opts ...ResolverOption) *Resolver
func (*Resolver) AddSamples ¶
AddSamples adds a collection of stack trace samples to the resolver. Samples can be added to partitions concurrently.
func (*Resolver) AddSamplesFromParquetRow ¶ added in v1.3.0
func (*Resolver) AddSamplesWithSpanSelector ¶ added in v1.2.0
func (*Resolver) AddSamplesWithSpanSelectorFromParquetRow ¶ added in v1.7.0
func (r *Resolver) AddSamplesWithSpanSelectorFromParquetRow(partition uint64, stacktraces, values, spans []parquet.Value, spanSelector model.SpanSelector)
func (*Resolver) CallSiteValues ¶ added in v1.4.0
func (*Resolver) CallSiteValuesParquet ¶ added in v1.4.0
func (r *Resolver) CallSiteValuesParquet(values *CallSiteValues, partition uint64, stacktraceID, value []parquet.Value) error
type ResolverOption ¶
type ResolverOption func(*Resolver)
func WithResolverMaxConcurrent ¶ added in v1.3.0
func WithResolverMaxConcurrent(n int) ResolverOption
WithResolverMaxConcurrent specifies how many partitions can be resolved concurrently.
func WithResolverMaxNodes ¶ added in v1.3.0
func WithResolverMaxNodes(n int64) ResolverOption
WithResolverMaxNodes specifies the desired maximum number of nodes the resulting profile should include.
func WithResolverStackTraceSelector ¶ added in v1.3.0
func WithResolverStackTraceSelector(sts *typesv1.StackTraceSelector) ResolverOption
WithResolverStackTraceSelector specifies the stack trace selector. Only stack traces that belong to the callSite (have the prefix provided) will be selected. If empty, the filter is ignored. Subtree root location is the last element.
type Rewriter ¶
type Rewriter struct {
// contains filtered or unexported fields
}
func NewRewriter ¶
func NewRewriter(w *SymDB, r SymbolsReader) *Rewriter
type RowRangeReference ¶
type SampleAppender ¶ added in v1.7.0
type SampleAppender struct { Append func(stacktrace uint32, value uint64) AppendMany func(stacktraces []uint32, values []uint64) // contains filtered or unexported fields }
SampleAppender is a dynamic data structure that accumulates samples, by summing them up by stack trace ID.
It has two underlying implementations:
- map: a hash table is used for small sparse data sets (16k by default). This representation is optimal for small profiles, like span profile, or a short time range profile of a specific service/series.
- chunked sparse set: stack trace IDs serve as indices in a sparse set. Provided that the stack trace IDs are dense (as they point to the node index in the parent pointer tree), this representation is significantly more performant, but may require more space, if the stack trace IDs set is very sparse. In order to reduce memory consumption, the set is split into chunks (16k by default), that are allocated once at least one ID matches the chunk range. In addition, values are ordered by stack trace ID without being sorted explicitly.
func NewSampleAppender ¶ added in v1.7.0
func NewSampleAppender() *SampleAppender
func NewSampleAppenderSize ¶ added in v1.7.0
func NewSampleAppenderSize(maxMapSize, chunkSize uint32) *SampleAppender
func (*SampleAppender) Len ¶ added in v1.7.0
func (s *SampleAppender) Len() int
func (*SampleAppender) Samples ¶ added in v1.7.0
func (s *SampleAppender) Samples() v1.Samples
type SelectedStackTraces ¶ added in v1.4.0
type SelectedStackTraces struct {
// contains filtered or unexported fields
}
func SelectStackTraces ¶ added in v1.4.0
func SelectStackTraces(symbols *Symbols, selector *typesv1.StackTraceSelector) *SelectedStackTraces
func (*SelectedStackTraces) CallSiteValues ¶ added in v1.4.0
func (x *SelectedStackTraces) CallSiteValues(values *CallSiteValues, samples schemav1.Samples)
CallSiteValues writes the call site statistics for the selected stack traces and the given set of samples.
func (*SelectedStackTraces) CallSiteValuesParquet ¶ added in v1.4.0
func (x *SelectedStackTraces) CallSiteValuesParquet(values *CallSiteValues, stacktraceID, value []parquet.Value)
CallSiteValuesParquet is identical to CallSiteValues but accepts raw parquet values instead of samples.
func (*SelectedStackTraces) HasValidCallSite ¶ added in v1.7.0
func (x *SelectedStackTraces) HasValidCallSite() bool
HasValidCallSite reports whether any stack traces match the selector. An empty selector results in a valid empty selection.
type StacktraceBlockHeader ¶ added in v1.7.0
type StacktraceBlockHeader struct { Offset int64 Size int64 Partition uint64 // Used in v1. BlockIndex uint16 // Used in v1. Encoding ChunkEncoding Stacktraces uint32 // Number of unique stack traces in the chunk. StacktraceNodes uint32 // Number of nodes in the stacktrace tree. StacktraceMaxDepth uint32 // Max stack trace depth in the tree. StacktraceMaxNodes uint32 // Max number of nodes at the time of the chunk creation. CRC uint32 // Checksum of the chunk data [Offset:Size). // contains filtered or unexported fields }
type StacktraceIDRange ¶ added in v1.7.0
type StacktraceIDRange struct { // Stack trace identifiers that belong to the range. // Identifiers are relative to the range Offset(). IDs []uint32 // Parent pointer tree, the stack traces refer to. // A stack trace identifier is the index of the node // in the tree. Optional. ParentPointerTree // Samples matching the stack trace range. Optional. schemav1.Samples // contains filtered or unexported fields }
StacktraceIDRange represents a range of stack trace identifiers, sharing the same parent pointer tree.
func SplitStacktraces ¶
func SplitStacktraces(s []uint32, n uint32) []*StacktraceIDRange
SplitStacktraces splits the range of stack trace IDs by limit n into sub-ranges matching to the corresponding chunks and shifts the values accordingly. Note that the input s is modified in place.
Stack trace ID 0 is reserved and is not expected at the input. Stack trace ID % max_nodes == 0 is not expected either.
func (*StacktraceIDRange) Offset ¶ added in v1.7.0
func (r *StacktraceIDRange) Offset() uint32
Offset returns the lowest identifier of the range. Identifiers are relative to the range offset.
func (*StacktraceIDRange) SetNodeValues ¶ added in v1.7.0
func (r *StacktraceIDRange) SetNodeValues(dst []Node)
SetNodeValues sets the values of the provided Samples to the matching parent pointer tree nodes.
type StacktraceIDRangeIterator ¶ added in v1.7.0
type StacktraceIDRangeIterator interface {
SplitStacktraceIDRanges(*SampleAppender) iter.Iterator[*StacktraceIDRange]
}
StacktraceIDRangeIterator provides low-level access to stack traces, stored in parent pointer trees.
type StacktraceInserter ¶
StacktraceInserter accepts resolved locations for a given stack trace. The leaf is at locations[0].
Locations slice must not be retained by implementation. It is guaranteed, that for a given stacktrace ID InsertStacktrace is called not more than once.
type StacktraceResolver ¶
type StacktraceResolver interface { // ResolveStacktraceLocations resolves locations for each stack // trace and inserts it to the StacktraceInserter provided. // // The stacktraces must be ordered in the ascending order. // If a stacktrace can't be resolved, dst receives an empty // array of locations. // // Stacktraces slice might be modified during the call. ResolveStacktraceLocations(ctx context.Context, dst StacktraceInserter, stacktraces []uint32) error LookupLocations(dst []uint64, stacktraceID uint32) []uint64 }
type StacktracesConfig ¶
type StacktracesConfig struct {
MaxNodesPerChunk uint32
}
type SymDB ¶
type SymDB struct {
// contains filtered or unexported fields
}
func (*SymDB) FormatVersion ¶ added in v1.7.0
func (s *SymDB) FormatVersion() FormatVersion
func (*SymDB) MemorySize ¶
func (*SymDB) PartitionWriter ¶
func (s *SymDB) PartitionWriter(partition uint64) *PartitionWriter
func (*SymDB) WriteMemoryStats ¶
func (s *SymDB) WriteMemoryStats(m *MemoryStats)
func (*SymDB) WriteProfileSymbols ¶
type Symbols ¶
type Symbols struct { Stacktraces StacktraceResolver Locations []schemav1.InMemoryLocation Mappings []schemav1.InMemoryMapping Functions []schemav1.InMemoryFunction Strings []string }
type SymbolsBlockFormat ¶ added in v1.7.0
type SymbolsBlockFormat uint16
const ( BlockLocationsV1 SymbolsBlockFormat BlockFunctionsV1 BlockMappingsV1 BlockStringsV1 )
type SymbolsBlockHeader ¶ added in v1.7.0
type SymbolsBlockHeader struct { // Offset in the data file. Offset uint64 // Size of the section. Size uint32 // Checksum of the section. CRC uint32 // Length denotes the total number of items encoded. Length uint32 // BlockSize denotes the number of items per block. BlockSize uint32 // BlockHeaderSize denotes the encoder block header size in bytes. // This enables forward compatibility within the same format version: // as long as fields are not removed or reordered, and the encoding // scheme does not change, the format can be extended without updating // the format version. Decoder is able to read the whole header and // skip unknown fields. BlockHeaderSize uint16 // Format of the encoded data. // Change of the format _version_ may break forward compatibility. Format SymbolsBlockFormat }
SymbolsBlockHeader describes a collection of elements encoded in a content-specific way: symbolic information such as locations, functions, mappings, and strings is represented as Array of Structures in memory, and is encoded as Structure of Arrays when written on disk.
type SymbolsReader ¶
type SymbolsReader interface {
Partition(ctx context.Context, partition uint64) (PartitionReader, error)
}
SymbolsReader provides access to a symdb partition.
type TOC ¶
type TOC struct {
Entries []TOCEntry
}
func (*TOC) MarshalBinary ¶
func (*TOC) UnmarshalBinary ¶
Source Files ¶
- block_reader.go
- block_reader_parquet.go
- block_writer.go
- block_writer_v2.go
- block_writer_v3.go
- dedup_slice.go
- format.go
- functions.go
- locations.go
- mappings.go
- partition_memory.go
- resolver.go
- resolver_pprof.go
- resolver_pprof_full.go
- resolver_pprof_go_pgo.go
- resolver_pprof_tree.go
- resolver_tree.go
- rewriter.go
- sample_appender.go
- stacktrace_range.go
- stacktrace_selection.go
- stacktrace_tree.go
- strings.go
- symdb.go