package v1

Version: v1.6.0
Published: May 30, 2024 | License: AGPL-3.0 | Imports: 11 | Imported by: 0

Note: this package is not in the latest version of its module.

Documentation

Constants

const (
	SeriesIndexColumnName         = "SeriesIndex"
	TimeNanosColumnName           = "TimeNanos"
	StacktracePartitionColumnName = "StacktracePartition"
	TotalValueColumnName          = "TotalValue"
	SamplesColumnName             = "Samples"
)

Variables

var (
	ProfilesSchema = parquet.NewSchema("Profile", phlareparquet.Group{
		phlareparquet.NewGroupField("ID", parquet.UUID()),
		phlareparquet.NewGroupField(SeriesIndexColumnName, parquet.Encoded(parquet.Uint(32), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(StacktracePartitionColumnName, parquet.Encoded(parquet.Uint(64), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(TotalValueColumnName, parquet.Encoded(parquet.Uint(64), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(SamplesColumnName, parquet.List(sampleField)),
		phlareparquet.NewGroupField("DropFrames", parquet.Optional(stringRef)),
		phlareparquet.NewGroupField("KeepFrames", parquet.Optional(stringRef)),
		phlareparquet.NewGroupField(TimeNanosColumnName, parquet.Timestamp(parquet.Nanosecond)),
		phlareparquet.NewGroupField("DurationNanos", parquet.Optional(parquet.Int(64))),
		phlareparquet.NewGroupField("Period", parquet.Optional(parquet.Int(64))),
		phlareparquet.NewGroupField("Comments", parquet.List(stringRef)),
		phlareparquet.NewGroupField("DefaultSampleType", parquet.Optional(parquet.Int(64))),
	})
	DownsampledProfilesSchema = parquet.NewSchema("DownsampledProfile", phlareparquet.Group{
		phlareparquet.NewGroupField(SeriesIndexColumnName, parquet.Encoded(parquet.Uint(32), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(StacktracePartitionColumnName, parquet.Encoded(parquet.Uint(64), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(TotalValueColumnName, parquet.Encoded(parquet.Uint(64), &parquet.DeltaBinaryPacked)),
		phlareparquet.NewGroupField(SamplesColumnName, parquet.List(
			phlareparquet.Group{
				phlareparquet.NewGroupField("StacktraceID", parquet.Encoded(parquet.Uint(64), &parquet.DeltaBinaryPacked)),
				phlareparquet.NewGroupField("Value", parquet.Encoded(parquet.Int(64), &parquet.DeltaBinaryPacked)),
			})),
		phlareparquet.NewGroupField(TimeNanosColumnName, parquet.Timestamp(parquet.Nanosecond)),
	})

	SampleValueColumnPath = strings.Split("Samples.list.element.Value", ".")

	ErrColumnNotFound = fmt.Errorf("column path not found")
)

Functions

func NewMergeProfilesRowReader

func NewMergeProfilesRowReader(rowGroups []parquet.RowReader) parquet.RowReader
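
A minimal usage sketch: expose two in-memory profile slices as row readers and merge them into one stream. The module import paths and the profile values below are assumptions for illustration only.

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/parquet-go/parquet-go"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	// Each slice is exposed as a parquet.RowReader via NewInMemoryProfilesRowReader.
	readers := []parquet.RowReader{
		schemav1.NewInMemoryProfilesRowReader([]schemav1.InMemoryProfile{{SeriesIndex: 1, TimeNanos: 100}}),
		schemav1.NewInMemoryProfilesRowReader([]schemav1.InMemoryProfile{{SeriesIndex: 2, TimeNanos: 200}}),
	}
	merged := schemav1.NewMergeProfilesRowReader(readers)

	rows := make([]parquet.Row, 64)
	for {
		n, err := merged.ReadRows(rows)
		for _, row := range rows[:n] {
			p := schemav1.ProfileRow(row)
			fmt.Println(p.SeriesIndex(), p.TimeNanos())
		}
		if err == io.EOF {
			break // all readers drained
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}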

func ResolveColumnByPath added in v1.2.0

func ResolveColumnByPath(schema *parquet.Schema, path []string) (parquet.LeafColumn, error)
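
For example, the package-level SampleValueColumnPath can be resolved against ProfilesSchema to locate the sample value leaf column. A sketch: the import path and the LeafColumn field accessed are assumptions based on parquet-go.

package main

import (
	"fmt"
	"log"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	// Resolve the leaf column holding sample values inside the Profile schema.
	col, err := schemav1.ResolveColumnByPath(schemav1.ProfilesSchema, schemav1.SampleValueColumnPath)
	if err != nil {
		log.Fatal(err) // e.g. ErrColumnNotFound for an unknown path
	}
	fmt.Println("Samples.list.element.Value resolves to column index", col.ColumnIndex)
}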

Types

type DownsampledProfileRow added in v1.3.0

type DownsampledProfileRow parquet.Row

func (DownsampledProfileRow) ForValues added in v1.3.0

func (p DownsampledProfileRow) ForValues(fn func([]parquet.Value))

type FunctionPersister

type FunctionPersister struct{}

func (*FunctionPersister) Deconstruct

func (*FunctionPersister) Deconstruct(row parquet.Row, _ uint64, fn *InMemoryFunction) parquet.Row

func (*FunctionPersister) Name

func (*FunctionPersister) Name() string

func (*FunctionPersister) Reconstruct

func (*FunctionPersister) Reconstruct(row parquet.Row) (uint64, *InMemoryFunction, error)

func (*FunctionPersister) Schema

func (*FunctionPersister) Schema() *parquet.Schema

func (*FunctionPersister) SortingColumns

func (*FunctionPersister) SortingColumns() parquet.SortingOption

type InMemoryFunction

type InMemoryFunction struct {
	// Unique nonzero id for the function.
	Id uint64
	// Name of the function, in human-readable form if available.
	Name uint32
	// Name of the function, as identified by the system.
	// For instance, it can be a C++ mangled name.
	SystemName uint32
	// Source file containing the function.
	Filename uint32
	// Line number in source file.
	StartLine uint32
}

func (*InMemoryFunction) Clone added in v1.1.0

func (f *InMemoryFunction) Clone() *InMemoryFunction

type InMemoryLine

type InMemoryLine struct {
	// The id of the corresponding profile.Function for this line.
	FunctionId uint32
	// Line number in source code.
	Line int32
}

type InMemoryLocation

type InMemoryLocation struct {
	// Unique nonzero id for the location.  A profile could use
	// instruction addresses or any integer sequence as ids.
	Id uint64
	// The instruction address for this location, if available.  It
	// should be within [Mapping.memory_start...Mapping.memory_limit]
	// for the corresponding mapping. A non-leaf address may be in the
	// middle of a call instruction. It is up to display tools to find
	// the beginning of the instruction if necessary.
	Address uint64
	// The id of the corresponding profile.Mapping for this location.
	// It can be unset if the mapping is unknown or not applicable for
	// this profile type.
	MappingId uint32
	// Provides an indication that multiple symbols map to this location's
	// address, for example due to identical code folding by the linker. In that
	// case the line information above represents one of the multiple
	// symbols. This field must be recomputed when the symbolization state of the
	// profile changes.
	IsFolded bool
	// Multiple line indicates this location has inlined functions,
	// where the last entry represents the caller into which the
	// preceding entries were inlined.
	//
	// E.g., if memcpy() is inlined into printf:
	//
	//	line[0].function_name == "memcpy"
	//	line[1].function_name == "printf"
	Line []InMemoryLine
}
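
A small illustrative construction mirroring the inlining comment above, with hypothetical function IDs (1 for memcpy, 2 for printf) and a hypothetical mapping ID; the import path is assumed.

package main

import (
	"fmt"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	// Hypothetical IDs: function 1 = memcpy, function 2 = printf, mapping 1 = main binary.
	loc := schemav1.InMemoryLocation{
		Id:        42,
		Address:   0x1234,
		MappingId: 1,
		Line: []schemav1.InMemoryLine{
			{FunctionId: 1, Line: 10}, // memcpy, inlined into ...
			{FunctionId: 2, Line: 25}, // ... printf, the caller
		},
	}
	fmt.Printf("%+v\n", loc.Clone())
}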

func (*InMemoryLocation) Clone added in v1.1.0

func (l *InMemoryLocation) Clone() *InMemoryLocation

type InMemoryMapping

type InMemoryMapping struct {
	// Unique nonzero id for the mapping.
	Id uint64
	// Address at which the binary (or DLL) is loaded into memory.
	MemoryStart uint64
	// The limit of the address range occupied by this mapping.
	MemoryLimit uint64
	// Offset in the binary that corresponds to the first mapped address.
	FileOffset uint64
	// The object this entry is loaded from.  This can be a filename on
	// disk for the main binary and shared libraries, or virtual
	// abstractions like "[vdso]".
	Filename uint32
	// A string that uniquely identifies a particular program version
	// with high probability. E.g., for binaries generated by GNU tools,
	// it could be the contents of the .note.gnu.build-id field.
	BuildId uint32
	// The following fields indicate the resolution of symbolic info.
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool
}

func (*InMemoryMapping) Clone added in v1.1.0

func (m *InMemoryMapping) Clone() *InMemoryMapping

type InMemoryProfile

type InMemoryProfile struct {
	// A unique UUID per ingested profile
	ID uuid.UUID

	// SeriesIndex references the underlying series and is generated when
	// writing the TSDB index. The SeriesIndex is different from block to
	// block.
	SeriesIndex uint32

	// StacktracePartition is the partition ID of the stacktrace table that this profile belongs to.
	StacktracePartition uint64

	// TotalValue is the sum of all values in the profile.
	TotalValue uint64

	// SeriesFingerprint references the underlying series and is purely based
	// on the label values. The value is consistent for the same label set (so
	// also between different blocks).
	SeriesFingerprint model.Fingerprint

	// frames with Function.function_name fully matching the following
	// regexp will be dropped from the samples, along with their successors.
	DropFrames int64 // Index into string table.
	// frames with Function.function_name fully matching the following
	// regexp will be kept, even if it matches drop_frames.
	KeepFrames int64 // Index into string table.
	// Time of collection (UTC) represented as nanoseconds past the epoch.
	TimeNanos int64
	// Duration of the profile, if a duration makes sense.
	DurationNanos int64
	// The number of events between sampled occurrences.
	Period int64
	// Freeform text associated with the profile.
	Comments []int64
	// Index into the string table of the type of the preferred sample
	// value. If unset, clients should default to the last sample value.
	DefaultSampleType int64

	Samples Samples
}
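
A sketch of assembling an InMemoryProfile from a map of stacktrace values. The import paths for this package and github.com/google/uuid are assumed; all IDs and values are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	samples := schemav1.NewSamplesFromMap(map[uint32]int64{
		1: 100, // stacktrace 1 observed with value 100
		2: 250,
	})

	p := schemav1.InMemoryProfile{
		ID:          uuid.New(),
		SeriesIndex: 7,
		TimeNanos:   time.Now().UnixNano(),
		TotalValue:  samples.Sum(),
		Samples:     samples,
	}

	fmt.Println(p.Timestamp(), p.Total(), p.Size())
}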

func (InMemoryProfile) Size

func (p InMemoryProfile) Size() uint64

func (InMemoryProfile) Timestamp

func (p InMemoryProfile) Timestamp() model.Time

func (InMemoryProfile) Total

func (p InMemoryProfile) Total() int64

type LocationPersister

type LocationPersister struct{}

func (*LocationPersister) Deconstruct

func (*LocationPersister) Deconstruct(row parquet.Row, _ uint64, loc *InMemoryLocation) parquet.Row

func (*LocationPersister) Name

func (*LocationPersister) Name() string

func (*LocationPersister) Reconstruct

func (*LocationPersister) Reconstruct(row parquet.Row) (uint64, *InMemoryLocation, error)

func (*LocationPersister) Schema

func (*LocationPersister) Schema() *parquet.Schema

func (*LocationPersister) SortingColumns

func (*LocationPersister) SortingColumns() parquet.SortingOption

type MappingPersister

type MappingPersister struct{}

func (*MappingPersister) Deconstruct

func (*MappingPersister) Deconstruct(row parquet.Row, _ uint64, m *InMemoryMapping) parquet.Row

func (*MappingPersister) Name

func (*MappingPersister) Name() string

func (*MappingPersister) Reconstruct

func (*MappingPersister) Reconstruct(row parquet.Row) (uint64, *InMemoryMapping, error)

func (*MappingPersister) Schema

func (*MappingPersister) Schema() *parquet.Schema

func (*MappingPersister) SortingColumns

func (*MappingPersister) SortingColumns() parquet.SortingOption

type Persister

type Persister[T any] interface {
	PersisterName
	Schema() *parquet.Schema
	Deconstruct(parquet.Row, uint64, T) parquet.Row
	Reconstruct(parquet.Row) (uint64, T, error)
	SortingColumns() parquet.SortingOption
}
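
The concrete persisters in this package implement this interface for their respective in-memory types; that relationship can be stated as compile-time assertions (the import path is assumed).

package main

import (
	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

// Compile-time checks that the concrete persisters satisfy Persister[T]
// for the types they deconstruct and reconstruct.
var (
	_ schemav1.Persister[*schemav1.InMemoryFunction] = (*schemav1.FunctionPersister)(nil)
	_ schemav1.Persister[*schemav1.InMemoryLocation] = (*schemav1.LocationPersister)(nil)
	_ schemav1.Persister[*schemav1.InMemoryMapping]  = (*schemav1.MappingPersister)(nil)
)

func main() {}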

type PersisterName

type PersisterName interface {
	Name() string
}

type Profile

type Profile struct {
	// A unique UUID per ingested profile
	ID uuid.UUID `parquet:",uuid"`

	// SeriesIndex references the underlying series and is generated when
	// writing the TSDB index. The SeriesIndex is different from block to
	// block.
	SeriesIndex uint32 `parquet:",delta"`

	// StacktracePartition is the partition ID of the stacktrace table that this profile belongs to.
	StacktracePartition uint64 `parquet:",delta"`

	// TotalValue is the sum of all values in the profile.
	TotalValue uint64 `parquet:",delta"`

	// SeriesFingerprint references the underlying series and is purely based
	// on the label values. The value is consistent for the same label set (so
	// also between different blocks).
	SeriesFingerprint model.Fingerprint `parquet:"-"`

	// The set of samples recorded in this profile.
	Samples []*Sample `parquet:",list"`

	// frames with Function.function_name fully matching the following
	// regexp will be dropped from the samples, along with their successors.
	DropFrames int64 `parquet:",optional"` // Index into string table.
	// frames with Function.function_name fully matching the following
	// regexp will be kept, even if it matches drop_frames.
	KeepFrames int64 `parquet:",optional"` // Index into string table.
	// Time of collection (UTC) represented as nanoseconds past the epoch.
	TimeNanos int64 `parquet:",delta,timestamp(nanosecond)"`
	// Duration of the profile, if a duration makes sense.
	DurationNanos int64 `parquet:",delta,optional"`
	// The number of events between sampled occurrences.
	Period int64 `parquet:",optional"`
	// Freeform text associated with the profile.
	Comments []int64 `parquet:",list"` // Indices into string table.
	// Index into the string table of the type of the preferred sample
	// value. If unset, clients should default to the last sample value.
	DefaultSampleType int64 `parquet:",optional"`
}

func (Profile) Timestamp

func (p Profile) Timestamp() model.Time

func (Profile) Total

func (p Profile) Total() int64

type ProfilePersister

type ProfilePersister struct{}

func (*ProfilePersister) Deconstruct

func (*ProfilePersister) Deconstruct(row parquet.Row, id uint64, s *Profile) parquet.Row

func (*ProfilePersister) Name

func (*ProfilePersister) Name() string

func (*ProfilePersister) Reconstruct

func (*ProfilePersister) Reconstruct(row parquet.Row) (id uint64, s *Profile, err error)

func (*ProfilePersister) Schema

func (*ProfilePersister) Schema() *parquet.Schema

func (*ProfilePersister) SortingColumns

func (*ProfilePersister) SortingColumns() parquet.SortingOption

type ProfileRow

type ProfileRow parquet.Row

func (ProfileRow) ForStacktraceIDsValues

func (p ProfileRow) ForStacktraceIDsValues(fn func([]parquet.Value))

func (ProfileRow) ForStacktraceIdsAndValues added in v1.3.0

func (p ProfileRow) ForStacktraceIdsAndValues(fn func([]parquet.Value, []parquet.Value))

func (ProfileRow) SeriesIndex

func (p ProfileRow) SeriesIndex() uint32

func (ProfileRow) SetSeriesIndex

func (p ProfileRow) SetSeriesIndex(v uint32)

func (ProfileRow) StacktracePartitionID

func (p ProfileRow) StacktracePartitionID() uint64

func (ProfileRow) TimeNanos

func (p ProfileRow) TimeNanos() int64
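
ProfileRow allows reading and patching selected columns of a raw row without reconstructing the whole profile. A hedged sketch that renumbers series indexes in a small batch of rows; the values and the remapping are illustrative and the import paths are assumed.

package main

import (
	"fmt"

	"github.com/parquet-go/parquet-go"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	// Produce a couple of raw rows from in-memory profiles.
	reader := schemav1.NewInMemoryProfilesRowReader([]schemav1.InMemoryProfile{
		{SeriesIndex: 3, TimeNanos: 100},
		{SeriesIndex: 5, TimeNanos: 200},
	})
	rows := make([]parquet.Row, 8)
	n, _ := reader.ReadRows(rows) // err is io.EOF once the slice is exhausted

	// Remap series indexes in place, e.g. when rewriting rows for a new TSDB index.
	remap := map[uint32]uint32{3: 0, 5: 1}
	for _, row := range rows[:n] {
		p := schemav1.ProfileRow(row)
		p.SetSeriesIndex(remap[p.SeriesIndex()])
		fmt.Println(p.SeriesIndex(), p.TimeNanos(), p.StacktracePartitionID())
	}
}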

type ReadWriter

type ReadWriter[T any, P Persister[T]] struct{}

func (*ReadWriter[T, P]) ReadParquetFile

func (*ReadWriter[T, P]) ReadParquetFile(file io.ReaderAt) ([]T, error)

func (*ReadWriter[T, P]) WriteParquetFile

func (*ReadWriter[T, P]) WriteParquetFile(file io.Writer, elements []T) error
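
A round-trip sketch: write a slice of *InMemoryFunction to an in-memory parquet file with the function persister and read it back. The import path is assumed and the IDs and string-table indexes are made up.

package main

import (
	"bytes"
	"fmt"
	"log"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	rw := &schemav1.ReadWriter[*schemav1.InMemoryFunction, *schemav1.FunctionPersister]{}

	funcs := []*schemav1.InMemoryFunction{
		{Id: 1, Name: 10, SystemName: 11, Filename: 12, StartLine: 42},
		{Id: 2, Name: 20, SystemName: 21, Filename: 22, StartLine: 7},
	}

	// Write to an in-memory buffer, then read it back via an io.ReaderAt.
	var buf bytes.Buffer
	if err := rw.WriteParquetFile(&buf, funcs); err != nil {
		log.Fatal(err)
	}
	out, err := rw.ReadParquetFile(bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out), "functions read back")
}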

type Sample

type Sample struct {
	StacktraceID uint64             `parquet:",delta"`
	Value        int64              `parquet:",delta"`
	Labels       []*profilev1.Label `parquet:",list"`
	SpanID       uint64             `parquet:",optional"`
}

type SampleColumns added in v1.2.0

type SampleColumns struct {
	StacktraceID parquet.LeafColumn
	Value        parquet.LeafColumn
	SpanID       parquet.LeafColumn
}

func (*SampleColumns) HasSpanID added in v1.2.0

func (c *SampleColumns) HasSpanID() bool

func (*SampleColumns) Resolve added in v1.2.0

func (c *SampleColumns) Resolve(schema *parquet.Schema) error
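
A sketch resolving the sample columns of ProfilesSchema and checking whether span IDs are present. The import path is assumed and the LeafColumn field accessed comes from parquet-go.

package main

import (
	"fmt"
	"log"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	var cols schemav1.SampleColumns
	if err := cols.Resolve(schemav1.ProfilesSchema); err != nil {
		log.Fatal(err)
	}
	fmt.Println("stacktrace ID column:", cols.StacktraceID.ColumnIndex)
	fmt.Println("value column:", cols.Value.ColumnIndex)
	fmt.Println("has span IDs:", cols.HasSpanID())
}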

type SampleMap

type SampleMap map[uint64]map[uint32]int64

SampleMap is a map of partitioned samples structured as follows: partition => stacktrace_id => value

func (SampleMap) AddSamples

func (m SampleMap) AddSamples(partition uint64, samples Samples)

func (SampleMap) Partition

func (m SampleMap) Partition(p uint64) map[uint32]int64

func (SampleMap) WriteSamples

func (m SampleMap) WriteSamples(partition uint64, dst *Samples)
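
A sketch of collecting samples per partition and flushing one partition back into a Samples value. Partition IDs and values are illustrative; the import path is assumed.

package main

import (
	"fmt"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	m := make(schemav1.SampleMap)

	// Add samples under partition 1: partition => stacktrace_id => value.
	m.AddSamples(1, schemav1.NewSamplesFromMap(map[uint32]int64{10: 100, 11: 50}))
	m.AddSamples(1, schemav1.NewSamplesFromMap(map[uint32]int64{10: 25}))

	fmt.Println(m.Partition(1)) // stacktrace_id => value map for partition 1

	// Write the collected partition back into a Samples value.
	var dst schemav1.Samples
	m.WriteSamples(1, &dst)
	fmt.Println(dst.StacktraceIDs, dst.Values, dst.Sum())
}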

type Samples

type Samples struct {
	StacktraceIDs []uint32
	Values        []uint64
	// Spans associated with the samples.
	// Optional: Spans is nil if not present.
	Spans []uint64
}

func NewSamples

func NewSamples(size int) Samples

func NewSamplesFromMap

func NewSamplesFromMap(m map[uint32]int64) Samples

func (Samples) Clone

func (s Samples) Clone() Samples

func (Samples) Compact

func (s Samples) Compact(dedupe bool) Samples

Compact removes zero-value samples and, if dedupe is true, duplicate samples.
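
For instance (values illustrative; import path assumed):

package main

import (
	"fmt"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	s := schemav1.Samples{
		StacktraceIDs: []uint32{1, 2, 3},
		Values:        []uint64{10, 0, 7}, // the zero-value sample is compacted away
	}
	compacted := s.Compact(false)
	fmt.Println(compacted.Len(), compacted.Sum())
}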

func (Samples) Len

func (s Samples) Len() int

func (Samples) Less

func (s Samples) Less(i, j int) bool

func (Samples) Sum

func (s Samples) Sum() uint64

func (Samples) Swap

func (s Samples) Swap(i, j int)

type SamplesBySpanID added in v1.2.0

type SamplesBySpanID Samples

func (SamplesBySpanID) Len added in v1.2.0

func (s SamplesBySpanID) Len() int

func (SamplesBySpanID) Less added in v1.2.0

func (s SamplesBySpanID) Less(i, j int) bool

func (SamplesBySpanID) Swap added in v1.2.0

func (s SamplesBySpanID) Swap(i, j int)
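
SamplesBySpanID adapts a Samples value to the standard sort interface so it can be ordered by span ID; a sketch with illustrative span IDs and an assumed import path.

package main

import (
	"fmt"
	"sort"

	schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1"
)

func main() {
	s := schemav1.Samples{
		StacktraceIDs: []uint32{1, 2, 3},
		Values:        []uint64{10, 20, 30},
		Spans:         []uint64{0xc0ffee, 0xbeef, 0xdead},
	}
	// The conversion shares the underlying slices, so sorting reorders s as well.
	sort.Sort(schemav1.SamplesBySpanID(s))
	fmt.Println(s.Spans)
}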

type SliceRowReader

type SliceRowReader[T any] struct {
	// contains filtered or unexported fields
}

func NewInMemoryProfilesRowReader

func NewInMemoryProfilesRowReader(slice []InMemoryProfile) *SliceRowReader[InMemoryProfile]

func NewProfilesRowReader

func NewProfilesRowReader(slice []*Profile) *SliceRowReader[*Profile]

func (*SliceRowReader[T]) ReadRows

func (r *SliceRowReader[T]) ReadRows(rows []parquet.Row) (n int, err error)

type Stacktrace

type Stacktrace struct {
	LocationIDs []uint64 `parquet:",list"`
}

type StacktracePersister

type StacktracePersister struct{}

func (*StacktracePersister) Deconstruct

func (*StacktracePersister) Deconstruct(row parquet.Row, id uint64, s *Stacktrace) parquet.Row

func (*StacktracePersister) Name

func (*StacktracePersister) Name() string

func (*StacktracePersister) Reconstruct

func (*StacktracePersister) Reconstruct(row parquet.Row) (id uint64, s *Stacktrace, err error)

func (*StacktracePersister) Schema

func (*StacktracePersister) Schema() *parquet.Schema

func (*StacktracePersister) SortingColumns

func (*StacktracePersister) SortingColumns() parquet.SortingOption

type StringPersister

type StringPersister struct{}

func (*StringPersister) Deconstruct

func (*StringPersister) Deconstruct(row parquet.Row, id uint64, s string) parquet.Row

func (*StringPersister) Name

func (*StringPersister) Name() string

func (*StringPersister) Reconstruct

func (*StringPersister) Reconstruct(row parquet.Row) (id uint64, s string, err error)

func (*StringPersister) Schema

func (*StringPersister) Schema() *parquet.Schema

func (*StringPersister) SortingColumns

func (*StringPersister) SortingColumns() parquet.SortingOption

