Documentation ¶
Index ¶
- func GetProcessorConstructor(fun string) (func(val float64, ts uint32) Processor, error)
- func InitMetrics()
- type Aggregator
- func New(fun, regex, prefix, sub, outFmt string, cache bool, interval, wait uint, dropRaw bool, out chan []byte) (*Aggregator, error)
- func (a *Aggregator) AddMaybe(buf [][]byte, val float64, ts uint32) bool
- func (a *Aggregator) AddOrCreate(key string, ts uint32, quantized uint, value float64)
- func (a *Aggregator) Flush(ts uint)
- func (a *Aggregator) PreMatch(buf []byte) bool
- func (a *Aggregator) Shutdown()
- func (a *Aggregator) Snapshot() *Aggregator
- type Avg
- type CacheEntry
- type Delta
- type Derive
- type Last
- type Max
- type Min
- type Percentiles
- func (p *Percentiles) Add(val float64, ts uint32)
- func (p *Percentiles) Flush() ([]processorResult, bool)
- type Processor
- func NewAvg(val float64, ts uint32) Processor
- func NewDelta(val float64, ts uint32) Processor
- func NewDerive(val float64, ts uint32) Processor
- func NewLast(val float64, ts uint32) Processor
- func NewMax(val float64, ts uint32) Processor
- func NewMin(val float64, ts uint32) Processor
- func NewPercentiles(val float64, ts uint32) Processor
- func NewStdev(val float64, ts uint32) Processor
- func NewSum(val float64, ts uint32) Processor
- type RangeTracker
- func NewRangeTracker() *RangeTracker
- func (m *RangeTracker) Run()
- func (m *RangeTracker) Sample(ts uint32)
- type Stdev
- type Sum
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GetProcessorConstructor ¶ added in v0.9.3
func GetProcessorConstructor(fun string) (func(val float64, ts uint32) Processor, error)
func InitMetrics ¶ added in v0.11.0
func InitMetrics()
Types ¶
type Aggregator ¶
type Aggregator struct {
	Fun      string `json:"fun"`
	Regex    string `json:"regex,omitempty"`
	Prefix   string `json:"prefix,omitempty"`
	Sub      string `json:"substring,omitempty"`
	OutFmt   string
	Cache    bool
	Interval uint // expected interval between values in seconds; we quantize to ensure alignment to interval-spaced timestamps
	Wait     uint // seconds to wait after the quantized time before flushing the final outcome and ignoring values that arrive too late
	DropRaw  bool // drop raw values "consumed" by this aggregator
	// contains filtered or unexported fields
}
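The Interval and Wait fields control bucketing: incoming timestamps are quantized to Interval-spaced boundaries, and a bucket is only finalized once Wait seconds have passed, so slightly late points are still accepted. A minimal sketch of such quantization, assuming simple truncation to the nearest lower multiple (the package's exact rounding rule may differ):

// quantize truncates ts down to the nearest multiple of interval,
// aligning it to interval-spaced timestamps as described above.
// Illustrative assumption only; not necessarily the package's exact rule.
func quantize(ts uint32, interval uint) uint {
	return uint(ts) - uint(ts)%interval
}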
func New ¶
func New(fun, regex, prefix, sub, outFmt string, cache bool, interval, wait uint, dropRaw bool, out chan []byte) (*Aggregator, error)
New creates an aggregator
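A construction sketch based on the signature above. The regex, prefix, output key, and channel handling are illustrative assumptions, not taken from the package, and the package qualifier is omitted to match how names appear on this page:

out := make(chan []byte, 100) // aggregated output lines are delivered here

// Sum per-server request counters into one cluster-wide series,
// bucketed per 60 seconds, waiting 10 extra seconds for late points.
// All names and patterns below are made up for illustration.
agg, err := New(
	"sum",                      // fun: which Processor to apply
	`^stats\.[^.]+\.requests$`, // regex: which metrics to aggregate
	"stats.",                   // prefix: cheap pre-match filter
	"requests",                 // sub: cheap substring pre-match filter
	"stats._sum_.requests",     // outFmt: key for the aggregated output
	false,                      // cache
	60,                         // interval: bucket size in seconds
	10,                         // wait: grace period in seconds
	false,                      // dropRaw
	out,
)
if err != nil {
	panic(err)
}
defer agg.Shutdown()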
func (*Aggregator) AddMaybe ¶ added in v0.9.3
func (a *Aggregator) AddMaybe(buf [][]byte, val float64, ts uint32) bool
func (*Aggregator) AddOrCreate ¶
func (a *Aggregator) AddOrCreate(key string, ts uint32, quantized uint, value float64)
func (*Aggregator) Flush ¶
func (a *Aggregator) Flush(ts uint)
Flush finalizes and removes aggregations that are due
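If flushing is driven by the caller, one plausible pattern is to tick once per second with the current unix time; whether and how the package schedules flushes internally is not covered here, so treat this as an assumption (uses the standard time package):

// Periodically ask the aggregator to finalize buckets that are due,
// i.e. buckets whose quantized timestamp plus Wait has passed.
go func() {
	for range time.Tick(time.Second) {
		agg.Flush(uint(time.Now().Unix()))
	}
}()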
func (*Aggregator) PreMatch ¶
func (a *Aggregator) PreMatch(buf []byte) bool
PreMatch checks whether the given metric matches the configured prefix and/or substring. If a prefix isn't explicitly specified, it is derived from the regex where possible. If PreMatch returns false, the metric will not be passed on to the main regex matching stage.
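A sketch of using PreMatch as a cheap filter in front of AddMaybe, which performs the full regex matching. The split of the metric line into key, value, and timestamp fields is an assumption for illustration (uses the standard bytes and strconv packages):

line := []byte("stats.server1.requests 42 1700000000")

if agg.PreMatch(line) {
	fields := bytes.Fields(line) // assumed layout: key value timestamp
	val, _ := strconv.ParseFloat(string(fields[1]), 64)
	ts, _ := strconv.ParseUint(string(fields[2]), 10, 32)

	// AddMaybe runs the full regex match and, on success, adds the point;
	// it reports whether the aggregator consumed the metric.
	consumed := agg.AddMaybe(fields, val, uint32(ts))
	_ = consumed
}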
func (*Aggregator) Shutdown ¶
func (a *Aggregator) Shutdown()
func (*Aggregator) Snapshot ¶
func (a *Aggregator) Snapshot() *Aggregator
Snapshot returns a snapshot of the aggregator, so its state can be inspected at any point in time
type CacheEntry ¶ added in v0.9.3
type CacheEntry struct {
// contains filtered or unexported fields
}
type Delta ¶ added in v0.9.0
type Delta struct {
// contains filtered or unexported fields
}
Delta aggregates to the difference between highest and lowest value seen
type Derive ¶ added in v0.9.3
type Derive struct {
// contains filtered or unexported fields
}
Derive aggregates to the derivative of the largest timeframe we get
type Last ¶ added in v0.9.0
type Last struct {
// contains filtered or unexported fields
}
Last aggregates to the last value seen
type Max ¶
type Max struct {
// contains filtered or unexported fields
}
Max aggregates to the highest value seen
type Min ¶
type Min struct {
// contains filtered or unexported fields
}
Min aggregates to the lowest value seen
type Percentiles ¶ added in v0.11.0
type Percentiles struct {
// contains filtered or unexported fields
}
Percentiles aggregates to different percentiles
func (*Percentiles) Add ¶ added in v0.11.0
func (p *Percentiles) Add(val float64, ts uint32)
func (*Percentiles) Flush ¶ added in v0.11.0
func (p *Percentiles) Flush() ([]processorResult, bool)
Flush computes percentiles using the latest recommendation from NIST (https://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm). The method implemented corresponds to method R6 of Hyndman and Fan; see the third variant described at https://en.wikipedia.org/wiki/Percentile.
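For reference, R6 places percentile p (0 < p < 1) at rank h = p·(N+1) and interpolates linearly between the surrounding order statistics. A standalone sketch of that calculation, not the package's internal code (uses the standard math and sort packages, and assumes a non-empty input):

// percentileR6 computes the p-th quantile (0 < p < 1) of vals using
// Hyndman & Fan method R6: rank h = p*(N+1), linear interpolation
// between the order statistics around h, clamped at the extremes.
func percentileR6(vals []float64, p float64) float64 {
	sorted := append([]float64(nil), vals...)
	sort.Float64s(sorted)

	n := len(sorted)
	h := p * float64(n+1)
	k := int(math.Floor(h))

	switch {
	case k < 1:
		return sorted[0]
	case k >= n:
		return sorted[n-1]
	default:
		return sorted[k-1] + (h-float64(k))*(sorted[k]-sorted[k-1])
	}
}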
type Processor ¶ added in v0.9.3
type Processor interface {
	// Add adds a point to the aggregation
	Add(val float64, ts uint32)
	// Flush returns the aggregated value(s) and true if the result is valid.
	// The only reason it would be invalid is an aggregator that needs more
	// than one value but didn't receive enough to produce a useful result.
	Flush() ([]processorResult, bool)
}
func NewPercentiles ¶ added in v0.11.0
func NewPercentiles(val float64, ts uint32) Processor
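A sketch of obtaining a Processor by name with GetProcessorConstructor and driving it through the interface above. The "max" fun name is an assumption (presumably matching the lower-cased type names); note that Flush returns a slice of the unexported processorResult type, so callers outside this package can receive the values but cannot name their type:

constructor, err := GetProcessorConstructor("max") // assumed fun name
if err != nil {
	panic(err)
}

// The constructor seeds the processor with the first point.
p := constructor(1.5, 1700000000)
p.Add(3.0, 1700000060)
p.Add(2.0, 1700000120)

if results, ok := p.Flush(); ok {
	_ = results // aggregated value(s) for the points added above
}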
type RangeTracker ¶ added in v0.11.0
func NewRangeTracker ¶ added in v0.11.0
func NewRangeTracker() *RangeTracker
func (*RangeTracker) Run ¶ added in v0.11.0
func (m *RangeTracker) Run()
func (*RangeTracker) Sample ¶ added in v0.11.0
func (m *RangeTracker) Sample(ts uint32)