Documentation ¶
Index ¶
- func GetProcessorConstructor(fun string) (func(val float64, ts uint32) Processor, error)
- func InitMetrics()
- type Aggregator
- type AggregatorReporter
- type Avg
- type CacheEntry
- type Count
- type Delta
- type Derive
- type Last
- type Max
- type Min
- type Percentiles
- type Processor
- func NewAvg(val float64, ts uint32) Processor
- func NewCount(val float64, ts uint32) Processor
- func NewDelta(val float64, ts uint32) Processor
- func NewDerive(val float64, ts uint32) Processor
- func NewLast(val float64, ts uint32) Processor
- func NewMax(val float64, ts uint32) Processor
- func NewMin(val float64, ts uint32) Processor
- func NewPercentiles(val float64, ts uint32) Processor
- func NewStdev(val float64, ts uint32) Processor
- func NewSum(val float64, ts uint32) Processor
- type RangeTracker
- type Stdev
- type Sum
- type TsSlice
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func GetProcessorConstructor ¶
func InitMetrics ¶
func InitMetrics()
Types ¶
type Aggregator ¶
type Aggregator struct { Fun string `json:"fun"` Matcher matcher.Matcher OutFmt string Cache bool Interval uint // expected interval between values in seconds, we will quantize to make sure alignment to interval-spaced timestamps Wait uint // seconds to wait after quantized time value before flushing final outcome and ignoring future values that are sent too late. DropRaw bool // drop raw values "consumed" by this aggregator Key string // contains filtered or unexported fields }
func New ¶
func New(fun string, matcher matcher.Matcher, outFmt string, cache bool, interval, wait uint, dropRaw bool, out chan []byte) (*Aggregator, error)
New creates an aggregator
func (*Aggregator) AddMaybe ¶
func (a *Aggregator) AddMaybe(buf [][]byte, val float64, ts uint32) bool
func (*Aggregator) AddOrCreate ¶
func (a *Aggregator) AddOrCreate(key string, ts uint32, quantized uint, value float64)
func (*Aggregator) Flush ¶
func (a *Aggregator) Flush(cutoff uint)
Flush finalizes and removes aggregations that are due
func (*Aggregator) Shutdown ¶
func (a *Aggregator) Shutdown()
func (*Aggregator) Snapshot ¶
func (a *Aggregator) Snapshot() *Aggregator
Snapshot returns a snapshot of the aggregator, to view its state at any point in time
type AggregatorReporter ¶ added in v0.13.0
AggregatorReporter reports the state of aggregation buckets when they are known (after flushing). It reports on buckets by their logical timestamps, not wallclock time.
func NewAggregatorReporter ¶ added in v0.13.0
func NewAggregatorReporter() (*AggregatorReporter, error)
func (*AggregatorReporter) ReportGraphite ¶ added in v0.13.0
func (r *AggregatorReporter) ReportGraphite(prefix, buf []byte, now time.Time) []byte
type CacheEntry ¶
type CacheEntry struct {
// contains filtered or unexported fields
}
type Count ¶
type Count struct {
// contains filtered or unexported fields
}
Count aggregates to the number of values seen
type Delta ¶
type Delta struct {
// contains filtered or unexported fields
}
Delta aggregates to the difference between highest and lowest value seen
type Derive ¶
type Derive struct {
// contains filtered or unexported fields
}
Derive aggregates to the derivative of the largest timeframe we get
type Last ¶
type Last struct {
// contains filtered or unexported fields
}
Last aggregates to the last value seen
type Max ¶
type Max struct {
// contains filtered or unexported fields
}
Max aggregates to the highest value seen
type Min ¶
type Min struct {
// contains filtered or unexported fields
}
Min aggregates to the lowest value seen
type Percentiles ¶
type Percentiles struct {
// contains filtered or unexported fields
}
Percentiles aggregates to different percentiles
func (*Percentiles) Add ¶
func (p *Percentiles) Add(val float64, ts uint32)
func (*Percentiles) Flush ¶
func (p *Percentiles) Flush() ([]processorResult, bool)
Using the latest recommendation from NIST See https://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm The method implemented corresponds to method R6 of Hyndman and Fan. See https://en.wikipedia.org/wiki/Percentile, Third variant
type Processor ¶
type Processor interface { // Add adds a point to aggregate Add(val float64, ts uint32) // Flush returns the aggregated value(s) and true if it is valid // the only reason why it would be non-valid is for aggregators that need // more than 1 value but they didn't have enough to produce a useful result. Flush() ([]processorResult, bool) }
func NewPercentiles ¶
type RangeTracker ¶
func NewRangeTracker ¶
func NewRangeTracker() *RangeTracker
func (*RangeTracker) Run ¶
func (m *RangeTracker) Run()
func (*RangeTracker) Sample ¶
func (m *RangeTracker) Sample(ts uint32)
type Stdev ¶
type Stdev struct {
// contains filtered or unexported fields
}
Stdev aggregates to standard deviation