Documentation ¶
Index ¶
- Constants
- Variables
- type Client
- func (c *Client) GetAvailableStorage(window time.Duration, aggregator string, tenants ...string) (*StorageMetrics, error)
- func (c *Client) GetHostMemoryStats(startDate time.Time, hostID string) (*MemoryUsageStats, error)
- func (c *Client) GetInstanceMemoryStats(startDate time.Time, instances ...ServiceInstance) ([]MemoryUsageStats, error)
- func (c *Client) GetServiceMemoryStats(startDate time.Time, serviceID string) (*MemoryUsageStats, error)
- type Datapoint
- type Error
- type Float
- type MemoryUsageCache
- type MemoryUsageItem
- type MemoryUsageQuery
- type MemoryUsageStats
- type MetricOptions
- type MetricSeries
- type MetricTimer
- type Metrics
- type PerformanceData
- type PerformanceOptions
- type RateOptions
- type ResultData
- type ServiceInstance
- type StorageMetrics
- type V2Datapoint
- type V2MetricOptions
- type V2PerformanceData
- type V2PerformanceOptions
- type V2RateOptions
- type V2ResultData
- type V2Status
Constants ¶
const (
    PoolDataAvailableName     = "thinpool-data"
    PoolMetadataAvailableName = "thinpool-metadata"
)
Variables ¶
var (
    // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
    ErrInvalidEndpoint = errors.New("invalid endpoint")
    // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
    ErrConnectionRefused = errors.New("cannot connect to the metric query service")
)
Functions ¶
This section is empty.
Types ¶
type Client ¶
func NewClient ¶
NewClient returns a Client instance ready for communication with the given server endpoint.
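A minimal usage sketch follows. The import path and the NewClient signature are assumptions (only the method signatures listed on this page are taken from the package); the endpoint URL and service ID are placeholders.

package main

import (
    "fmt"
    "log"
    "time"

    // import path is an assumption; adjust to the actual location of this package
    "github.com/control-center/serviced/metrics"
)

func main() {
    // Assumption: NewClient takes the query-service endpoint and returns (*Client, error).
    c, err := metrics.NewClient("http://localhost:8888")
    if err != nil {
        log.Fatal(err)
    }

    // GetServiceMemoryStats is documented below; "myServiceID" is a placeholder.
    stats, err := c.GetServiceMemoryStats(time.Now().Add(-time.Hour), "myServiceID")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("memory last=%d max=%d avg=%d\n", stats.Last, stats.Max, stats.Average)
}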
func (*Client) GetAvailableStorage ¶
func (c *Client) GetAvailableStorage(window time.Duration, aggregator string, tenants ...string) (*StorageMetrics, error)
func (*Client) GetHostMemoryStats ¶
func (c *Client) GetHostMemoryStats(startDate time.Time, hostID string) (*MemoryUsageStats, error)
func (*Client) GetInstanceMemoryStats ¶
func (c *Client) GetInstanceMemoryStats(startDate time.Time, instances ...ServiceInstance) ([]MemoryUsageStats, error)
func (*Client) GetServiceMemoryStats ¶
func (c *Client) GetServiceMemoryStats(startDate time.Time, serviceID string) (*MemoryUsageStats, error)
type MemoryUsageCache ¶
type MemoryUsageCache struct {
    sync.Mutex
    Usages map[string]*MemoryUsageItem
    TTL    time.Duration
    Clock  utils.Clock
}
MemoryUsageCache is a simple TTL cache for MemoryUsageStats objects.
func NewMemoryUsageCache ¶
func NewMemoryUsageCache(ttl time.Duration) *MemoryUsageCache
func (*MemoryUsageCache) Get ¶
func (c *MemoryUsageCache) Get(key string, getter MemoryUsageQuery) (val []MemoryUsageStats, err error)
Get retrieves a cached value if one exists; otherwise it calls getter, caches the result, and returns it.
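A short sketch of the caching pattern, using only the signatures shown on this page; the key and the getter body are illustrative, and the metrics package is assumed to be imported as in the sketch under NewClient.

cache := metrics.NewMemoryUsageCache(5 * time.Minute)

// The getter runs only on a cache miss; its result is stored under the key
// until the TTL expires.
stats, err := cache.Get("host-memory:myHostID", func() ([]metrics.MemoryUsageStats, error) {
    // hypothetical expensive query goes here
    return []metrics.MemoryUsageStats{{HostID: "myHostID", Last: 1024}}, nil
})
if err != nil {
    log.Fatal(err)
}
fmt.Println(len(stats), "stats returned")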
type MemoryUsageItem ¶
type MemoryUsageItem struct {
// contains filtered or unexported fields
}
func (*MemoryUsageItem) Expired ¶
func (item *MemoryUsageItem) Expired() bool
type MemoryUsageQuery ¶
type MemoryUsageQuery func() ([]MemoryUsageStats, error)
MemoryUsageQuery is a function that returns the value to be cached in the event of a cache miss.
type MemoryUsageStats ¶
type MemoryUsageStats struct {
    HostID     string
    ServiceID  string
    InstanceID string
    Last       int64
    Max        int64
    Average    int64
}
MemoryUsageStats is the main container for memory usage statistics consumed by serviced.
type MetricOptions ¶
type MetricOptions struct {
    Metric       string              `json:"metric,omitempty"`
    Name         string              `json:"name,omitempty"`
    ID           string              `json:"id,omitempty"`
    Aggregator   string              `json:"aggregator,omitempty"`
    Interpolator string              `json:"interpolator,omitempty"`
    Rate         bool                `json:"rate,omitempty"`
    RateOptions  RateOptions         `json:"rateOptions,omitempty"`
    Expression   string              `json:"expression,omitempty"`
    Tags         map[string][]string `json:"tags,omitempty"`
}
MetricOptions are the options for receiving metrics for a set of data.
type MetricSeries ¶
type MetricSeries struct {
// contains filtered or unexported fields
}
func DatapointsToSeries ¶
func DatapointsToSeries(dp []Datapoint) MetricSeries
func (*MetricSeries) X ¶
func (s *MetricSeries) X() []float64
func (*MetricSeries) Y ¶
func (s *MetricSeries) Y() []float64
type Metrics ¶
type Metrics struct {
    sync.Mutex
    Enabled   bool
    Registry  gometrics.Registry
    Timers    map[string]gometrics.Timer
    GroupName string
}
To record metrics, enable metrics for a function with ctx.Metrics().Enabled = true and add: defer ctx.Metrics().Stop(ctx.Metrics().Start("FunctionName")) where you want to time code through the end of the method. To time specific parts of a function, break the Start/Stop out into two separate calls. Call the Log() method to capture the timing information and clear the data. For running logs, use the go-metrics Log() method, passing in the Metrics.Registry object.
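A compact sketch of that pattern; the function and timer names are illustrative, and the metrics and time packages are assumed to be imported as in the sketch under NewClient.

m := metrics.NewMetrics()
m.Enabled = true

func() {
    // Start is evaluated immediately; Stop runs when the function returns,
    // recording the elapsed time under the "DoWork" timer.
    defer m.Stop(m.Start("DoWork"))
    time.Sleep(10 * time.Millisecond) // stand-in for the work being timed
}()

// Write the collected timers to the log and clear the data.
m.Log()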
func NewMetrics ¶
func NewMetrics() *Metrics
func (*Metrics) Log ¶
func (m *Metrics) Log()
Log logs the current timers, then turns off metric logging and clears the metric data. Note that for a running tally, the go-metrics log method can be used directly, providing this registry.
func (*Metrics) LogAndCleanUp ¶
func (m *Metrics) LogAndCleanUp(ssTimer *MetricTimer)
LogAndCleanUp is intended to be used in a defer call on methods for which metric logging is desired. To write metrics for a method invocation to the log, add the following at the top of the method:
ctx.Metrics().Enabled = true
defer ctx.Metrics().LogAndCleanUp(ctx.Metrics().Start("methodname"))
If Enabled is true, the metrics will be gathered and written at the end of the method. If Enabled is false, this will still gather metrics for the method, but only report them if the method is called by another method with metrics enabled; i.e. it behaves similarly to 'defer <metrics>.Stop(<metrics>.Start("methodname"))'.
It is not necessary to reset Metrics().Enabled to false, as the Log() method does so before exiting.
func (*Metrics) Start ¶
func (m *Metrics) Start(name string) *MetricTimer
Start returns a new timing object. It is passed as the argument to Stop() to record the duration and count.
func (*Metrics) Stop ¶
func (m *Metrics) Stop(timer *MetricTimer)
Stop calculates and records the duration for the given timer.
type PerformanceData ¶
type PerformanceData struct {
    ClientID        string       `json:"clientId,omitempty"`
    Source          string       `json:"source,omitempty"`
    StartTime       string       `json:"startTime,omitempty"`
    StartTimeActual int64        `json:"startTimeActual"`
    EndTime         string       `json:"endTime,omitempty"`
    EndTimeActual   int64        `json:"endTimeActual"`
    ExactTimeWindow bool         `json:"exactTimeWindow,omitempty"`
    Results         []ResultData `json:"results,omitempty"`
}
PerformanceData is the resulting object from a performance query.
type PerformanceOptions ¶
type PerformanceOptions struct {
    Start                string              `json:"start,omitempty"`
    End                  string              `json:"end,omitempty"`
    Returnset            string              `json:"returnset,omitempty"`
    Downsample           string              `json:"downsample,omitempty"`
    DownsampleMultiplier string              `json:"downsampleMultiplier,omitempty"`
    Tags                 map[string][]string `json:"tags,omitempty"`
    Metrics              []MetricOptions     `json:"metrics,omitempty"`
}
PerformanceOptions is the request object for doing a performance query.
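A sketch of building a request from the fields above; the metric name, tag key, and time strings are placeholders, since the accepted formats are not documented on this page.

opts := metrics.PerformanceOptions{
    Start:      "1h-ago", // placeholder; check the query service for accepted time formats
    End:        "now",    // placeholder
    Returnset:  "exact",  // placeholder
    Downsample: "1m-avg", // placeholder
    Tags:       map[string][]string{"controlplane_host_id": {"myHostID"}}, // hypothetical tag
    Metrics: []metrics.MetricOptions{{
        Metric:     "memory.used", // hypothetical metric name
        Aggregator: "avg",
    }},
}
_ = opts // pass to whichever query call you use; the request method is not shown in this section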
type RateOptions ¶
type RateOptions struct {
    Counter        bool `json:"counter,omitempty"`
    CounterMax     int  `json:"counterMax,omitempty"`
    ResetThreshold int  `json:"resetThreshold,omitempty"`
}
RateOptions are the options for collecting performance data.
type ResultData ¶
type ResultData struct {
    Datapoints []Datapoint         `json:"datapoints,omitempty"`
    Metric     string              `json:"metric,omitempty"`
    Tags       map[string][]string `json:"tags,omitempty"`
}
ResultData is the actual data resulting from the query, per metric and tag.
type ServiceInstance ¶
type StorageMetrics ¶
type StorageMetrics struct {
    PoolDataAvailable     MetricSeries
    PoolMetadataAvailable MetricSeries
    Tenants               map[string]MetricSeries
}
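A sketch of reading a series out of a GetAvailableStorage result; the aggregator, tenant ID, and the interpretation of X/Y as timestamps/values are assumptions.

// Assumes a *metrics.Client obtained as in the sketch under NewClient.
func printPoolData(c *metrics.Client) error {
    sm, err := c.GetAvailableStorage(24*time.Hour, "avg", "myTenantID")
    if err != nil {
        return err
    }
    // Assumption: X() and Y() return parallel slices of timestamps and values.
    xs, ys := sm.PoolDataAvailable.X(), sm.PoolDataAvailable.Y()
    for i := range xs {
        fmt.Printf("t=%.0f thinpool-data available=%.0f\n", xs[i], ys[i])
    }
    return nil
}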
type V2Datapoint ¶
type V2Datapoint []float64
func (V2Datapoint) Timestamp ¶
func (dp V2Datapoint) Timestamp() float64
func (V2Datapoint) Value ¶
func (dp V2Datapoint) Value() float64
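V2Datapoint is a float64 slice read through Timestamp() and Value(); in the sketch below, the element order of the literal (timestamp first) is an assumption.

dp := metrics.V2Datapoint{1500000000, 42.5} // assumed order: [timestamp, value]
fmt.Println(dp.Timestamp(), dp.Value())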
type V2MetricOptions ¶
type V2MetricOptions struct {
    Metric      string              `json:"metric,omitempty"`
    Aggregator  string              `json:"aggregator,omitempty"`
    Rate        bool                `json:"rate,omitempty"`
    RateOptions V2RateOptions       `json:"rateOptions,omitempty"`
    Expression  string              `json:"expression,omitempty"`
    Tags        map[string][]string `json:"tags,omitempty"`
    Downsample  string              `json:"downsample,omitempty"`
}
V2MetricOptions are the options for receiving metrics for a set of data.
type V2PerformanceData ¶
type V2PerformanceData struct {
    Series   []V2ResultData `json:"series,omitempty"`
    Statuses []V2Status     `json:"statuses,omitempty"`
}
V2PerformanceData is the resulting object from a performance query.
type V2PerformanceOptions ¶
type V2PerformanceOptions struct {
    Start     string            `json:"start,omitempty"`
    End       string            `json:"end,omitempty"`
    Returnset string            `json:"returnset,omitempty"`
    Metrics   []V2MetricOptions `json:"queries,omitempty"`
}
V2PerformanceOptions is the request object for doing a performance query.
type V2RateOptions ¶
type V2RateOptions struct {
    Counter        bool `json:"counter,omitempty"`
    CounterMax     int  `json:"counterMax,omitempty"`
    ResetThreshold int  `json:"resetThreshold,omitempty"`
}
V2RateOptions are the options for collecting performance data.
type V2ResultData ¶
type V2ResultData struct {
    Datapoints []V2Datapoint     `json:"datapoints"`
    Metric     string            `json:"metric,omitempty"`
    Tags       map[string]string `json:"tags,omitempty"`
}