Documentation ¶
Overview ¶
This package is a Prometheus metrics implementation, designed with ease of use as the main consideration.
It provides the following metric types:
- Counter,
- Gauge,
- Histogram.
It can also optionally expose built-in statistics about the system and the application:
- Goroutines,
- Memory usage,
- CPU usage.
A pprof HTTP server can also be enabled to expose runtime profiling data on the Host:Port/debug/pprof/ endpoint.
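As a minimal sketch (the app name is illustrative and the optional fields are taken from the Init type documented below), enabling the built-in statistics and pprof looks like this:

package main

import (
    "github.com/takattila/prometheus"
)

func main() {
    // Sketch only: "OverviewExample" and the environment are illustrative values.
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "OverviewExample",

        StatCountGoroutines: true, // goroutine count
        StatMemoryUsage:     true, // memory usage
        StatCpuUsage:        true, // CPU usage
        EnablePprof:         true, // profiling data on /debug/pprof/
    })

    // Serve the metrics (and pprof) endpoints; see StartHttpServer below.
    p.StartHttpServer()
}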
Index ¶
- func DecimalPlaces(n float64) int
- func GenerateBuckets(start, width float64, count int) []float64
- func GetFreePort() (port int)
- func Grep(find, inText string) (result string)
- func ParseOutput(text string) map[string]*dto.MetricFamily
- func RoundFloat(value float64, decimalPlaces int) float64
- type CounterArgs
- type GaugeArgs
- type HistogramArgs
- type Init
- type Labels
- type MeasureExecTime
- type MeasureExecTimeArgs
- type Object
- func (o *Object) Counter(args CounterArgs) (err error)
- func (o *Object) Gauge(args GaugeArgs) (err error)
- func (o *Object) GetMetrics(text string) string
- func (o *Object) Histogram(args HistogramArgs) (err error)
- func (o *Object) StartHttpServer()
- func (o *Object) StartMeasureExecTime(m MeasureExecTimeArgs) *MeasureExecTime
- func (o *Object) StopHttpServer()
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func DecimalPlaces ¶ added in v1.0.2
func DecimalPlaces(n float64) int
DecimalPlaces returns the number of decimal places of a float64 number.
Example ¶
package main import ( "fmt" "github.com/takattila/prometheus" ) func main() { dp := prometheus.DecimalPlaces(1.559633154856) fmt.Println(dp) }
Output: 12
func GenerateBuckets ¶ added in v1.1.1
func GenerateBuckets(start, width float64, count int) []float64
GenerateBuckets creates a slice of 'count' linear float64 buckets: the first bucket is 'start' and each following bucket is 'width' greater than the previous one. It is typically used to create buckets for measuring request durations.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    buckets := prometheus.GenerateBuckets(1, 1.5, 8)
    fmt.Println(buckets)

    buckets = prometheus.GenerateBuckets(2, 4, 10)
    fmt.Println(buckets)
}
Output:
[1 2.5 4 5.5 7 8.5 10 11.5]
[2 6 10 14 18 22 26 30 34 38]
func GetFreePort ¶
func GetFreePort() (port int)
GetFreePort asks the kernel for a free open port that is ready to use.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    port := prometheus.GetFreePort()
    fmt.Println(port)

    // Output example:
    // 45689
}
Output:
func Grep ¶
func Grep(find, inText string) (result string)
Grep processes text line by line and returns every line that contains the specified word.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    text := `
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa
qui officia deserunt mollit anim id est laborum.
`

    fmt.Println(prometheus.Grep("dolore", text))
}
Output:
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
func ParseOutput ¶
func ParseOutput(text string) map[string]*dto.MetricFamily
ParseOutput reads 'text' as the simple and flat text-based exchange format and creates MetricFamily proto messages.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleParseOutput",
    })

    err := p.Counter(prometheus.CounterArgs{
        MetricName: "parse_output_example",
        Labels:     prometheus.Labels{"parsed": "true"},
        Value:      1,
    })
    fmt.Println(err)

    metric := p.GetMetrics("parse_output_example")
    parsed := prometheus.ParseOutput(metric)

    fmt.Println(metric)
    fmt.Println(parsed)
}
Output:
<nil>
# HELP parse_output_example Counter created for parse_output_example
# TYPE parse_output_example counter
parse_output_example{app="ExampleParseOutput",env="test",parsed="true"} 1
map[parse_output_example:name:"parse_output_example" help:"Counter created for parse_output_example" type:COUNTER metric:<label:<name:"app" value:"ExampleParseOutput" > label:<name:"env" value:"test" > label:<name:"parsed" value:"true" > > ]
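Since the returned values are MetricFamily proto messages, individual samples can be read through the generated getters. A minimal sketch (the app name is illustrative; it assumes the standard Prometheus client_model getters GetMetric, GetCounter and GetValue):

package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ParseOutputSketch",
    })

    _ = p.Counter(prometheus.CounterArgs{
        MetricName: "parse_output_example",
        Labels:     prometheus.Labels{"parsed": "true"},
        Value:      1,
    })

    parsed := prometheus.ParseOutput(p.GetMetrics("parse_output_example"))

    // Read the counter value back out of the parsed MetricFamily.
    if family, ok := parsed["parse_output_example"]; ok && len(family.GetMetric()) > 0 {
        fmt.Println(family.GetMetric()[0].GetCounter().GetValue()) // 1
    }
}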
func RoundFloat ¶ added in v1.0.2
func RoundFloat(value float64, decimalPlaces int) float64
RoundFloat rounds a float64 number to the given number of decimal places.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    float := prometheus.RoundFloat(1.559633154856, 2)
    fmt.Println(float)
}
Output: 1.56
Types ¶
type CounterArgs ¶ added in v1.1.0
CounterArgs contains the necessary arguments of the *Object.Counter function.
type GaugeArgs ¶ added in v1.1.0
GaugeArgs contains the necessary arguments of the *Object.Gauge function.
type HistogramArgs ¶ added in v1.1.0
HistogramArgs contains the necessary arguments of the *Object.Histogram function.
type Init ¶
type Init struct {
    Host                string
    Port                int
    Environment         string
    AppName             string
    MetricEndpoint      string
    StatCountGoroutines bool
    StatMemoryUsage     bool
    StatCpuUsage        bool
    EnablePprof         bool
}
Init is used for prometheus Object initialization.
type Labels ¶
type Labels prometheus.Labels
Labels is used by the metric types: Counter, Gauge and Histogram.
func GetLabels ¶
GetLabels returns the labels of a metric.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleGetLabels",
    })

    metric := "promhttp_metric_handler_requests_total"

    err := p.Counter(prometheus.CounterArgs{
        MetricName: metric,
        Labels:     prometheus.Labels{"code": "200"},
        Value:      1,
    })
    fmt.Println(err)

    output := p.GetMetrics(p.App)
    fmt.Println(prometheus.GetLabels(output, metric))
}
Output:
<nil>
map[app:ExampleGetLabels code:200 env:test]
type MeasureExecTime ¶ added in v1.0.3
type MeasureExecTime struct {
    MetricName   string
    Labels       Labels
    Buckets      []float64
    TimeDuration time.Duration
    // contains filtered or unexported fields
}
MeasureExecTime is used by the StartMeasureExecTime and StopMeasureExecTime functions.
func (*MeasureExecTime) StopMeasureExecTime ¶ added in v1.0.3
func (m *MeasureExecTime) StopMeasureExecTime() error
StopMeasureExecTime ends the execution time measurement.
Example ¶
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "StopMeasureExecTime",
    })

    ms := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_milli_sec",
        Labels:       prometheus.Labels{"function": "calculate"},
        Buckets:      prometheus.GenerateBuckets(5, 5, 10),
        TimeDuration: time.Millisecond,
    })

    time.Sleep(10 * time.Millisecond)

    err := ms.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_milli_sec"))

    // Output example:
    // # HELP execution_time_milli_sec Histogram created for execution_time_milli_sec
    // # TYPE execution_time_milli_sec histogram
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="5"} 0
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="10"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="15"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="20"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="25"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="30"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="35"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="40"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="45"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="50"} 1
    // execution_time_milli_sec_bucket{app="StopMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_milli_sec_sum{app="StopMeasureExecTime",env="test",function="calculate"} 10
    // execution_time_milli_sec_count{app="StopMeasureExecTime",env="test",function="calculate"} 1
}
Output:
type MeasureExecTimeArgs ¶ added in v1.1.0
type MeasureExecTimeArgs MeasureExecTime
MeasureExecTimeArgs contains the necessary arguments of the *Object.StartMeasureExecTime function.
type Object ¶
type Object struct {
    Addr                string
    Env                 string
    App                 string
    MetricsEndpoint     string
    StatCountGoroutines bool
    StatMemoryUsage     bool
    StatCpuUsage        bool
    EnablePprof         bool
    // contains filtered or unexported fields
}
Object provides the structure needed to use the metric types.
func New ¶
New creates a new Object structure.
Example ¶
package main

import (
    "encoding/json"
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        // Obligatory fields
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleService",

        // Optional fields
        MetricEndpoint:      "/metrics", // default: /metrics
        StatCountGoroutines: true,       // default: false
        StatMemoryUsage:     true,       // default: false
        StatCpuUsage:        true,       // default: false
        EnablePprof:         true,       // default: false, endpoint: /debug/pprof/
    })

    b, _ := json.MarshalIndent(p, "", " ")
    fmt.Println(string(b))

    // Output example:
    // {
    //  "Addr": "0.0.0.0:40045",
    //  "Env": "test",
    //  "App": "ExampleService",
    //  "MetricsEndpoint": "/metrics",
    //  "StatCountGoroutines": true,
    //  "StatMemoryUsage": true,
    //  "StatCpuUsage": true,
    //  "EnablePprof": true
    // }
}
Output:
func (*Object) Counter ¶
func (o *Object) Counter(args CounterArgs) (err error)
Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. For example, you can use a counter to represent the number of requests served, tasks completed, or errors.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleCounter",
    })

    err := p.Counter(prometheus.CounterArgs{
        MetricName: "response_status",
        Labels:     prometheus.Labels{"handler": "MyHandler1", "statuscode": "200"},
        Value:      1,
    })

    fmt.Println()
    fmt.Println(p.GetMetrics("response_status"))
    fmt.Println("Error:", err)
}
Output:
# HELP response_status Counter created for response_status
# TYPE response_status counter
response_status{app="ExampleCounter",env="test",handler="MyHandler1",statuscode="200"} 1
Error: <nil>
func (*Object) Gauge ¶
func (o *Object) Gauge(args GaugeArgs) (err error)
Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.
Gauges are typically used for measured values like temperatures or current memory usage, but also "counts" that can go up and down, like the number of concurrent requests.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleGauge",
    })

    err := p.Gauge(prometheus.GaugeArgs{
        MetricName: "cpu_usage_example",
        Labels:     prometheus.Labels{"core": "0"},
        Value:      15,
    })

    fmt.Println()
    fmt.Println(p.GetMetrics("cpu_usage_example"))
    fmt.Println("Error:", err)
}
Output:
# HELP cpu_usage_example Gauge created for cpu_usage_example
# TYPE cpu_usage_example gauge
cpu_usage_example{app="ExampleGauge",core="0",env="test"} 15
Error: <nil>
func (*Object) GetMetrics ¶
func (o *Object) GetMetrics(text string) string
GetMetrics returns all metrics data that match the 'text' string.
Example ¶
package main

import (
    "fmt"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleGetMetrics",
    })

    fmt.Println(p.GetMetrics("go_"))

    // Output example:
    // # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
    // # TYPE go_gc_duration_seconds summary
    // go_gc_duration_seconds{quantile="0"} 1.5162e-05
    // go_gc_duration_seconds{quantile="0.25"} 1.9539e-05
    // go_gc_duration_seconds{quantile="0.5"} 3.6708e-05
    // go_gc_duration_seconds{quantile="0.75"} 9.2103e-05
    // go_gc_duration_seconds{quantile="1"} 0.00023626
    // go_gc_duration_seconds_sum 0.000506999
    // go_gc_duration_seconds_count 7
    // # HELP go_goroutines Number of goroutines that currently exist.
    // # TYPE go_goroutines gauge
    // go_goroutines 24
}
Output:
func (*Object) Histogram ¶
func (o *Object) Histogram(args HistogramArgs) (err error)
Histogram samples observations (usually things like request durations or response sizes) and counts them in configurable buckets. It also provides a sum of all observed values.
A histogram with a base metric name of <basename> exposes multiple time series during a scrape:
- cumulative counters for the observation buckets, exposed as <basename>_bucket{le="<upper inclusive bound>"}
- the total sum of all observed values, exposed as <basename>_sum
- the count of events that have been observed, exposed as <basename>_count (identical to <basename>_bucket{le="+Inf"} above)
Use the histogram_quantile() function to calculate quantiles from histograms or even aggregations of histograms.
A histogram is also suitable to calculate an Apdex score. When operating on buckets, remember that the histogram is cumulative.
Example ¶
package main

import (
    "log"
    "time"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleHistogram",
    })

    start := time.Now()

    // Elapsed time to measure the computation time
    // of a given function, handler, etc...
    defer func(begin time.Time) {
        err := p.Histogram(prometheus.HistogramArgs{
            MetricName: "get_stat",
            Labels:     prometheus.Labels{"handler": "purchases"},
            Buckets:    prometheus.GenerateBuckets(0.05, 0.05, 5),
            Value:      time.Since(begin).Seconds(),
        })
        if err != nil {
            log.Fatal(err)
        }
    }(start)

    time.Sleep(100 * time.Millisecond)

    // Output example:
    // # HELP get_stat Histogram created for get_stat
    // # TYPE get_stat histogram
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="0.05"} 0
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="0.1"} 0
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="0.15"} 1
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="0.2"} 1
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="0.25"} 1
    // get_stat_bucket{app="ExampleHistogram",env="test",handler="purchases",le="+Inf"} 1
    // get_stat_sum{app="ExampleHistogram",env="test",handler="purchases"} 0.100233303
    // get_stat_count{app="ExampleHistogram",env="test",handler="purchases"} 1
}
Output:
func (*Object) StartHttpServer ¶
func (o *Object) StartHttpServer()
StartHttpServer starts serving metrics data on the given host and port, on the endpoint set by (*Object).MetricsEndpoint.
Example ¶
package main

import (
    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleStartHttpServer",
    })

    p.StartHttpServer()
}
Output:
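Once the server is running, the exposed metrics can be fetched from Addr plus MetricsEndpoint. A minimal sketch (assuming StartHttpServer returns immediately, as the example above suggests, and that the default /metrics endpoint is used; the app name and the short sleep are illustrative):

package main

import (
    "fmt"
    "io"
    "net/http"
    "time"

    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ScrapeSketch",
    })

    p.StartHttpServer()
    defer p.StopHttpServer()

    // Give the listener a moment to come up before requesting it.
    time.Sleep(100 * time.Millisecond)

    // Addr and MetricsEndpoint are exported on Object, e.g. "0.0.0.0:40045" and "/metrics".
    resp, err := http.Get("http://" + p.Addr + p.MetricsEndpoint)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(len(body) > 0) // true: the text exposition format is served
}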
func (*Object) StartMeasureExecTime ¶ added in v1.0.3
func (o *Object) StartMeasureExecTime(m MeasureExecTimeArgs) *MeasureExecTime
StartMeasureExecTime starts measuring the execution time of a particular calculation.
Use the TimeDuration field to set the unit of the elapsed time measurement: Minute, Second, Millisecond, Microsecond, Nanosecond.
Example ¶
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/takattila/prometheus"
)

func main() {
    functionName := "calculate"

    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "StartMeasureExecTime",
    })

    // Nanoseconds start
    ns := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_nano_sec",
        Labels:       prometheus.Labels{"function": functionName},
        Buckets:      prometheus.GenerateBuckets(5000, 10000, 10),
        TimeDuration: time.Nanosecond,
    })

    time.Sleep(5000 * time.Nanosecond)

    err := ns.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_nano_sec"))
    // Nanoseconds end

    // Microseconds start
    µs := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_micro_sec",
        Labels:       prometheus.Labels{"function": functionName},
        Buckets:      prometheus.GenerateBuckets(50, 50, 10),
        TimeDuration: time.Microsecond,
    })

    time.Sleep(100 * time.Microsecond)

    err = µs.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_micro_sec"))
    // Microseconds end

    // Milliseconds start
    ms := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_milli_sec",
        Labels:       prometheus.Labels{"function": functionName},
        Buckets:      prometheus.GenerateBuckets(5, 5, 10),
        TimeDuration: time.Millisecond,
    })

    time.Sleep(10 * time.Millisecond)

    err = ms.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_milli_sec"))
    // Milliseconds end

    // Seconds start
    s := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_seconds",
        Labels:       prometheus.Labels{"function": functionName},
        Buckets:      prometheus.GenerateBuckets(0.5, 0.5, 10),
        TimeDuration: time.Second,
    })

    time.Sleep(1 * time.Second)

    err = s.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_seconds"))
    // Seconds end

    // Minutes start
    m := p.StartMeasureExecTime(prometheus.MeasureExecTimeArgs{
        MetricName:   "execution_time_minutes",
        Labels:       prometheus.Labels{"function": functionName},
        Buckets:      prometheus.GenerateBuckets(0.005, 0.005, 10),
        TimeDuration: time.Minute,
    })

    time.Sleep(1 * time.Second)

    err = m.StopMeasureExecTime()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(p.GetMetrics("execution_time_minutes"))
    // Minutes end

    // Output example:
    // # HELP execution_time_nano_sec Histogram created for execution_time_nano_sec
    // # TYPE execution_time_nano_sec histogram
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="5000"} 0
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="15000"} 0
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="25000"} 0
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="35000"} 0
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="45000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="55000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="65000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="75000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="85000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="95000"} 1
    // execution_time_nano_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_nano_sec_sum{app="StartMeasureExecTime",env="test",function="calculate"} 43280
    // execution_time_nano_sec_count{app="StartMeasureExecTime",env="test",function="calculate"} 1
    //
    // # HELP execution_time_micro_sec Histogram created for execution_time_micro_sec
    // # TYPE execution_time_micro_sec histogram
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="50"} 0
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="100"} 0
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="150"} 0
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="200"} 0
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="250"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="300"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="350"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="400"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="450"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="500"} 1
    // execution_time_micro_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_micro_sec_sum{app="StartMeasureExecTime",env="test",function="calculate"} 236
    // execution_time_micro_sec_count{app="StartMeasureExecTime",env="test",function="calculate"} 1
    //
    // # HELP execution_time_milli_sec Histogram created for execution_time_milli_sec
    // # TYPE execution_time_milli_sec histogram
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="5"} 0
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="10"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="15"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="20"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="25"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="30"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="35"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="40"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="45"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="50"} 1
    // execution_time_milli_sec_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_milli_sec_sum{app="StartMeasureExecTime",env="test",function="calculate"} 10
    // execution_time_milli_sec_count{app="StartMeasureExecTime",env="test",function="calculate"} 1
    //
    // # HELP execution_time_seconds Histogram created for execution_time_seconds
    // # TYPE execution_time_seconds histogram
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.5"} 0
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="1"} 0
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="1.5"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="2"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="2.5"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="3"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="3.5"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="4"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="4.5"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="5"} 1
    // execution_time_seconds_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_seconds_sum{app="StartMeasureExecTime",env="test",function="calculate"} 1.000324369
    // execution_time_seconds_count{app="StartMeasureExecTime",env="test",function="calculate"} 1
    //
    // # HELP execution_time_minutes Histogram created for execution_time_minutes
    // # TYPE execution_time_minutes histogram
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.005"} 0
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.01"} 0
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.015"} 0
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.02"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.025"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.03"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.035"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.04"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.045"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="0.05"} 1
    // execution_time_minutes_bucket{app="StartMeasureExecTime",env="test",function="calculate",le="+Inf"} 1
    // execution_time_minutes_sum{app="StartMeasureExecTime",env="test",function="calculate"} 0.016671208216666667
    // execution_time_minutes_count{app="StartMeasureExecTime",env="test",function="calculate"} 1
}
Output:
func (*Object) StopHttpServer ¶
func (o *Object) StopHttpServer()
StopHttpServer stops serving metrics data.
Example ¶
package main

import (
    "github.com/takattila/prometheus"
)

func main() {
    p := prometheus.New(prometheus.Init{
        Host:        "0.0.0.0",
        Port:        prometheus.GetFreePort(),
        Environment: "test",
        AppName:     "ExampleStopHttpServer",
    })

    p.StopHttpServer()
}
Output: