collectors

package
Published: May 9, 2016 License: MIT Imports: 61 Imported by: 0

Documentation

Index

Constants

const (
	VRRPInstanceTable = ".1.3.6.1.4.1.9586.100.5.2.3.1"
	VRRPAddressTable  = ".1.3.6.1.4.1.9586.100.5.2.6.1"
)
const (
	DescRmqBackingQueueStatusAvgAckEgressRate  = "Rate at which unacknowledged message records leave RAM, e.g. because acks arrive or unacked messages are paged out"
	DescRmqBackingQueueStatusAvgAckIngressRate = "Rate at which unacknowledged message records enter RAM, e.g. because messages are delivered requiring acknowledgement"
	DescRmqBackingQueueStatusAvgEgressRate     = "Average egress (outbound) rate, not including messages that go straight through to auto-acking consumers."
	DescRmqBackingQueueStatusAvgIngressRate    = "Average ingress (inbound) rate, not including messages that go straight through to auto-acking consumers."
	DescRmqBackingQueueStatusLen               = "Total backing queue length."
	DescRmqConsumers                           = "Number of consumers."
	DescRmqConsumerUtilisation                 = "" /* 203-byte string literal not displayed */
	DescRmqDiskFreeAlarm                       = "Whether the disk alarm has gone off."
	DescRmqDiskFree                            = "Disk free space in bytes."
	DescRmqDiskFreeLimit                       = "Point at which the disk alarm will go off."
	DescRmqDownSlaveNodes                      = "Count of down nodes having a copy of the queue."
	DescRmqFDTotal                             = "File descriptors available."
	DescRmqFDUsed                              = "Used file descriptors."
	DescRmqIOReadAvgTime                       = "Average wall time (milliseconds) for each disk read operation in the last statistics interval."
	DescRmqIOReadBytes                         = "Total number of bytes read from disk by the persister."
	DescRmqIOReadCount                         = "Total number of read operations by the persister."
	DescRmqIOReopenCount                       = "" /* 248-byte string literal not displayed */
	DescRmqIOSeekAvgTime                       = "Average wall time (milliseconds) for each seek operation in the last statistics interval."
	DescRmqIOSeekCount                         = "Total number of seek operations by the persister."
	DescRmqIOSyncAvgTime                       = "Average wall time (milliseconds) for each sync operation in the last statistics interval."
	DescRmqIOSyncCount                         = "Total number of fsync() operations by the persister."
	DescRmqIOWriteAvgTime                      = "Average wall time (milliseconds) for each write operation in the last statistics interval."
	DescRmqIOWriteBytes                        = "Total number of bytes written to disk by the persister."
	DescRmqIOWriteCount                        = "Total number of write operations by the persister."
	DescRmqMemAlarm                            = ""
	DescRmqMemLimit                            = "Point at which the memory alarm will go off."
	DescRmqMemory                              = "Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures."
	DescRmqMemUsed                             = "Memory used in bytes."
	DescRmqMessageBytesPersistent              = "Like messageBytes but counting only those messages which are persistent."
	DescRmqMessageBytesRAM                     = "Like messageBytes but counting only those messages which are in RAM."
	DescRmqMessageBytesReady                   = "Like messageBytes but counting only those messages ready to be delivered to clients."
	DescRmqMessageBytes                        = "" /* 133-byte string literal not displayed */
	DescRmqMessageBytesUnacknowledged          = "Like messageBytes but counting only those messages delivered to clients but not yet acknowledged."
	DescRmqMessagesPersistent                  = "Total number of persistent messages in the queue (will always be 0 for transient queues)."
	DescRmqMessagesRAM                         = "Total number of messages which are resident in ram."
	DescRmqMessagesReady                       = "Number of messages ready to be delivered to clients."
	DescRmqMessagesReadyRAM                    = "Number of messages from messagesReady which are resident in ram."
	DescRmqMessages                            = "Sum of ready and unacknowledged messages (queue depth)."
	DescRmqMessageStatsAck                     = "Count of acknowledged messages."
	DescRmqMessageStatsConfirm                 = "Count of messages confirmed."
	DescRmqMessageStatsDeliver                 = "Count of messages delivered in acknowledgement mode to consumers."
	DescRmqMessageStatsDeliverGet              = "Sum of deliver, deliverNoack, get, getNoack."
	DescRmqMessageStatsDeliverNoAck            = "Count of messages delivered in no-acknowledgement mode to consumers."
	DescRmqMessageStatsGet                     = "Count of messages delivered in acknowledgement mode in response to basic.get."
	DescRmqMessageStatsGetNoack                = "Count of messages delivered in no-acknowledgement mode in response to basic.get."
	DescRmqMessageStatsPublish                 = "Count of messages published."
	DescRmqMessageStatsPublishIn               = "Count of messages published \"in\" to an exchange, i.e. not taking account of routing."
	DescRmqMessageStatsPublishOut              = "Count of messages published \"out\" of an exchange, i.e. taking account of routing."
	DescRmqMessageStatsRedeliver               = "Count of subset of messages in deliverGet which had the redelivered flag set."
	DescRmqMessageStatsReturn                  = "Count of messages returned to publisher as unroutable."
	DescRmqMessagesUnacknowledged              = "Number of messages delivered to clients but not yet acknowledged."
	DescRmqMessagesUnacknowledgedRAM           = "Number of messages from messagesUnacknowledged which are resident in ram."
	DescRmqMnesiaDiskTxCount                   = "" /* 180-byte string literal not displayed */
	DescRmqMnesiaRAMTxCount                    = "" /* 189-byte string literal not displayed */
	DescRmqMsgStoreReadCount                   = "Number of messages which have been read from the message store."
	DescRmqMsgStoreWriteCount                  = "Number of messages which have been written to the message store."
	DescRmqObjecttotalsChannels                = "Overall number of channels."
	DescRmqObjectTotalsConnections             = "Overall number of connections."
	DescRmqObjectTotalsConsumers               = "Overall number of consumers."
	DescRmqObjectTotalsExchanges               = "Overall number of exchanges."
	DescRmqObjectTotalsQueues                  = "Overall number of queues."
	DescRmqPartitions                          = "Count of network partitions this node is seeing."
	DescRmqProcessors                          = "Number of cores detected and usable by Erlang."
	DescRmqProcTotal                           = "Maximum number of Erlang processes."
	DescRmqProcUsed                            = "Number of Erlang processes in use."
	DescRmqQueueIndexJournalWriteCount         = "" /* 179-byte string literal not displayed */
	DescRmqQueueIndexReadCount                 = "Number of records read from the queue index."
	DescRmqQueueIndexWriteCount                = "Number of records written to the queue index."
	DescRmqQueueTotalsMessages                 = "Overall sum of ready and unacknowledged messages (queue depth)."
	DescRmqQueueTotalsMessagesReady            = "Overall number of messages ready to be delivered to clients."
	DescRmqQueueTotalsMessagesUnacknowledged   = "Overall number of messages delivered to clients but not yet acknowledged."
	DescRmqRunning                             = "Boolean for whether this node is up. Obviously if this is false, most other stats will be missing."
	DescRmqRunQueue                            = "Average number of Erlang processes waiting to run."
	DescRmqSlaveNodes                          = "Count of nodes having a copy of the queue."
	DescRmqSocketsTotal                        = "File descriptors available for use as sockets."
	DescRmqSocketsUsed                         = "File descriptors used as sockets."
	DescRmqState                               = "The state of the queue. Unknown=> -1, Running=> 0, Syncing=> 1, Flow=> 2, Down=> 3"
	DescRmqSynchronisedSlaveNodes              = "Count of nodes having a synchronised copy of the queue."
	DescRmqSyncMessages                        = "Count of already synchronised messages on a slave node."
	DescRmqUptime                              = "Node uptime in seconds."
)

Variables

var (
	// DefaultFreq is the duration between collection intervals if none is
	// specified.
	DefaultFreq = time.Second * 15

	AddTags opentsdb.TagSet

	AddProcessDotNetConfig = func(params conf.ProcessDotNet) error {
		return fmt.Errorf("process_dotnet watching not implemented on this platform")
	}
	WatchProcessesDotNet = func() {}

	KeepalivedCommunity = ""
)
var CPU_FIELDS = []string{
	"user",
	"nice",
	"system",
	"idle",
	"iowait",
	"irq",
	"softirq",
	"steal",
	"guest",
	"guest_nice",
}

Functions

func AWS

func AWS(accessKey, secretKey, region string) error

func Add

func Add(md *opentsdb.MultiDataPoint, name string, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

Add appends a new data point with the given metric name, value, and tags. Tags may be nil. If tags is nil or does not contain a host key, it will be added automatically. If the value of the host key is the empty string, it will be removed (use this to prevent the normal auto-adding of the host tag).
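
A minimal sketch of building up a MultiDataPoint with Add, showing both host-tag behaviours described above. The metric names are invented, and the metadata.Gauge and metadata.Count constants are assumed to come from bosun.org/metadata.

package example

import (
	"bosun.org/cmd/scollector/collectors"
	"bosun.org/metadata"
	"bosun.org/opentsdb"
)

// exampleDataPoints shows automatic host tagging and how to suppress it.
func exampleDataPoints() opentsdb.MultiDataPoint {
	var md opentsdb.MultiDataPoint

	// Tags are nil, so the local host tag is added automatically.
	collectors.Add(&md, "example.metric", 42, nil,
		metadata.Gauge, metadata.Count, "An example gauge.")

	// An explicit empty host value suppresses the automatic host tag,
	// which suits metrics that are not tied to a single machine.
	collectors.Add(&md, "example.cluster.metric", 7,
		opentsdb.TagSet{"host": "", "cluster": "demo"},
		metadata.Gauge, metadata.Count, "An example metric without a host tag.")

	return md
}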

func AddElasticIndexFilter

func AddElasticIndexFilter(s string) error

func AddMetricFilters

func AddMetricFilters(s string) error

AddMetricFilters adds metric filters provided by the conf.

func AddProcessConfig

func AddProcessConfig(params conf.ProcessParams) error

func AddSystemdServiceConfig

func AddSystemdServiceConfig(params conf.ServiceParams) error

func AddTS

func AddTS(md *opentsdb.MultiDataPoint, name string, ts int64, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

AddTS is the same as Add but lets you specify the timestamp.
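
A small sketch of AddTS with an explicit timestamp. The metric name is hypothetical, and the timestamp is assumed to be Unix seconds as used by OpenTSDB.

package example

import (
	"time"

	"bosun.org/cmd/scollector/collectors"
	"bosun.org/metadata"
	"bosun.org/opentsdb"
)

// addBackdated records a value observed 30 seconds ago rather than "now";
// aside from the timestamp argument, AddTS mirrors Add.
func addBackdated(md *opentsdb.MultiDataPoint) {
	ts := time.Now().Add(-30 * time.Second).Unix()
	collectors.AddTS(md, "example.backdated", ts, 1, nil,
		metadata.Counter, metadata.Count, "A counter with an explicit timestamp.")
}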

func AddTagOverrides

func AddTagOverrides(s []Collector, tagOverride []conf.TagOverride) error

AddTagOverrides adds the configured tag overrides to all matching collectors.

func ExtraHop

func ExtraHop(host, apikey, filterby string, filterpercent int) error

ExtraHop registers a collector for ExtraHop.

func GenericSnmp

func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error)

func HTTPUnitHiera

func HTTPUnitHiera(filename string, freq time.Duration) error

func HTTPUnitPlans

func HTTPUnitPlans(name string, plans *httpunit.Plans, freq time.Duration)

func HTTPUnitTOML

func HTTPUnitTOML(filename string, freq time.Duration) error

func ICMP

func ICMP(host string) error

ICMP registers an ICMP collector for a given host.

func InContainer

func InContainer(pid string) bool

InContainer detects if a process is running in a Linux container.

func Init

func Init(c *conf.Conf)

func InitFake

func InitFake(fake int)

func InitPrograms

func InitPrograms(cpath string)

func IsAlNum

func IsAlNum(s string) bool

IsAlNum returns true if s is alphanumeric.

func IsDigit

func IsDigit(s string) bool

IsDigit returns true if s consists of decimal digits.

func RabbitMQ

func RabbitMQ(url string) error

RabbitMQ registers a RabbitMQ collector.

func Riak

func Riak(s string) error

func Run

func Run(cs []Collector) (chan *opentsdb.DataPoint, chan struct{})

Run runs the specified collectors. Use nil to run all collectors.
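
A sketch of driving Run from a small program. A real program would typically call Init with scollector's conf.Conf first; the Search pattern is hypothetical, and closing the returned quit channel is assumed to be the shutdown signal.

package main

import (
	"fmt"

	"bosun.org/cmd/scollector/collectors"
)

func main() {
	// Restrict to collectors whose names match a pattern (hypothetical
	// pattern shown); pass nil to Run to start every registered collector.
	cs := collectors.Search([]string{"cpu"})

	dps, quit := collectors.Run(cs)
	_ = quit // assumption: closing quit asks the collectors to stop

	// Print data points as they arrive.
	for dp := range dps {
		fmt.Println(dp.Timestamp, dp.Metric, dp.Value, dp.Tags)
	}
}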

func SNMP

func SNMP(cfg conf.SNMP, mibs map[string]conf.MIB) error

func SNMPBridge

func SNMPBridge(cfg conf.SNMP)

SNMPBridge registers a SNMP Bridge collector for the given community and host.

func SNMPCiscoASA

func SNMPCiscoASA(cfg conf.SNMP)

SNMPCiscoASA registers a SNMP Cisco ASA collector for the given community and host.

func SNMPCiscoBGP

func SNMPCiscoBGP(cfg conf.SNMP)

func SNMPCiscoIOS

func SNMPCiscoIOS(cfg conf.SNMP)

SNMPCiscoIOS registers a SNMP Cisco IOS collector for the given community and host.

func SNMPCiscoNXOS

func SNMPCiscoNXOS(cfg conf.SNMP)

SNMPCiscoNXOS registers a SNMP Cisco NX-OS collector (i.e. Nexus switches) for the given community and host.

func SNMPFortinet

func SNMPFortinet(cfg conf.SNMP)

SNMPFortinet registers a SNMP Fortinet collector for the given community and host.

func SNMPIPAddresses

func SNMPIPAddresses(cfg conf.SNMP)

SNMPIPAddresses registers a SNMP IP address collector for the given community and host.

func SNMPIfaces

func SNMPIfaces(cfg conf.SNMP)

SNMPIfaces registers a SNMP Interfaces collector for the given community and host.

func SNMPSys

func SNMPSys(cfg conf.SNMP)

SNMPSys registers a SNMP system data collector for the given community and host.

func TSys100NStoEpoch

func TSys100NStoEpoch(nsec uint64) int64

func Vsphere

func Vsphere(user, pwd, host string) error

Vsphere registers a vSphere collector.

func WatchProcesses

func WatchProcesses()

Types

type Collector

type Collector interface {
	Run(chan<- *opentsdb.DataPoint, <-chan struct{})
	Name() string
	Init()
	AddTagOverrides(map[string]string, opentsdb.TagSet) error
	ApplyTagOverrides(opentsdb.TagSet)
}
func Search(s []string) []Collector

Search returns all collectors matching the pattern s.

type DatastoreHostMount

type DatastoreHostMount struct {
	Key       string `xml:"key"`
	MountInfo struct {
		Accessible bool   `xml:"accessible"`
		AccessMode string `xml:"accessMode"`
		Mounted    bool   `xml:"mounted"`
		Path       string `xml:"path"`
	} `xml:"mountInfo"`
}

type ElasticBreakersStat

type ElasticBreakersStat struct {
	EstimatedSize        string  `json:"estimated_size"`
	EstimatedSizeInBytes int     `json:"estimated_size_in_bytes"`
	LimitSize            string  `json:"limit_size"`
	LimitSizeInBytes     int     `json:"limit_size_in_bytes"`
	Overhead             float64 `json:"overhead"`
	Tripped              int     `json:"tripped"`
}

type ElasticClusterState

type ElasticClusterState struct {
	MasterNode string `json:"master_node"`
}

type ElasticClusterStats

type ElasticClusterStats struct {
	ClusterName string `json:"cluster_name"`
	Nodes       map[string]struct {
		Attributes struct {
			Master string `json:"master"`
		} `json:"attributes"`
		Breakers struct {
			Fielddata ElasticBreakersStat `json:"fielddata"`
			Parent    ElasticBreakersStat `json:"parent"`
			Request   ElasticBreakersStat `json:"request"`
		} `json:"breakers" exclude:"true"`
		FS struct {
			Data []struct {
				AvailableInBytes     int    `json:"available_in_bytes"`
				Dev                  string `json:"dev" version:"1"`                      // 1.0 only
				DiskIoOp             int    `json:"disk_io_op" version:"1"`               // 1.0 only
				DiskIoSizeInBytes    int    `json:"disk_io_size_in_bytes" version:"1"`    // 1.0 only
				DiskQueue            string `json:"disk_queue" version:"1"`               // 1.0 only
				DiskReadSizeInBytes  int    `json:"disk_read_size_in_bytes" version:"1"`  // 1.0 only
				DiskReads            int    `json:"disk_reads" version:"1"`               // 1.0 only
				DiskServiceTime      string `json:"disk_service_time" version:"1"`        // 1.0 only
				DiskWriteSizeInBytes int    `json:"disk_write_size_in_bytes" version:"1"` // 1.0 only
				DiskWrites           int    `json:"disk_writes" version:"1"`              // 1.0 only
				FreeInBytes          int    `json:"free_in_bytes"`
				Mount                string `json:"mount"`
				Path                 string `json:"path"`
				TotalInBytes         int    `json:"total_in_bytes"`
				Type                 string `json:"type" version:"2"` // 2.0 only
			} `json:"data"`
			Timestamp int `json:"timestamp"`
			Total     struct {
				AvailableInBytes     int    `json:"available_in_bytes"`
				DiskIoOp             int    `json:"disk_io_op" version:"1"`               // 1.0 only
				DiskIoSizeInBytes    int    `json:"disk_io_size_in_bytes" version:"1"`    // 1.0 only
				DiskQueue            string `json:"disk_queue" version:"1"`               // 1.0 only
				DiskReadSizeInBytes  int    `json:"disk_read_size_in_bytes" version:"1"`  // 1.0 only
				DiskReads            int    `json:"disk_reads" version:"1"`               // 1.0 only
				DiskServiceTime      string `json:"disk_service_time" version:"1"`        // 1.0 only
				DiskWriteSizeInBytes int    `json:"disk_write_size_in_bytes" version:"1"` // 1.0 only
				DiskWrites           int    `json:"disk_writes" version:"1"`              // 1.0 only
				FreeInBytes          int    `json:"free_in_bytes"`
				TotalInBytes         int    `json:"total_in_bytes"`
			} `json:"total"`
		} `json:"fs" exclude:"true"`
		Host string `json:"host"`
		HTTP struct {
			CurrentOpen int `json:"current_open"`
			TotalOpened int `json:"total_opened"`
		} `json:"http"`
		Indices ElasticIndexDetails `json:"indices" exclude:"true"` // Stored under elastic.indices.local namespace.
		IP      []string            `json:"ip" exclude:"true"`
		JVM     struct {
			BufferPools struct {
				Direct struct {
					Count                int `json:"count"`
					TotalCapacityInBytes int `json:"total_capacity_in_bytes"`
					UsedInBytes          int `json:"used_in_bytes"`
				} `json:"direct"`
				Mapped struct {
					Count                int `json:"count"`
					TotalCapacityInBytes int `json:"total_capacity_in_bytes"`
					UsedInBytes          int `json:"used_in_bytes"`
				} `json:"mapped"`
			} `json:"buffer_pools"`
			Classes struct {
				CurrentLoadedCount int `json:"current_loaded_count" version:"2"` // 2.0 only
				TotalLoadedCount   int `json:"total_loaded_count" version:"2"`   // 2.0 only
				TotalUnloadedCount int `json:"total_unloaded_count" version:"2"` // 2.0 only
			} `json:"classes"`
			GC struct {
				Collectors struct {
					Old struct {
						CollectionCount        int `json:"collection_count"`
						CollectionTimeInMillis int `json:"collection_time_in_millis"`
					} `json:"old"`
					Young struct {
						CollectionCount        int `json:"collection_count"`
						CollectionTimeInMillis int `json:"collection_time_in_millis"`
					} `json:"young"`
				} `json:"collectors"`
			} `json:"gc" exclude:"true"` // This is recorded manually so we can tag the GC collector type.
			Mem struct {
				HeapCommittedInBytes    int `json:"heap_committed_in_bytes" metric:"heap_committed"`
				HeapMaxInBytes          int `json:"heap_max_in_bytes"`
				HeapUsedInBytes         int `json:"heap_used_in_bytes" metric:"heap_used"`
				HeapUsedPercent         int `json:"heap_used_percent"`
				NonHeapCommittedInBytes int `json:"non_heap_committed_in_bytes"`
				NonHeapUsedInBytes      int `json:"non_heap_used_in_bytes"`
				Pools                   struct {
					Old struct {
						MaxInBytes      int `json:"max_in_bytes"`
						PeakMaxInBytes  int `json:"peak_max_in_bytes"`
						PeakUsedInBytes int `json:"peak_used_in_bytes"`
						UsedInBytes     int `json:"used_in_bytes"`
					} `json:"old"`
					Survivor struct {
						MaxInBytes      int `json:"max_in_bytes"`
						PeakMaxInBytes  int `json:"peak_max_in_bytes"`
						PeakUsedInBytes int `json:"peak_used_in_bytes"`
						UsedInBytes     int `json:"used_in_bytes"`
					} `json:"survivor"`
					Young struct {
						MaxInBytes      int `json:"max_in_bytes"`
						PeakMaxInBytes  int `json:"peak_max_in_bytes"`
						PeakUsedInBytes int `json:"peak_used_in_bytes"`
						UsedInBytes     int `json:"used_in_bytes"`
					} `json:"young"`
				} `json:"pools" exclude:"true"`
			} `json:"mem"`
			Threads struct {
				Count     int `json:"count"`
				PeakCount int `json:"peak_count"`
			} `json:"threads"`
			Timestamp      int `json:"timestamp"`
			UptimeInMillis int `json:"uptime_in_millis"`
		} `json:"jvm"`
		Name    string `json:"name"`
		Network struct {
			TCP struct {
				ActiveOpens  int `json:"active_opens" version:"1"`  // 1.0 only
				AttemptFails int `json:"attempt_fails" version:"1"` // 1.0 only
				CurrEstab    int `json:"curr_estab" version:"1"`    // 1.0 only
				EstabResets  int `json:"estab_resets" version:"1"`  // 1.0 only
				InErrs       int `json:"in_errs" version:"1"`       // 1.0 only
				InSegs       int `json:"in_segs" version:"1"`       // 1.0 only
				OutRsts      int `json:"out_rsts" version:"1"`      // 1.0 only
				OutSegs      int `json:"out_segs" version:"1"`      // 1.0 only
				PassiveOpens int `json:"passive_opens" version:"1"` // 1.0 only
				RetransSegs  int `json:"retrans_segs" version:"1"`  // 1.0 only
			} `json:"tcp"`
		} `json:"network"`
		OS struct {
			CPU struct {
				Idle   int `json:"idle" version:"1"`   // 1.0 only
				Stolen int `json:"stolen" version:"1"` // 1.0 only
				Sys    int `json:"sys" version:"1"`    // 1.0 only
				Usage  int `json:"usage" version:"1"`  // 1.0 only
				User   int `json:"user" version:"1"`   // 1.0 only
			} `json:"cpu"`
			//			LoadAverage []float64 `json:"load_average"` // 1.0 only
			//			LoadAverage float64 `json:"load_average"` // 2.0 only
			Mem struct {
				ActualFreeInBytes int `json:"actual_free_in_bytes" version:"1"` // 1.0 only
				ActualUsedInBytes int `json:"actual_used_in_bytes" version:"1"` // 1.0 only
				FreeInBytes       int `json:"free_in_bytes"`
				FreePercent       int `json:"free_percent"`
				TotalInBytes      int `json:"total_in_bytes" version:"2"` // 2.0 only
				UsedInBytes       int `json:"used_in_bytes"`
				UsedPercent       int `json:"used_percent"`
			} `json:"mem"`
			Swap struct {
				FreeInBytes  int `json:"free_in_bytes"`
				TotalInBytes int `json:"total_in_bytes" version:"2"` // 2.0 only
				UsedInBytes  int `json:"used_in_bytes"`
			} `json:"swap"`
			Timestamp      int `json:"timestamp"`
			UptimeInMillis int `json:"uptime_in_millis"`
		} `json:"os" exclude:"true"` // These are OS-wide stats, and are already gathered by other collectors.
		Process struct {
			CPU struct {
				Percent       int `json:"percent" exclude:"true"`
				SysInMillis   int `json:"sys_in_millis" version:"1"` // 1.0 only
				TotalInMillis int `json:"total_in_millis"`
				UserInMillis  int `json:"user_in_millis" version:"1"` // 1.0 only
			} `json:"cpu"`
			MaxFileDescriptors int `json:"max_file_descriptors" version:"2"` // 2.0 only
			Mem                struct {
				ResidentInBytes     int `json:"resident_in_bytes" metric:"resident" version:"1"` // 1.0 only
				ShareInBytes        int `json:"share_in_bytes" metric:"shared" version:"1"`      // 1.0 only
				TotalVirtualInBytes int `json:"total_virtual_in_bytes" metric:"total_virtual"`
			} `json:"mem"`
			OpenFileDescriptors int `json:"open_file_descriptors"`
			Timestamp           int `json:"timestamp" exclude:"true"`
		} `json:"process"`
		Script struct {
			CacheEvictions int `json:"cache_evictions" version:"2"` // 2.0 only
			Compilations   int `json:"compilations" version:"2"`    // 2.0 only
		} `json:"script"`
		ThreadPool struct {
			Bulk              ElasticThreadPoolStat `json:"bulk"`
			FetchShardStarted ElasticThreadPoolStat `json:"fetch_shard_started" version:"2"` // 2.0 only
			FetchShardStore   ElasticThreadPoolStat `json:"fetch_shard_store" version:"2"`   // 2.0 only
			Flush             ElasticThreadPoolStat `json:"flush"`
			Generic           ElasticThreadPoolStat `json:"generic"`
			Get               ElasticThreadPoolStat `json:"get"`
			Index             ElasticThreadPoolStat `json:"index"`
			Listener          ElasticThreadPoolStat `json:"listener"`
			Management        ElasticThreadPoolStat `json:"management"`
			Merge             ElasticThreadPoolStat `json:"merge" version:"1"` // 1.0 only
			Optimize          ElasticThreadPoolStat `json:"optimize"`
			Percolate         ElasticThreadPoolStat `json:"percolate"`
			Refresh           ElasticThreadPoolStat `json:"refresh"`
			Search            ElasticThreadPoolStat `json:"search"`
			Snapshot          ElasticThreadPoolStat `json:"snapshot"`
			Suggest           ElasticThreadPoolStat `json:"suggest"`
			Warmer            ElasticThreadPoolStat `json:"warmer"`
		} `json:"thread_pool" exclude:"true"`
		Timestamp int `json:"timestamp"`
		Transport struct {
			RxCount       int `json:"rx_count"`
			RxSizeInBytes int `json:"rx_size_in_bytes"`
			ServerOpen    int `json:"server_open"`
			TxCount       int `json:"tx_count"`
			TxSizeInBytes int `json:"tx_size_in_bytes"`
		} `json:"transport"`
		TransportAddress string `json:"transport_address"`
	} `json:"nodes"`
}

type ElasticHealth

type ElasticHealth struct {
	ActivePrimaryShards         int                           `` /* 191-byte string literal not displayed */
	ActiveShards                int                           `json:"active_shards" desc:"The number of active shards."`
	ActiveShardsPercentAsNumber float64                       `json:"active_shards_percent_as_number" version:"2"` // 2.0 only
	ClusterName                 string                        `json:"cluster_name"`
	DelayedUnassignedShards     int                           `json:"delayed_unassigned_shards" version:"2"` // 2.0 only
	Indices                     map[string]ElasticIndexHealth `json:"indices" exclude:"true"`
	InitializingShards          int                           `json:"initializing_shards" desc:"The number of initializing shards."`
	NumberOfDataNodes           int                           `json:"number_of_data_nodes"`
	NumberOfInFlightFetch       int                           `json:"number_of_in_flight_fetch" version:"2"` // 2.0 only
	NumberOfNodes               int                           `json:"number_of_nodes"`
	NumberOfPendingTasks        int                           `json:"number_of_pending_tasks"`
	RelocatingShards            int                           `json:"relocating_shards" desc:"The number of shards relocating."`
	Status                      string                        `json:"status" desc:"The current status of the cluster. 0: green, 1: yellow, 2: red."`
	TaskMaxWaitingInQueueMillis int                           `json:"task_max_waiting_in_queue_millis" version:"2"` // 2.0 only
	TimedOut                    bool                          `json:"timed_out" exclude:"true"`
	UnassignedShards            int                           `json:"unassigned_shards" version:"2"` // 2.0 only
}

type ElasticIndex

type ElasticIndex struct {
	Primaries ElasticIndexDetails `json:"primaries"`
	Total     ElasticIndexDetails `json:"total"`
}

type ElasticIndexDetails

type ElasticIndexDetails struct {
	Completion struct {
		SizeInBytes int `json:"size_in_bytes" desc:"Size of the completion index (used for auto-complete functionality)."`
	} `json:"completion"`
	Docs struct {
		Count   int `json:"count" rate:"gauge" unit:"documents" desc:"The number of documents in the index."`
		Deleted int `json:"deleted" rate:"gauge" unit:"documents" desc:"The number of deleted documents in the index."`
	} `json:"docs"`
	Fielddata struct {
		Evictions         int `json:"evictions" rate:"counter" unit:"evictions" desc:"The number of cache evictions for field data."`
		MemorySizeInBytes int `json:"memory_size_in_bytes" desc:"The amount of memory used for field data."`
	} `json:"fielddata"`
	FilterCache struct {
		Evictions         int `json:"evictions" version:"1" rate:"counter" unit:"evictions" desc:"The number of cache evictions for filter data."` // 1.0 only
		MemorySizeInBytes int `json:"memory_size_in_bytes" version:"1" desc:"The amount of memory used for filter data."`                          // 1.0 only
	} `json:"filter_cache"`
	Flush struct {
		Total             int `` /* 230-byte string literal not displayed */
		TotalTimeInMillis int `` /* 265-byte string literal not displayed */
	} `json:"flush"`
	Get struct {
		Current             int `` /* 147-byte string literal not displayed */
		ExistsTimeInMillis  int `` /* 161-byte string literal not displayed */
		ExistsTotal         int `` /* 141-byte string literal not displayed */
		MissingTimeInMillis int `` /* 152-byte string literal not displayed */
		MissingTotal        int `` /* 151-byte string literal not displayed */
		TimeInMillis        int `` /* 171-byte string literal not displayed */
		Total               int `` /* 151-byte string literal not displayed */
	} `json:"get"`
	IDCache struct {
		MemorySizeInBytes int `json:"memory_size_in_bytes" version:"1" desc:"The size of the id cache."` // 1.0 only
	} `json:"id_cache"`
	Indexing struct {
		DeleteCurrent        int  `` /* 152-byte string literal not displayed */
		DeleteTimeInMillis   int  `json:"delete_time_in_millis" rate:"counter" unit:"seconds" desc:"The time spent deleting documents."`
		DeleteTotal          int  `json:"delete_total" rate:"counter" unit:"documents" desc:"The total number of documents deleted."`
		IndexCurrent         int  `json:"index_current" rate:"gauge" unit:"documents" desc:"The current number of documents being indexed."`
		IndexTimeInMillis    int  `json:"index_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent indexing documents."`
		IndexTotal           int  `json:"index_total" rate:"counter" unit:"documents" desc:"The total number of documents indexed."`
		IsThrottled          bool `json:"is_throttled" exclude:"true"`
		NoopUpdateTotal      int  `json:"noop_update_total"`
		ThrottleTimeInMillis int  `json:"throttle_time_in_millis"`
	} `json:"indexing"`
	Merges struct {
		Current                    int ``                                                  /* 197-byte string literal not displayed */
		CurrentDocs                int ``                                                  /* 247-byte string literal not displayed */
		CurrentSizeInBytes         int ``                                                  /* 186-byte string literal not displayed */
		Total                      int ``                                                  /* 185-byte string literal not displayed */
		TotalAutoThrottleInBytes   int `json:"total_auto_throttle_in_bytes" version:"2"`   // 2.0 only
		TotalDocs                  int ``                                                  /* 240-byte string literal not displayed */
		TotalSizeInBytes           int ``                                                  /* 176-byte string literal not displayed */
		TotalStoppedTimeInMillis   int `json:"total_stopped_time_in_millis" version:"2"`   // 2.0 only
		TotalThrottledTimeInMillis int `json:"total_throttled_time_in_millis" version:"2"` // 2.0 only
		TotalTimeInMillis          int ``                                                  /* 225-byte string literal not displayed */
	} `json:"merges"`
	Percolate struct {
		Current           int    `json:"current" rate:"gauge" unit:"operations" desc:"The current number of percolate operations."`
		MemorySize        string `json:"memory_size"`
		MemorySizeInBytes int    `` /* 137-byte string literal not displayed */
		Queries           int    `` /* 142-byte string literal not displayed */
		TimeInMillis      int    `` /* 157-byte string literal not displayed */
		Total             int    `` /* 144-byte string literal not displayed */
	} `json:"percolate"`
	QueryCache struct {
		CacheCount        int `json:"cache_count" version:"2"` // 2.0 only
		CacheSize         int `json:"cache_size" version:"2"`  // 2.0 only
		Evictions         int `json:"evictions"`
		HitCount          int `json:"hit_count"`
		MemorySizeInBytes int `json:"memory_size_in_bytes"`
		MissCount         int `json:"miss_count"`
		TotalCount        int `json:"total_count" version:"2"` // 2.0 only
	} `json:"query_cache"`
	Recovery struct {
		CurrentAsSource      int `json:"current_as_source"`
		CurrentAsTarget      int `json:"current_as_target"`
		ThrottleTimeInMillis int `json:"throttle_time_in_millis"`
	} `json:"recovery"`
	Refresh struct {
		Total             int `` /* 155-byte string literal not displayed */
		TotalTimeInMillis int `` /* 184-byte string literal not displayed */
	} `json:"refresh"`
	RequestCache struct {
		Evictions         int `json:"evictions" version:"2"`            // 2.0 only
		HitCount          int `json:"hit_count" version:"2"`            // 2.0 only
		MemorySizeInBytes int `json:"memory_size_in_bytes" version:"2"` // 2.0 only
		MissCount         int `json:"miss_count" version:"2"`           // 2.0 only
	} `json:"request_cache"`
	Search struct {
		FetchCurrent       int `` /* 161-byte string literal not displayed */
		FetchTimeInMillis  int `` /* 162-byte string literal not displayed */
		FetchTotal         int `` /* 153-byte string literal not displayed */
		OpenContexts       int `` /* 148-byte string literal not displayed */
		QueryCurrent       int `json:"query_current" rate:"gauge" unit:"queries" desc:"The current number of queries."`
		QueryTimeInMillis  int `json:"query_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent querying."`
		QueryTotal         int `json:"query_total" rate:"counter" unit:"queries" desc:"The total number of queries."`
		ScrollCurrent      int `json:"scroll_current" version:"2"`        // 2.0 only
		ScrollTimeInMillis int `json:"scroll_time_in_millis" version:"2"` // 2.0 only
		ScrollTotal        int `json:"scroll_total" version:"2"`          // 2.0 only
	} `json:"search"`
	Segments struct {
		Count                       int `json:"count" rate:"counter" unit:"segments" desc:"The number of segments that make up the index."`
		DocValuesMemoryInBytes      int `json:"doc_values_memory_in_bytes" version:"2"` // 2.0 only
		FixedBitSetMemoryInBytes    int `json:"fixed_bit_set_memory_in_bytes"`
		IndexWriterMaxMemoryInBytes int `json:"index_writer_max_memory_in_bytes"`
		IndexWriterMemoryInBytes    int `json:"index_writer_memory_in_bytes"`
		MemoryInBytes               int `json:"memory_in_bytes" desc:"The total amount of memory used for Lucene segments."`
		NormsMemoryInBytes          int `json:"norms_memory_in_bytes" version:"2"`         // 2.0 only
		StoredFieldsMemoryInBytes   int `json:"stored_fields_memory_in_bytes" version:"2"` // 2.0 only
		TermVectorsMemoryInBytes    int `json:"term_vectors_memory_in_bytes" version:"2"`  // 2.0 only
		TermsMemoryInBytes          int `json:"terms_memory_in_bytes" version:"2"`         // 2.0 only
		VersionMapMemoryInBytes     int `json:"version_map_memory_in_bytes"`
	} `json:"segments"`
	Store struct {
		SizeInBytes          int `json:"size_in_bytes" unit:"bytes" desc:"The current size of the store."`
		ThrottleTimeInMillis int `json:"throttle_time_in_millis" rate:"gauge" unit:"seconds" desc:"The amount of time that merges were throttled."`
	} `json:"store"`
	Suggest struct {
		Current      int `json:"current" rate:"gauge" unit:"suggests" desc:"The current number of suggest operations."`
		TimeInMillis int `json:"time_in_millis" rate:"gauge" unit:"seconds" desc:"The total amount of time spent on suggest operations."`
		Total        int `json:"total" rate:"gauge" unit:"suggests" desc:"The total number of suggest operations."`
	} `json:"suggest"`
	Translog struct {
		Operations  int `` /* 171-byte string literal not displayed */
		SizeInBytes int `` /* 137-byte string literal not displayed */
	} `json:"translog"`
	Warmer struct {
		Current           int `` /* 181-byte string literal not displayed */
		Total             int `` /* 177-byte string literal not displayed */
		TotalTimeInMillis int `` /* 193-byte string literal not displayed */
	} `json:"warmer"`
}

type ElasticIndexHealth

type ElasticIndexHealth struct {
	ActivePrimaryShards int    `` /* 191-byte string literal not displayed */
	ActiveShards        int    `json:"active_shards" desc:"The number of active shards."`
	InitializingShards  int    `json:"initializing_shards" desc:"The number of initializing shards."`
	NumberOfReplicas    int    `json:"number_of_replicas" desc:"The number of replicas."`
	NumberOfShards      int    `json:"number_of_shards" desc:"The number of shards."`
	RelocatingShards    int    `json:"relocating_shards" desc:"The number of shards relocating."`
	Status              string `json:"status" desc:"The current status of the index. 0: green, 1: yellow, 2: red."`
	UnassignedShards    int    `json:"unassigned_shards"`
}

type ElasticIndexStats

type ElasticIndexStats struct {
	All    ElasticIndex `json:"_all"`
	Shards struct {
		Failed     float64 `json:"failed"`
		Successful float64 `json:"successful"`
		Total      float64 `json:"total"`
	} `json:"_shards"`
	Indices map[string]ElasticIndex `json:"indices"`
}

type ElasticStatus

type ElasticStatus struct {
	Status  int    `json:"status"`
	Name    string `json:"name"`
	Version struct {
		Number string `json:"number"`
	} `json:"version"`
}

type ElasticThreadPoolStat

type ElasticThreadPoolStat struct {
	Active    int `json:"active"`
	Completed int `json:"completed"`
	Largest   int `json:"largest"`
	Queue     int `json:"queue"`
	Rejected  int `json:"rejected"`
	Threads   int `json:"threads"`
}

type HostSystemIdentificationInfo

type HostSystemIdentificationInfo struct {
	IdentiferValue string `xml:"identifierValue"`
	IdentiferType  struct {
		Label   string `xml:"label"`
		Summary string `xml:"summary"`
		Key     string `xml:"key"`
	} `xml:"identifierType"`
}

type IntervalCollector

type IntervalCollector struct {
	F        func() (opentsdb.MultiDataPoint, error)
	Interval time.Duration // defaults to DefaultFreq if unspecified
	Enable   func() bool

	// internal use
	sync.Mutex

	TagOverride
	// contains filtered or unexported fields
}

func (*IntervalCollector) Enabled

func (c *IntervalCollector) Enabled() bool

func (*IntervalCollector) Init

func (c *IntervalCollector) Init()

func (*IntervalCollector) Name

func (c *IntervalCollector) Name() string

func (*IntervalCollector) Run

func (c *IntervalCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})
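
A sketch of how a custom interval-based collector could be defined. The metric name is invented, and the metadata.Gauge and metadata.Count constants are assumed from bosun.org/metadata; the built-in collectors register themselves inside this package, so this value is illustrative only.

package example

import (
	"runtime"
	"time"

	"bosun.org/cmd/scollector/collectors"
	"bosun.org/metadata"
	"bosun.org/opentsdb"
)

// goroutines is a hypothetical IntervalCollector that reports the number of
// goroutines in the current process every 30 seconds.
var goroutines = &collectors.IntervalCollector{
	F: func() (opentsdb.MultiDataPoint, error) {
		var md opentsdb.MultiDataPoint
		collectors.Add(&md, "example.goroutines", runtime.NumGoroutine(), nil,
			metadata.Gauge, metadata.Count, "Number of goroutines in this process.")
		return md, nil
	},
	Interval: 30 * time.Second,            // omit to fall back to DefaultFreq
	Enable:   func() bool { return true }, // always enabled
}

Because *IntervalCollector provides Run, Name, Init, and (via the embedded TagOverride) AddTagOverrides and ApplyTagOverrides, it satisfies the Collector interface and can be passed to Run alongside the built-in collectors.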

type L7Stats

type L7Stats struct {
	Rate        metadata.RateType
	Unit        metadata.Unit
	Description string
}

type MetricMeta

type MetricMeta struct {
	Metric   string
	TagSet   opentsdb.TagSet
	RateType metadata.RateType
	Unit     metadata.Unit
	Desc     string
}

type MetricMetaHAProxy

type MetricMetaHAProxy struct {
	Name   string
	Ignore bool
	MetricMeta
}

MetricMetaHAProxy is a super-structure which adds a friendly Name, as well as an indicator of whether a metric should be ignored.

type MetricSet

type MetricSet map[string]string

type PRReport

type PRReport struct {
	Status string `yaml:"status"`
	Time   string `yaml:"time"` // 2006-01-02 15:04:05.999999 -07:00
}

type PRSummary

type PRSummary struct {
	Changes struct {
		Total float64 `yaml:"total"`
	} `yaml:"changes"`
	Events struct {
		Failure float64 `yaml:"failure"`
		Success float64 `yaml:"success"`
		Total   float64 `yaml:"total"`
	} `yaml:"events"`
	Resources struct {
		Changed         float64 `yaml:"changed"`
		Failed          float64 `yaml:"failed"`
		FailedToRestart float64 `yaml:"failed_to_restart"`
		OutOfSync       float64 `yaml:"out_of_sync"`
		Restarted       float64 `yaml:"restarted"`
		Scheduled       float64 `yaml:"scheduled"`
		Skipped         float64 `yaml:"skipped"`
		Total           float64 `yaml:"total"`
	} `yaml:"resources"`
	Time    map[string]string `yaml:"time"`
	Version struct {
		Config string `yaml:"config"`
		Puppet string `yaml:"puppet"`
	} `yaml:"version"`
}

type Process

type Process struct {
	Pid       string
	Command   string
	Arguments string
}

type ProgramCollector

type ProgramCollector struct {
	Path     string
	Interval time.Duration

	TagOverride
}

func (*ProgramCollector) Init

func (c *ProgramCollector) Init()

func (*ProgramCollector) Name

func (c *ProgramCollector) Name() string

func (*ProgramCollector) Run

func (c *ProgramCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})
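
A hedged sketch of an external-program collector. The path is hypothetical; in practice InitPrograms is the usual entry point, scanning a directory of such executables rather than constructing ProgramCollector values by hand.

package example

import (
	"time"

	"bosun.org/cmd/scollector/collectors"
)

// diskUsageScript is a hypothetical external collector: the executable at
// Path is run on the given interval and its standard output is read for
// data points.
var diskUsageScript = &collectors.ProgramCollector{
	Path:     "/opt/scollector/collectors/60/disk_usage.sh", // hypothetical path
	Interval: time.Minute,
}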

type StreamCollector

type StreamCollector struct {
	F func() <-chan *opentsdb.MultiDataPoint

	TagOverride
	// contains filtered or unexported fields
}

func (*StreamCollector) Enabled

func (s *StreamCollector) Enabled() bool

func (*StreamCollector) Init

func (s *StreamCollector) Init()

func (*StreamCollector) Name

func (s *StreamCollector) Name() string

func (*StreamCollector) Run

func (s *StreamCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})

type TagOverride

type TagOverride struct {
	// contains filtered or unexported fields
}

func (*TagOverride) AddTagOverrides

func (to *TagOverride) AddTagOverrides(sources map[string]string, t opentsdb.TagSet) error

func (*TagOverride) ApplyTagOverrides

func (to *TagOverride) ApplyTagOverrides(t opentsdb.TagSet)

type TeamPort

type TeamPort struct {
	Ifinfo struct {
		DevAddr    string  `json:"dev_addr"`
		DevAddrLen float64 `json:"dev_addr_len"`
		Ifindex    float64 `json:"ifindex"`
		Ifname     string  `json:"ifname"`
	}
	Link struct {
		Duplex string  `json:"duplex"`
		Speed  float64 `json:"speed"`
		Up     bool    `json:"up"`
	} `json:"link"`
	LinkWatches struct {
		List struct {
			LinkWatch0 struct {
				DelayDown float64 `json:"delay_down"`
				DelayUp   float64 `json:"delay_up"`
				Name      string  `json:"name"`
				Up        bool    `json:"up"`
			} `json:"link_watch_0"`
		} `json:"list"`
		Up bool `json:"up"`
	} `json:"link_watches"`
	Runner struct {
		ActorLacpduInfo struct {
			Key            float64 `json:"key"`
			Port           float64 `json:"port"`
			PortPriority   float64 `json:"port_priority"`
			State          float64 `json:"state"`
			System         string  `json:"system"`
			SystemPriority float64 `json:"system_priority"`
		} `json:"actor_lacpdu_info"`
		Aggregator struct {
			ID       float64 `json:"id"`
			Selected bool    `json:"selected"`
		} `json:"aggregator"`
		Key               float64 `json:"key"`
		PartnerLacpduInfo struct {
			Key            float64 `json:"key"`
			Port           float64 `json:"port"`
			PortPriority   float64 `json:"port_priority"`
			State          float64 `json:"state"`
			System         string  `json:"system"`
			SystemPriority float64 `json:"system_priority"`
		} `json:"partner_lacpdu_info"`
		Prio     float64 `json:"prio"`
		Selected bool    `json:"selected"`
		State    string  `json:"state"`
	} `json:"runner"`
}

type TeamState

type TeamState struct {
	TeamPorts map[string]TeamPort `json:"ports"`
	Runner    struct {
		Active       bool    `json:"active"`
		FastRate     bool    `json:"fast_rate"`
		SelectPolicy string  `json:"select_policy"`
		SysPrio      float64 `json:"sys_prio"`
	} `json:"runner"`
	Setup struct {
		Daemonized         bool    `json:"daemonized"`
		DbusEnabled        bool    `json:"dbus_enabled"`
		DebugLevel         float64 `json:"debug_level"`
		KernelTeamModeName string  `json:"kernel_team_mode_name"`
		Pid                float64 `json:"pid"`
		PidFile            string  `json:"pid_file"`
		RunnerName         string  `json:"runner_name"`
		ZmqEnabled         bool    `json:"zmq_enabled"`
	} `json:"setup"`
	TeamDevice struct {
		Ifinfo struct {
			DevAddr    string  `json:"dev_addr"`
			DevAddrLen float64 `json:"dev_addr_len"`
			Ifindex    float64 `json:"ifindex"`
			Ifname     string  `json:"ifname"`
		} `json:"ifinfo"`
	} `json:"team_device"`
}

type VRRPAddressEntry

type VRRPAddressEntry struct {
	VRRPAddressIndex       int64
	VRRPAddressType        int64
	VRRPAddressValue       string `snmp:"octet"`
	VRRPAddressBroadcast   string `snmp:"octet"`
	VRRPAddressMask        int64
	VRRPAddressScope       int64
	VRRPAddressIfIndex     int64
	VRRPAddressIfName      string
	VRRPAddressIfAlias     string
	VRRPAddressStatus      int64
	VRRPAddressAdvertising int64
}

type VRRPInstanceEntry

type VRRPInstanceEntry struct {
	VInstanceIndex             int64
	VInstanceName              string
	VInstanceVirtualRouterId   int64
	VInstanceState             int64
	VInstanceInitialState      int64
	VInstanceWantedState       int64
	VInstanceBasePriority      int64
	VInstanceEffectivePriority int64
	VInstanceVipsStatus        int64
	VInstancePrimaryInterface  string
	VInstanceTrackPrimaryIf    int64
	VInstanceAdvertisementsInt int64
	VInstancePreempt           int64
	VInstancePreemptDelay      int64
	VInstanceAuthType          int64
	VInstanceLvsSyncDaemon     int64
	VInstanceLvsSyncInterface  string
	VInstanceSyncGroup         string
	VInstanceGarpDelay         int64
	VInstanceSmtpAlert         int64
	VInstanceNotifyExec        int64
	VInstanceScriptMaster      string
	VInstanceScriptBackup      string
	VInstanceScriptFault       string
	VInstanceScriptStop        string
	VInstanceScript            string
	VInstanceAccept            int64
}

type WatchedProc

type WatchedProc struct {
	Command      *regexp.Regexp
	Name         string
	IncludeCount bool
	Processes    map[string]int
	ArgMatch     *regexp.Regexp
	// contains filtered or unexported fields
}

func NewWatchedProc

func NewWatchedProc(params conf.ProcessParams) (*WatchedProc, error)

NewWatchedProc takes a [Process] configuration block from conf and returns a WatchedProc.

func (*WatchedProc) Check

func (w *WatchedProc) Check(procs []*Process)

Check finds all matching processes and assigns them a new unique id.

func (*WatchedProc) Remove

func (w *WatchedProc) Remove(pid string)
