Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Arguments ¶
type Arguments struct { Targets []discovery.Target `alloy:"targets,attr"` ForwardTo []storage.Appendable `alloy:"forward_to,attr"` // The job name to override the job label with. JobName string `alloy:"job_name,attr,optional"` // Indicator whether the scraped metrics should remain unmodified. HonorLabels bool `alloy:"honor_labels,attr,optional"` // Indicator whether the scraped timestamps should be respected. HonorTimestamps bool `alloy:"honor_timestamps,attr,optional"` // Indicator whether to track the staleness of the scraped timestamps. TrackTimestampsStaleness bool `alloy:"track_timestamps_staleness,attr,optional"` // A set of query parameters with which the target is scraped. Params url.Values `alloy:"params,attr,optional"` // Whether to scrape a classic histogram that is also exposed as a native histogram. ScrapeClassicHistograms bool `alloy:"scrape_classic_histograms,attr,optional"` // How frequently to scrape the targets of this scrape config. ScrapeInterval time.Duration `alloy:"scrape_interval,attr,optional"` // The timeout for scraping targets of this config. ScrapeTimeout time.Duration `alloy:"scrape_timeout,attr,optional"` // The protocols to negotiate during a scrape. It tells clients what // protocol are accepted by Prometheus and with what order of preference. // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, // OpenMetricsText1.0.0, PrometheusText0.0.4. ScrapeProtocols []string `alloy:"scrape_protocols,attr,optional"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `alloy:"metrics_path,attr,optional"` // The URL scheme with which to fetch metrics from targets. Scheme string `alloy:"scheme,attr,optional"` // An uncompressed response body larger than this many bytes will cause the // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `alloy:"body_size_limit,attr,optional"` // More than this many samples post metric-relabeling will cause the scrape // to fail. 
SampleLimit uint `alloy:"sample_limit,attr,optional"` // More than this many targets after the target relabeling will cause the // scrapes to fail. TargetLimit uint `alloy:"target_limit,attr,optional"` // More than this many labels post metric-relabeling will cause the scrape // to fail. LabelLimit uint `alloy:"label_limit,attr,optional"` // More than this label name length post metric-relabeling will cause the // scrape to fail. LabelNameLengthLimit uint `alloy:"label_name_length_limit,attr,optional"` // More than this label value length post metric-relabeling will cause the // scrape to fail. LabelValueLengthLimit uint `alloy:"label_value_length_limit,attr,optional"` HTTPClientConfig component_config.HTTPClientConfig `alloy:",squash"` // Scrape Options ExtraMetrics bool `alloy:"extra_metrics,attr,optional"` // Deprecated: Use ScrapeProtocols instead. For backwards-compatibility, if this option is set to true, the // ScrapeProtocols will be set to [PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4]. // It is invalid to set both EnableProtobufNegotiation and ScrapeProtocols. // TODO: https://github.com/grafana/alloy/issues/878: Remove this option. EnableProtobufNegotiation bool `alloy:"enable_protobuf_negotiation,attr,optional"` Clustering cluster.ComponentBlock `alloy:"clustering,block,optional"` }
Arguments holds values which are used to configure the prometheus.scrape component.
func (*Arguments) SetToDefault ¶
func (arg *Arguments) SetToDefault()
SetToDefault implements syntax.Defaulter.
type Component ¶
// Component implements the prometheus.scrape component.
type Component struct {
// contains filtered or unexported fields
}
Component implements the prometheus.scrape component.
func (*Component) DebugInfo ¶
func (c *Component) DebugInfo() interface{}
DebugInfo implements component.DebugComponent.
func (*Component) NotifyClusterChange ¶
func (c *Component) NotifyClusterChange()
NotifyClusterChange implements component.ClusterComponent.
type ScraperStatus ¶
// ScraperStatus reports the status of the scraper's jobs.
type ScraperStatus struct {
TargetStatus []TargetStatus `alloy:"target,block,optional"`
}
ScraperStatus reports the status of the scraper's jobs.
type TargetStatus ¶
// TargetStatus reports on the status of the latest scrape for a target.
type TargetStatus struct {
	JobName            string            `alloy:"job,attr"`
	URL                string            `alloy:"url,attr"`
	Health             string            `alloy:"health,attr"`
	Labels             map[string]string `alloy:"labels,attr"`
	LastError          string            `alloy:"last_error,attr,optional"`
	LastScrape         time.Time         `alloy:"last_scrape,attr"`
	LastScrapeDuration time.Duration     `alloy:"last_scrape_duration,attr,optional"`
}
TargetStatus reports on the status of the latest scrape for a target.
func BuildTargetStatuses ¶
func BuildTargetStatuses(targets map[string][]*scrape.Target) []TargetStatus
BuildTargetStatuses transforms the targets from a scrape manager into our internal status type for debug info.
Click to show internal directories.
Click to hide internal directories.