package analysis
Published: Nov 28, 2024 License: Apache-2.0 Imports: 26 Imported by: 0

Documentation

Overview

Package analysis contains methods to query cluster analysis maintained in BigQuery, and to add/update clustered failures used by the analysis.

Index

Constants

const MetricValueColumnSuffix = "value"

Variables

var ClusteredFailuresTable = aip.NewTable().WithColumns(
	aip.NewColumn().WithFieldPath("test_id").WithDatabaseName("test_id").FilterableImplicitly().Build(),
	aip.NewColumn().WithFieldPath("failure_reason").WithDatabaseName("failure_reason.primary_error_message").FilterableImplicitly().Build(),
	aip.NewColumn().WithFieldPath("realm").WithDatabaseName("realm").Filterable().Build(),
	aip.NewColumn().WithFieldPath("ingested_invocation_id").WithDatabaseName("ingested_invocation_id").Filterable().Build(),
	aip.NewColumn().WithFieldPath("cluster_algorithm").WithDatabaseName("cluster_algorithm").Filterable().WithArgumentSubstitutor(resolveAlgorithm).Build(),
	aip.NewColumn().WithFieldPath("cluster_id").WithDatabaseName("cluster_id").Filterable().Build(),
	aip.NewColumn().WithFieldPath("variant_hash").WithDatabaseName("variant_hash").Filterable().Build(),
	aip.NewColumn().WithFieldPath("test_run_id").WithDatabaseName("test_run_id").Filterable().Build(),
	aip.NewColumn().WithFieldPath("variant").WithDatabaseName("variant").KeyValue().Filterable().Build(),
	aip.NewColumn().WithFieldPath("tags").WithDatabaseName("tags").KeyValue().Filterable().Build(),
	aip.NewColumn().WithFieldPath("is_test_run_blocked").WithDatabaseName("is_test_run_blocked").Bool().Filterable().Build(),
	aip.NewColumn().WithFieldPath("is_ingested_invocation_blocked").WithDatabaseName("is_ingested_invocation_blocked").Bool().Filterable().Build(),
	aip.NewColumn().WithFieldPath("build_gardener_rotations").WithDatabaseName("build_gardener_rotations").Array().Filterable().Build(),
).Build()

var InvalidArgumentTag = errors.BoolTag{Key: errors.NewTagKey("invalid argument")}

InvalidArgumentTag is used to indicate that one of the query options is invalid.

Functions

func ClusterSummariesTable

func ClusterSummariesTable(queriedMetrics []metrics.Definition) *aip.Table

ClusterSummariesTable returns the schema of the table returned by the cluster summaries query. This can be used to generate and validate the order by clause.

func FromBQBuildStatus

func FromBQBuildStatus(value string) pb.BuildStatus

FromBQBuildStatus extracts luci.analysis.v1.BuildStatus from its BigQuery column representation.

func FromBQChangelistOwnershipKind

func FromBQChangelistOwnershipKind(value string) pb.ChangelistOwnerKind

FromBQChangelistOwnershipKind extracts luci.analysis.v1.ChangelistOwnerKind from its BigQuery column representation.

func FromBQExonerationReason

func FromBQExonerationReason(value string) pb.ExonerationReason

FromBQExonerationReason extracts luci.analysis.v1.ExonerationReason from its BigQuery column representation.

func FromBQPresubmitRunMode

func FromBQPresubmitRunMode(value string) pb.PresubmitRunMode

FromBQPresubmitRunMode extracts luci.analysis.v1.PresubmitRunMode from its BigQuery column representation.

func FromBQPresubmitRunStatus

func FromBQPresubmitRunStatus(value string) pb.PresubmitRunStatus

FromBQPresubmitRunStatus extracts luci.analysis.v1.PresubmitRunStatus from its BigQuery column representation.

func ToBQBuildStatus

func ToBQBuildStatus(value pb.BuildStatus) string

ToBQBuildStatus converts a luci.analysis.v1.BuildStatus to its BigQuery column representation. This trims the BUILD_STATUS_ prefix to avoid excessive verbosity in the table.

func ToBQPresubmitRunMode

func ToBQPresubmitRunMode(value pb.PresubmitRunMode) string

ToBQPresubmitRunMode converts a luci.analysis.v1.PresubmitRunMode to its BigQuery column representation.

func ToBQPresubmitRunStatus

func ToBQPresubmitRunStatus(value pb.PresubmitRunStatus) string

ToBQPresubmitRunStatus converts a luci.analysis.v1.PresubmitRunStatus to its BigQuery column representation. This trims the PRESUBMIT_RUN_STATUS_ prefix to avoid excessive verbosity in the table.
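As an illustration of the round trip between a proto enum and its BigQuery column representation, here is a minimal sketch. The specific enum value name (pb.BuildStatus_BUILD_STATUS_FAILURE) is an assumption based on the usual luci.analysis.v1 naming and is not confirmed by this page.

// Sketch only: BUILD_STATUS_FAILURE is an assumed enum value.
column := analysis.ToBQBuildStatus(pb.BuildStatus_BUILD_STATUS_FAILURE) // "FAILURE" (prefix trimmed)
status := analysis.FromBQBuildStatus(column)                            // back to the proto enum value
_ = status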

Types

type Changelist

type Changelist struct {
	Host     bigquery.NullString
	Change   bigquery.NullInt64
	Patchset bigquery.NullInt64
}

type Client

type Client struct {
	// contains filtered or unexported fields
}

Client may be used to read LUCI Analysis clusters.

func NewClient

func NewClient(ctx context.Context, gcpProject string) (*Client, error)

NewClient creates a new client for reading clusters. Close() MUST be called after you have finished using this client.
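A minimal usage sketch (the GCP project name below is a placeholder):

client, err := analysis.NewClient(ctx, "my-gcp-project") // placeholder project
if err != nil {
	return err
}
// Close MUST be called once the client is no longer needed.
defer client.Close()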

func (*Client) Close

func (c *Client) Close() error

Close releases any resources held by the client.

func (*Client) ProjectsWithDataset

func (c *Client) ProjectsWithDataset(ctx context.Context) (map[string]struct{}, error)

ProjectsWithDataset returns the set of LUCI projects which have a BigQuery dataset created.

func (*Client) PurgeStaleRows

func (c *Client) PurgeStaleRows(ctx context.Context) error

PurgeStaleRows purges stale clustered failure rows from the table. Stale rows are those which have been superseded by a new row with a later version, or whose latest version marks the row as no longer included in a cluster. This is necessary for:

  • Our QueryClusterSummaries query, which for performance reasons (it is UI-interactive) does not filter to the latest version of each row and instead uses all rows.
  • Keeping the size of the BigQuery table to a minimum.

We currently purge only the last 7 days, to keep purging costs to a minimum and because this is as far back as QueryClusterSummaries looks.
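For example, a periodic maintenance job might invoke the purge roughly as follows (sketch only):

// Sketch of a periodic maintenance task.
if err := client.PurgeStaleRows(ctx); err != nil {
	// The purge can simply be retried on the next periodic run.
	return err
}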

func (*Client) QueryClusterSummaries

func (c *Client) QueryClusterSummaries(ctx context.Context, luciProject string, options *QueryClusterSummariesOptions) (cs []*ClusterSummary, err error)

QueryClusterSummaries queries a summary of clusters in the project. The subset of failures included in the clustering may be filtered. If the dataset for the LUCI project does not exist, returns ProjectNotExistsErr. If options.TimeRange is invalid, or if options.FailureFilter or options.OrderBy is invalid with respect to the query schema, returns an error tagged with InvalidArgumentTag so that the appropriate gRPC error can be returned to the client (if applicable).
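A hedged sketch of a call; the realm, LUCI project and time range values are placeholders, queriedMetrics stands for a []metrics.Definition chosen by the caller, and the Earliest/Latest fields assume the usual shape of luci.analysis.v1.TimeRange:

now := time.Now()
opts := &analysis.QueryClusterSummariesOptions{
	Realms:  []string{"chromium:ci"}, // placeholder realm
	Metrics: queriedMetrics,          // must include any metric referenced in OrderBy
	TimeRange: &pb.TimeRange{
		Earliest: timestamppb.New(now.Add(-24 * time.Hour)),
		Latest:   timestamppb.New(now),
	},
	IncludeMetricBreakdown: true,
}
summaries, err := client.QueryClusterSummaries(ctx, "chromium", opts)
if err != nil {
	return err // may be tagged with InvalidArgumentTag for bad options
}
for _, s := range summaries {
	fmt.Println(s.ClusterID, s.ExampleTestID, s.UniqueTestIDs)
}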

func (*Client) ReadCluster

func (c *Client) ReadCluster(ctx context.Context, luciProject string, clusterID clustering.ClusterID) (cl *Cluster, err error)

ReadCluster reads information about a cluster. If the dataset for the LUCI project does not exist, returns ProjectNotExistsErr. If information for the cluster could not be found (e.g. because there are no examples), returns an empty cluster.
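A sketch of reading a single cluster; the ClusterID literal assumes clustering.ClusterID has Algorithm and ID fields, and the algorithm/ID values are placeholders:

clusterID := clustering.ClusterID{Algorithm: "rules", ID: "1234abcd"} // placeholder values
cl, err := client.ReadCluster(ctx, "chromium", clusterID)
if err != nil {
	return err
}
if testID := cl.ExampleTestID(); testID != "" {
	fmt.Println("example test:", testID)
}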

func (*Client) ReadClusterExoneratedTestVariantBranches

func (c *Client) ReadClusterExoneratedTestVariantBranches(ctx context.Context, opts ReadClusterExoneratedTestVariantBranchesOptions) (cfs []*ExoneratedTestVariantBranch, err error)

ReadClusterExoneratedTestVariantBranches reads the latest 100 test variant branches which have presubmit-blocking failures exonerated in the last 7 days.

func (*Client) ReadClusterExoneratedTestVariants

func (c *Client) ReadClusterExoneratedTestVariants(ctx context.Context, opts ReadClusterExoneratedTestVariantsOptions) (cfs []*ExoneratedTestVariant, err error)

ReadClusterExoneratedTestVariants reads the latest 100 test variants which have presubmit-blocking failures exonerated in the last 7 days.

func (*Client) ReadClusterFailures

func (c *Client) ReadClusterFailures(ctx context.Context, opts ReadClusterFailuresOptions) (cfs []*ClusterFailure, err error)

ReadClusterFailures reads the latest 2000 groups of failures for a single cluster for the last 7 days. A group of failures is the set of failures that would be grouped together in the MILO display, i.e. those with the same ingested_invocation_id, test_id and variant.
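A sketch, reusing the placeholder clusterID from the ReadCluster sketch above; the project and realm values are likewise placeholders:

failures, err := client.ReadClusterFailures(ctx, analysis.ReadClusterFailuresOptions{
	Project:   "chromium",
	ClusterID: clusterID,
	Realms:    []string{"chromium:ci"},
	// MetricFilter left nil: failures are not restricted to a single metric.
})
if err != nil {
	return err
}
for _, f := range failures {
	fmt.Println(f.TestID.StringVal, f.Count)
}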

func (*Client) ReadClusterHistory

func (c *Client) ReadClusterHistory(ctx context.Context, options ReadClusterHistoryOptions) (ret []*ReadClusterHistoryDay, err error)

ReadClusterHistory reads per-day metric values over the requested number of days for the failures matching the given options. If the dataset for the LUCI project does not exist, returns ProjectNotExistsErr.
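A sketch of reading seven days of history; the project and realm are placeholders and queriedMetrics again stands for a caller-chosen []metrics.Definition:

days, err := client.ReadClusterHistory(ctx, analysis.ReadClusterHistoryOptions{
	Project: "chromium", // placeholder project
	Days:    7,
	Metrics: queriedMetrics,
	Realms:  []string{"chromium:ci"},
	// FailureFilter left nil in this sketch.
})
if err != nil {
	return err
}
for _, day := range days {
	fmt.Println(day.Date.Format("2006-01-02"), day.MetricValues)
}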

func (*Client) ReadImpactfulClusters

func (c *Client) ReadImpactfulClusters(ctx context.Context, opts ImpactfulClusterReadOptions) (cs []*Cluster, err error)

ReadImpactfulClusters reads clusters exceeding specified metrics, or are otherwise nominated to be read.

func (*Client) RebuildAnalysis

func (c *Client) RebuildAnalysis(ctx context.Context) error

RebuildAnalysis re-builds the cluster summaries analysis from clustered test results for all LUCI projects.

type Cluster

type Cluster struct {
	ClusterID clustering.ClusterID

	// MetricValues are the values of cluster metrics. Only metrics which
	// have been computed for the cluster are populated.
	MetricValues map[metrics.ID]metrics.TimewiseCounts

	// The number of distinct user (i.e. not automation-generated) CLs
	// which have failures that are part of this cluster, over the last
	// 7 days. If this is more than a couple, it is a good indicator the
	// problem is really in the tree and not only on a few unsubmitted CLs.
	DistinctUserCLsWithFailures7d metrics.Counts
	// The number of postsubmit builds which have failures that are
	// a part of this cluster. If this is non-zero, it is an indicator
	// the problem is in the tree and not in a few unsubmitted CLs.
	PostsubmitBuildsWithFailures7d metrics.Counts

	// The realm(s) examples of the cluster are present in.
	Realms               []string
	ExampleFailureReason bigquery.NullString
	// Top Test IDs included in the cluster, up to 5. Unless the cluster
	// is empty, will always include at least one Test ID.
	TopTestIDs []TopCount
	// TopMonorailComponents indicates the top monorail components (up to 5)
	// that failures in the cluster are associated with, by number of failures.
	TopMonorailComponents []TopCount
	// TopBuganizerComponents indicates the top buganizer components (up to 5)
	// that failures in the cluster are associated with, by number of failures.
	TopBuganizerComponents []TopCount
}

Cluster contains detailed information about a cluster, including a statistical summary of a cluster's failures, and its metrics.

func EmptyCluster

func EmptyCluster(clusterID clustering.ClusterID) *Cluster

EmptyCluster returns a Cluster entry for a cluster without any clustered failures.

func (*Cluster) ExampleTestID

func (s *Cluster) ExampleTestID() string

ExampleTestID returns an example Test ID that is part of the cluster, or "" if the cluster is empty.

type ClusterFailure

type ClusterFailure struct {
	Realm              bigquery.NullString
	TestID             bigquery.NullString
	Variant            []*Variant
	PresubmitRunID     *PresubmitRunID
	PresubmitRunOwner  bigquery.NullString
	PresubmitRunMode   bigquery.NullString
	PresubmitRunStatus bigquery.NullString
	Changelists        []*Changelist
	PartitionTime      bigquery.NullTimestamp
	Exonerations       []*Exoneration
	// luci.analysis.v1.BuildStatus, without "BUILD_STATUS_" prefix.
	BuildStatus                 bigquery.NullString
	IsBuildCritical             bigquery.NullBool
	IngestedInvocationID        bigquery.NullString
	IsIngestedInvocationBlocked bigquery.NullBool
	Count                       int32
	FailureReasonPrefix         bigquery.NullString
}

type ClusterMetricBreakdown

type ClusterMetricBreakdown struct {
	ClusterID        clustering.ClusterID
	MetricBreakdowns map[metrics.ID]*MetricBreakdown
}

ClusterMetricBreakdown is the breakdown of metrics over time for a cluster's failures.

type ClusterSummary

type ClusterSummary struct {
	ClusterID            clustering.ClusterID
	ExampleFailureReason bigquery.NullString
	ExampleTestID        string
	UniqueTestIDs        int64
	MetricValues         map[metrics.ID]*MetricValue
}

ClusterSummary represents a summary of the cluster's failures and its metrics.

type ClusteredFailuresClient

type ClusteredFailuresClient interface {
	// Insert inserts the given rows into BigQuery.
	Insert(ctx context.Context, rows []*bqpb.ClusteredFailureRow) error
}

ClusteredFailuresClient exports clustered failures to BigQuery for further analysis.
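Because ClusteredFailuresClient is a one-method interface, a fake is easy to substitute in tests; a minimal sketch (the fake type is hypothetical, not part of this package):

// fakeClusteredFailuresClient collects the rows it is asked to insert,
// instead of exporting them to BigQuery.
type fakeClusteredFailuresClient struct {
	rows []*bqpb.ClusteredFailureRow
}

func (f *fakeClusteredFailuresClient) Insert(ctx context.Context, rows []*bqpb.ClusteredFailureRow) error {
	f.rows = append(f.rows, rows...)
	return nil
}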

type ClusteringHandler

type ClusteringHandler struct {
	// contains filtered or unexported fields
}

ClusteringHandler handles test result (re-)clustering events, to ensure analysis remains up-to-date.

func (*ClusteringHandler) HandleUpdatedClusters

func (r *ClusteringHandler) HandleUpdatedClusters(ctx context.Context, updates *clustering.Update, commitTime time.Time) error

HandleUpdatedClusters handles (re-)clustered test results. It is called after the Spanner transaction effecting the (re-)clustering has committed. commitTime is the Spanner time the transaction committed.

If this method fails, it will not be retried and data loss or inconsistency (in this method's BigQuery export) may occur. This could be improved in future with a two-stage apply process (journalling the BigQuery updates to be applied as part of the original transaction and retrying them at a later point if they do not succeed).

type ExoneratedTestVariant

type ExoneratedTestVariant struct {
	TestID                     bigquery.NullString
	Variant                    []*Variant
	CriticalFailuresExonerated int32
	LastExoneration            bigquery.NullTimestamp
}

type ExoneratedTestVariantBranch

type ExoneratedTestVariantBranch struct {
	Project                    bigquery.NullString
	TestID                     bigquery.NullString
	Variant                    []*Variant
	SourceRef                  SourceRef
	CriticalFailuresExonerated int32
	LastExoneration            bigquery.NullTimestamp
}

ExoneratedTestVariantBranch represents a test variant branch read from BigQuery.

type Exoneration

type Exoneration struct {
	// luci.analysis.v1.ExonerationReason value. E.g. "OCCURS_ON_OTHER_CLS".
	Reason bigquery.NullString
}

type GitilesRef

type GitilesRef struct {
	Host    bigquery.NullString
	Project bigquery.NullString
	Ref     bigquery.NullString
}

GitilesRef represents a gitiles branch reference read from BigQuery.

type ImpactfulClusterReadOptions

type ImpactfulClusterReadOptions struct {
	// Project is the LUCI Project for which analysis is being performed.
	Project string
	// Thresholds is the set of thresholds which, if any are met
	// or exceeded, should result in the cluster being returned.
	// Thresholds are applied based on the residual actual
	// cluster impact.
	Thresholds []*configpb.ImpactMetricThreshold
	// AlwaysIncludeBugClusters controls whether to include analysis for all
	// bug clusters.
	AlwaysIncludeBugClusters bool
}

ImpactfulClusterReadOptions specifies options for ReadImpactfulClusters().

type MetricBreakdown

type MetricBreakdown struct {
	DailyValues []int64
}

MetricBreakdown is the breakdown of values over time for a single metric.

type MetricValue

type MetricValue struct {
	// The residual value of the cluster metric.
	// For bug clusters, the residual metric value is the metric value
	// calculated using all of the failures in the cluster.
	// For suggested clusters, the residual metric value is calculated
	// using the failures in the cluster which are not also part of a
	// bug cluster. In this way, measures attributed to bug clusters
	// are not counted again against suggested clusters.
	Value int64
	// The value of the cluster metric over time, grouped by 24-hour periods
	// in the queried time range, in reverse chronological order
	// i.e. the first entry is the metric value for the 24-hour period
	// immediately preceding the time range's latest time.
	DailyBreakdown []int64
}
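To make the ordering concrete (illustrative only, assuming at least two daily values were queried):

// DailyBreakdown is reverse chronological: index 0 is the 24-hour period
// immediately preceding the queried time range's latest time.
mostRecentDay := mv.DailyBreakdown[0]
previousDay := mv.DailyBreakdown[1] // the 24 hours before that
_, _ = mostRecentDay, previousDay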

type PresubmitRunID

type PresubmitRunID struct {
	System bigquery.NullString
	ID     bigquery.NullString
}

type QueryClusterMetricBreakdownsOptions

type QueryClusterMetricBreakdownsOptions struct {
	// A filter on the underlying failures to include in the clusters.
	FailureFilter *aip160.Filter
	OrderBy       []aip.OrderBy
	Realms        []string
	// Metrics is the set of metrics to query. If a metric is referenced
	// in the OrderBy clause, it must also be included here.
	Metrics   []metrics.Definition
	TimeRange *pb.TimeRange
}

type QueryClusterSummariesOptions

type QueryClusterSummariesOptions struct {
	// A filter on the underlying failures to include in the clusters.
	FailureFilter *aip160.Filter
	OrderBy       []aip.OrderBy
	Realms        []string
	// Metrics is the set of metrics to query. If a metric is referenced
	// in the OrderBy clause, it must also be included here.
	Metrics   []metrics.Definition
	TimeRange *pb.TimeRange
	// Whether the daily breakdown should be included in the cluster summaries'
	// metric values.
	IncludeMetricBreakdown bool
}

type ReadClusterExoneratedTestVariantBranchesOptions

type ReadClusterExoneratedTestVariantBranchesOptions struct {
	// The LUCI Project.
	Project   string
	ClusterID clustering.ClusterID
	Realms    []string
}

ReadClusterExoneratedTestVariantBranchesOptions contains options for ReadClusterExoneratedTestVariantBranches.

type ReadClusterExoneratedTestVariantsOptions

type ReadClusterExoneratedTestVariantsOptions struct {
	// The LUCI Project.
	Project   string
	ClusterID clustering.ClusterID
	Realms    []string
}

type ReadClusterFailuresOptions

type ReadClusterFailuresOptions struct {
	// The LUCI Project.
	Project   string
	ClusterID clustering.ClusterID
	Realms    []string
	// The metric to show failures related to.
	// If this is nil, all failures can be returned.
	MetricFilter *metrics.Definition
}

type ReadClusterHistoryDay

type ReadClusterHistoryDay struct {
	Date         time.Time
	MetricValues map[metrics.ID]int32
	Realms       []string
}

type ReadClusterHistoryOptions

type ReadClusterHistoryOptions struct {
	Project       string
	FailureFilter *aip160.Filter
	Days          int32
	Metrics       []metrics.Definition
	Realms        []string
}

type SourceRef

type SourceRef struct {
	Gitiles *GitilesRef
}

SourceRef represents a source reference (e.g. git branch reference) read from BigQuery.

type TopCount

type TopCount struct {
	// Value is the value that was frequently occurring.
	Value string
	// Count is the frequency with which the value occurred.
	Count int64
}

TopCount captures the result of the APPROX_TOP_COUNT operator. See: https://cloud.google.com/bigquery/docs/reference/standard-sql/approximate_aggregate_functions#approx_top_count

type Variant

type Variant struct {
	Key   bigquery.NullString
	Value bigquery.NullString
}

Directories

Path     Synopsis
metrics  Package metrics provides a framework for cluster-based metrics.
