package cloud

v0.26.1
Published: Feb 24, 2020 License: AGPL-3.0 Imports: 28 Imported by: 0

Documentation

Index

Constants

const (
	// RequestTimeout is the default cloud request timeout
	RequestTimeout = 20 * time.Second
	// RetryInterval is the default cloud request retry interval
	RetryInterval = 500 * time.Millisecond
	// MaxRetries specifies max retry attempts
	MaxRetries = 3
)
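
As a quick sketch, a caller might honor these defaults like this (the endpoint is a placeholder and this retry loop is not part of the package; assumes net/http and time, with this package imported as cloud):

	httpClient := &http.Client{Timeout: cloud.RequestTimeout}
	for attempt := 0; attempt < cloud.MaxRetries; attempt++ {
		resp, err := httpClient.Get("https://example.com/health") // placeholder endpoint
		if err == nil {
			resp.Body.Close()
			break
		}
		time.Sleep(cloud.RetryInterval)
	}
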
const DataTypeAggregatedHTTPReqs = "AggregatedPoints"

const DataTypeMap = "Points"

const DataTypeSingle = "Point"

const TestName = "k6 test"

TestName is the default Load Impact Cloud test name

Variables

var (
	ErrNotAuthorized    = errors.New("Not allowed to upload result to Load Impact cloud")
	ErrNotAuthenticated = errors.New("Failed to authenticate with Load Impact cloud")
	ErrUnknown          = errors.New("An error occurred talking to Load Impact cloud")
)

Functions

func MergeFromExternal added in v0.23.1

func MergeFromExternal(external map[string]json.RawMessage, conf *Config) error

MergeFromExternal merges the name, projectID and token fields from the JSON in the "loadimpact" key of the provided external map
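
For illustration, the external map is expected to look roughly like this (field values are placeholders):

	external := map[string]json.RawMessage{
		"loadimpact": json.RawMessage(`{"name":"my test","projectID":123,"token":"secret"}`),
	}
	conf := cloud.NewConfig()
	if err := cloud.MergeFromExternal(external, &conf); err != nil {
		// malformed JSON in the "loadimpact" key
	}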

func URLForResults added in v0.19.0

func URLForResults(refID string, config Config) string

Types

type AggregatedMetric added in v0.21.0

type AggregatedMetric struct {

	// Updated by Calc() and used in the JSON output
	Min float64 `json:"min"`
	Max float64 `json:"max"`
	Avg float64 `json:"avg"`
	// contains filtered or unexported fields
}

AggregatedMetric is used to store aggregated information for a particular metric in a SampleDataAggregatedHTTPReqs.

func (*AggregatedMetric) Add added in v0.21.0

func (am *AggregatedMetric) Add(t time.Duration)

Add the new duration to the internal sum and update Min and Max if necessary

func (*AggregatedMetric) Calc added in v0.21.0

func (am *AggregatedMetric) Calc(count float64)

Calc populates the float fields for min and max and calculates the average value
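
A small sketch of the Add/Calc flow (assumes time, with this package imported as cloud):

	var am cloud.AggregatedMetric
	am.Add(80 * time.Millisecond)
	am.Add(120 * time.Millisecond)
	am.Calc(2) // two samples were added; Min, Max and Avg are now populated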

type Client

type Client struct {
	// contains filtered or unexported fields
}

Client handles communication with the Load Impact cloud API.

func NewClient

func NewClient(token, host, version string) *Client

func (*Client) CreateTestRun

func (c *Client) CreateTestRun(testRun *TestRun) (*CreateTestRunResponse, error)

func (*Client) Do

func (c *Client) Do(req *http.Request, v interface{}) error

func (*Client) GetTestProgress added in v0.20.0

func (c *Client) GetTestProgress(referenceID string) (*TestProgressResponse, error)

func (*Client) Login added in v0.19.0

func (c *Client) Login(email string, password string) (*LoginResponse, error)

func (*Client) NewRequest

func (c *Client) NewRequest(method, url string, data interface{}) (*http.Request, error)

NewRequest creates a new HTTP request.

This is the same as http.NewRequest, except that data, if not nil, will be serialized as JSON.
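
A sketch of pairing NewRequest with Do (token, host, endpoint and payload are placeholders):

	client := cloud.NewClient("my-token", "https://ingest.example.com", "0.26.1")
	req, err := client.NewRequest("POST", "https://ingest.example.com/v1/thing", map[string]string{"key": "value"})
	if err != nil {
		// handle request construction error
	}
	var result map[string]interface{}
	if err := client.Do(req, &result); err != nil {
		// handle transport or API error (possibly an ErrorResponse)
	}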

func (*Client) PushMetric

func (c *Client) PushMetric(referenceID string, noCompress bool, samples []*Sample) error

func (*Client) StartCloudTestRun added in v0.19.0

func (c *Client) StartCloudTestRun(name string, projectID int64, arc *lib.Archive) (string, error)

func (*Client) StopCloudTestRun added in v0.20.0

func (c *Client) StopCloudTestRun(referenceID string) error

func (*Client) TestFinished

func (c *Client) TestFinished(referenceID string, thresholds ThresholdResult, tainted bool, runStatus lib.RunStatus) error

func (*Client) ValidateOptions added in v0.19.0

func (c *Client) ValidateOptions(options lib.Options) error

type Collector

type Collector struct {
	// contains filtered or unexported fields
}

Collector sends result data to the Load Impact cloud service.

func New

func New(conf Config, src *loader.SourceData, opts lib.Options, version string) (*Collector, error)

New creates a new cloud collector

func (*Collector) Collect

func (c *Collector) Collect(sampleContainers []stats.SampleContainer)

Collect receives a set of samples. This method is never called concurrently, and only while the context for Run() is valid, but should defer as much work as possible to Run().

func (*Collector) GetRequiredSystemTags added in v0.20.0

func (c *Collector) GetRequiredSystemTags() stats.SystemTagSet

GetRequiredSystemTags returns which sample tags are needed by this collector

func (*Collector) Init

func (c *Collector) Init() error

Init is called between the collector's creation and the call to Run(). You should do any lengthy setup here rather than in New.

func (*Collector) Link

func (c *Collector) Link() string

Link returns a link that is shown to the user.

func (*Collector) Run

func (c *Collector) Run(ctx context.Context)

Run is called in a goroutine and starts the collector. It should commit samples to the backend at regular intervals and when the context is terminated.
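
Putting the lifecycle together, roughly (src and opts are assumed to come from the surrounding k6 setup; error handling elided):

	var src *loader.SourceData // the test source, assumed available
	var opts lib.Options       // the consolidated test options, assumed available

	coll, _ := cloud.New(cloud.NewConfig(), src, opts, "0.26.1")
	_ = coll.Init()

	ctx, cancel := context.WithCancel(context.Background())
	go coll.Run(ctx)

	// the engine calls coll.Collect(...) with samples while the test runs

	cancel() // terminating the context makes Run flush pending samples and return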

func (*Collector) SetRunStatus added in v0.21.1

func (c *Collector) SetRunStatus(status lib.RunStatus)

SetRunStatus sets the run status

type Config added in v0.18.0

type Config struct {
	// TODO: refactor common stuff between cloud execution and output
	Token           null.String `json:"token" envconfig:"K6_CLOUD_TOKEN"`
	DeprecatedToken null.String `json:"-" envconfig:"K6CLOUD_TOKEN"`
	ProjectID       null.Int    `json:"projectID" envconfig:"K6_CLOUD_PROJECT_ID"`
	Name            null.String `json:"name" envconfig:"K6_CLOUD_NAME"`

	Host       null.String `json:"host" envconfig:"K6_CLOUD_HOST"`
	WebAppURL  null.String `json:"webAppURL" envconfig:"K6_CLOUD_WEB_APP_URL"`
	NoCompress null.Bool   `json:"noCompress" envconfig:"K6_CLOUD_NO_COMPRESS"`

	MaxMetricSamplesPerPackage null.Int `json:"maxMetricSamplesPerPackage" envconfig:"K6_CLOUD_MAX_METRIC_SAMPLES_PER_PACKAGE"`

	// The time interval between periodic API calls for sending samples to the cloud ingest service.
	MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"K6_CLOUD_METRIC_PUSH_INTERVAL"`

	// If specified and greater than 0, sample aggregation with that period is enabled
	AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"K6_CLOUD_AGGREGATION_PERIOD"`

	// If aggregation is enabled, this is how often new HTTP trails will be sorted into buckets and sub-buckets and aggregated.
	AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"K6_CLOUD_AGGREGATION_CALC_INTERVAL"`

	// If aggregation is enabled, this specifies how long we'll wait for period samples to accumulate before trying to aggregate them.
	AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"K6_CLOUD_AGGREGATION_WAIT_PERIOD"`

	// If aggregation is enabled, but the samples collected for a certain AggregationPeriod are still fewer than this number once AggregationWaitPeriod has passed, they won't be aggregated.
	AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"K6_CLOUD_AGGREGATION_MIN_SAMPLES"`

	// If this is enabled and a sub-bucket has more than AggregationMinSamples HTTP trails in it, they would all be
	// aggregated without attempting to find and separate any outlier metrics first.
	// IMPORTANT: This is intended for testing purposes only or, in extreme cases, when the result precision
	// isn't very important and the improved aggregation percentage would be worth the potentially huge loss
	// of metric granularity and possible masking of any outlier samples.
	AggregationSkipOutlierDetection null.Bool `json:"aggregationSkipOutlierDetection" envconfig:"K6_CLOUD_AGGREGATION_SKIP_OUTLIER_DETECTION"`

	// If aggregation and outlier detection are enabled, this option specifies the
	// number of HTTP trails in a sub-bucket that determine which quartile-calculating
	// algorithm would be used:
	// - for fewer samples (between MinSamples and OutlierAlgoThreshold), a more precise
	//   (i.e. supporting interpolation), but also more computationally-heavy sorting
	//   algorithm will be used to find the quartiles.
	// - if there are more samples than OutlierAlgoThreshold in the sub-bucket, a
	//   QuickSelect-based (https://en.wikipedia.org/wiki/Quickselect) algorithm will
	//   be used. It doesn't support interpolation, so there's a small loss of precision
	//   in the outlier detection, but it's not as resource-heavy as the sorting algorithm.
	AggregationOutlierAlgoThreshold null.Int `json:"aggregationOutlierAlgoThreshold" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_ALGO_THRESHOLD"`

	// The radius (as a fraction) from the median at which to sample Q1 and Q3.
	// By default it's one quarter (0.25) and if set to something different, the Q in IQR
	// won't make much sense... But this would allow us to select tighter sample groups for
	// aggregation if we want.
	AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"`

	// How many IQRs below Q1 a connection or request time must fall to be considered a non-aggregatable outlier.
	AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"`

	// How many IQRs above Q3 a connection or request time must fall to be considered a non-aggregatable outlier.
	AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"K6_CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"`
}

Config holds all the necessary data and options for sending metrics to the Load Impact cloud.

func NewConfig added in v0.21.0

func NewConfig() Config

NewConfig creates a new Config instance with default values for some fields.

func (Config) Apply added in v0.18.0

func (c Config) Apply(cfg Config) Config

Apply saves non-zero values from the passed config into the receiver.
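
For instance, to overlay a partial config onto the defaults (assuming the gopkg.in/guregu/null.v3 helpers that k6 uses):

	conf := cloud.NewConfig().Apply(cloud.Config{
		Name:  null.StringFrom("nightly smoke test"), // placeholder values
		Token: null.StringFrom("my-token"),
	})
	// conf now has the defaults plus the overridden Name and Token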

type CreateTestRunResponse

type CreateTestRunResponse struct {
	ReferenceID    string  `json:"reference_id"`
	ConfigOverride *Config `json:"config"`
}

type ErrorResponse

type ErrorResponse struct {
	Response *http.Response `json:"-"`

	Code        int                 `json:"code"`
	Message     string              `json:"message"`
	Details     map[string][]string `json:"details"`
	FieldErrors map[string][]string `json:"field_errors"`
	Errors      []string            `json:"errors"`
}

ErrorResponse represents an error caused by talking to the API

func (ErrorResponse) Error

func (e ErrorResponse) Error() string

type LoginResponse added in v0.19.0

type LoginResponse struct {
	Token string `json:"token"`
}

type ResultStatus added in v0.22.0

type ResultStatus int
const (
	ResultStatusPassed ResultStatus = 0
	ResultStatusFailed ResultStatus = 1
)

type Sample added in v0.17.0

type Sample struct {
	Type   string      `json:"type"`
	Metric string      `json:"metric"`
	Data   interface{} `json:"data"`
}

Sample is the generic struct that contains all types of data that we send to the cloud.

func NewSampleFromTrail added in v0.21.0

func NewSampleFromTrail(trail *httpext.Trail) *Sample

NewSampleFromTrail just creates a ready-to-send Sample instance directly from an httpext.Trail.

func (*Sample) UnmarshalJSON added in v0.21.0

func (ct *Sample) UnmarshalJSON(p []byte) error

UnmarshalJSON decodes the Data into the corresponding struct

type SampleDataAggregatedHTTPReqs added in v0.21.0

type SampleDataAggregatedHTTPReqs struct {
	Time   Timestamp         `json:"time"`
	Type   string            `json:"type"`
	Count  uint64            `json:"count"`
	Tags   *stats.SampleTags `json:"tags,omitempty"`
	Values struct {
		Duration       AggregatedMetric `json:"http_req_duration"`
		Blocked        AggregatedMetric `json:"http_req_blocked"`
		Connecting     AggregatedMetric `json:"http_req_connecting"`
		TLSHandshaking AggregatedMetric `json:"http_req_tls_handshaking"`
		Sending        AggregatedMetric `json:"http_req_sending"`
		Waiting        AggregatedMetric `json:"http_req_waiting"`
		Receiving      AggregatedMetric `json:"http_req_receiving"`
	} `json:"values"`
}

SampleDataAggregatedHTTPReqs is used in aggregated samples for HTTP requests.

func (*SampleDataAggregatedHTTPReqs) Add added in v0.21.0

func (sdagg *SampleDataAggregatedHTTPReqs) Add(trail *httpext.Trail)

Add updates all aggregated values with the supplied trail data

func (*SampleDataAggregatedHTTPReqs) CalcAverages added in v0.21.0

func (sdagg *SampleDataAggregatedHTTPReqs) CalcAverages()

CalcAverages calculates and sets all `Avg` properties in the `Values` struct

type SampleDataMap added in v0.21.0

type SampleDataMap struct {
	Time   Timestamp          `json:"time"`
	Type   stats.MetricType   `json:"type"`
	Tags   *stats.SampleTags  `json:"tags,omitempty"`
	Values map[string]float64 `json:"values,omitempty"`
}

SampleDataMap is used by samples that contain multiple values, currently that's only iteration metrics (`iter_li_all`) and unaggregated HTTP requests (`http_req_li_all`).

type SampleDataSingle added in v0.21.0

type SampleDataSingle struct {
	Time  Timestamp         `json:"time"`
	Type  stats.MetricType  `json:"type"`
	Tags  *stats.SampleTags `json:"tags,omitempty"`
	Value float64           `json:"value"`
}

SampleDataSingle is used in all simple un-aggregated single-value samples.
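
A sketch of building such a sample by hand and shipping it with Client.PushMetric (client is a *Client from NewClient; the metric name, value and reference ID are placeholders):

	s := &cloud.Sample{
		Type:   cloud.DataTypeSingle,
		Metric: "my_metric", // placeholder metric name
		Data: cloud.SampleDataSingle{
			Time:  cloud.Timestamp(time.Now()),
			Type:  stats.Gauge,
			Value: 42,
		},
	}
	if err := client.PushMetric("ref-id", false, []*cloud.Sample{s}); err != nil {
		// handle upload error
	}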

type TestProgressResponse added in v0.20.0

type TestProgressResponse struct {
	RunStatusText string        `json:"run_status_text"`
	RunStatus     lib.RunStatus `json:"run_status"`
	ResultStatus  ResultStatus  `json:"result_status"`
	Progress      float64       `json:"progress"`
}

type TestRun

type TestRun struct {
	Name       string              `json:"name"`
	ProjectID  int64               `json:"project_id,omitempty"`
	VUsMax     int64               `json:"vus"`
	Thresholds map[string][]string `json:"thresholds"`
	// Duration of test in seconds. -1 for unknown length, 0 for continuous running.
	Duration int64 `json:"duration"`
}
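
A sketch of registering a run (client is a *Client from NewClient; all field values are placeholders):

	run := &cloud.TestRun{
		Name:       cloud.TestName,
		ProjectID:  123,
		VUsMax:     10,
		Thresholds: map[string][]string{"http_req_duration": {"p(95)<500"}},
		Duration:   60, // one minute; -1 for unknown length, 0 for continuous
	}
	resp, err := client.CreateTestRun(run)
	if err != nil {
		// handle error
	}
	_ = resp.ReferenceID // later used for PushMetric, GetTestProgress, TestFinished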

type ThresholdResult

type ThresholdResult map[string]map[string]bool

type Timestamp added in v0.21.0

type Timestamp time.Time

Timestamp is used for sending times encoded as microsecond UNIX timestamps to the cloud servers

func (Timestamp) Equal added in v0.21.0

func (ct Timestamp) Equal(other Timestamp) bool

Equal will return true if the difference between the timestamps is less than 1 microsecond

func (Timestamp) MarshalJSON added in v0.21.0

func (ct Timestamp) MarshalJSON() ([]byte, error)

MarshalJSON encodes the microsecond UNIX timestamps as strings because JavaScript doesn't have actual integers and tends to round big numbers

func (*Timestamp) UnmarshalJSON added in v0.21.0

func (ct *Timestamp) UnmarshalJSON(p []byte) error

UnmarshalJSON decodes the string-enclosed microsecond timestamp back into the proper time.Time alias
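
A quick round-trip sketch (assumes fmt and time, with this package imported as cloud):

	ts := cloud.Timestamp(time.Now())
	b, _ := ts.MarshalJSON() // a quoted microsecond count, e.g. "1582545600000000"
	var back cloud.Timestamp
	_ = back.UnmarshalJSON(b)
	fmt.Println(ts.Equal(back)) // true: only sub-microsecond precision is lost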
