cloud

package
v0.22.0
Published: Jul 6, 2018 License: AGPL-3.0 Imports: 24 Imported by: 0

Documentation

Index

Constants

const (
	// Default request timeout
	RequestTimeout = 10 * time.Second
	// Retry interval
	RetryInterval = 500 * time.Millisecond
	// Retry attempts
	MaxRetries = 3
)
const DataTypeAggregatedHTTPReqs = "AggregatedPoints"
const DataTypeMap = "Points"
const DataTypeSingle = "Point"
const TestName = "k6 test"

TestName is the default Load Impact Cloud test name

Variables

var (
	ErrNotAuthorized    = errors.New("Not allowed to upload result to Load Impact cloud")
	ErrNotAuthenticated = errors.New("Failed to authenticate with Load Impact cloud")
	ErrUnknown          = errors.New("An error occurred talking to Load Impact cloud")
)

Functions

func URLForResults added in v0.19.0

func URLForResults(refID string, config Config) string
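
A hedged usage sketch (not from the package docs); the reference ID is a placeholder and NewConfig is documented below:

conf := cloud.NewConfig()
fmt.Println(cloud.URLForResults("abc123", conf))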

Types

type AggregatedMetric added in v0.21.0

type AggregatedMetric struct {

	// Updated by Calc() and used in the JSON output
	Min float64 `json:"min"`
	Max float64 `json:"max"`
	Avg float64 `json:"avg"`
	// contains filtered or unexported fields
}

AggregatedMetric is used to store aggregated information for a particular metric in a SampleDataAggregatedHTTPReqs.

func (*AggregatedMetric) Add added in v0.21.0

func (am *AggregatedMetric) Add(t time.Duration)

Add the new duration to the internal sum and update Min and Max if necessary

func (*AggregatedMetric) Calc added in v0.21.0

func (am *AggregatedMetric) Calc(count float64)

Calc populates the float fields for min and max and calculates the average value
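
A hedged usage sketch, assuming the zero value of AggregatedMetric is ready to use:

var am cloud.AggregatedMetric
for _, d := range []time.Duration{85 * time.Millisecond, 120 * time.Millisecond, 240 * time.Millisecond} {
	am.Add(d)
}
am.Calc(3) // populates am.Min, am.Max and am.Avg from the three added durations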

type Client

type Client struct {
	// contains filtered or unexported fields
}

Client handles communication with Load Impact cloud API.

func NewClient

func NewClient(token, host, version string) *Client

func (*Client) CreateTestRun

func (c *Client) CreateTestRun(testRun *TestRun) (*CreateTestRunResponse, error)

func (*Client) Do

func (c *Client) Do(req *http.Request, v interface{}) error

func (*Client) GetTestProgress added in v0.20.0

func (c *Client) GetTestProgress(referenceID string) (*TestProgressResponse, error)

func (*Client) Login added in v0.19.0

func (c *Client) Login(email string, password string) (*LoginResponse, error)

func (*Client) NewRequest

func (c *Client) NewRequest(method, url string, data interface{}) (*http.Request, error)

func (*Client) PushMetric

func (c *Client) PushMetric(referenceID string, noCompress bool, samples []*Sample) error

func (*Client) StartCloudTestRun added in v0.19.0

func (c *Client) StartCloudTestRun(name string, projectID int64, arc *lib.Archive) (string, error)

func (*Client) StopCloudTestRun added in v0.20.0

func (c *Client) StopCloudTestRun(referenceID string) error

func (*Client) TestFinished

func (c *Client) TestFinished(referenceID string, thresholds ThresholdResult, tained bool, runStatus lib.RunStatus) error

func (*Client) ValidateOptions added in v0.19.0

func (c *Client) ValidateOptions(options lib.Options) error
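
A hedged usage sketch of the low-level client; the token, host and version strings are placeholders and error handling is abbreviated:

client := cloud.NewClient("my-api-token", "https://ingest.example.com", "0.22.0")

resp, err := client.CreateTestRun(&cloud.TestRun{
	Name:     cloud.TestName,
	VUsMax:   10,
	Duration: 60, // seconds; -1 for unknown length, 0 for continuous running
})
if err != nil {
	log.Fatal(err)
}

progress, err := client.GetTestProgress(resp.ReferenceID)
if err != nil {
	log.Fatal(err)
}
fmt.Println(progress.RunStatusText, progress.Progress)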

type Collector

type Collector struct {
	// contains filtered or unexported fields
}

Collector sends result data to the Load Impact cloud service.

func New

func New(conf Config, src *lib.SourceData, opts lib.Options, version string) (*Collector, error)

New creates a new cloud collector

func (*Collector) Collect

func (c *Collector) Collect(sampleContainers []stats.SampleContainer)

func (*Collector) GetRequiredSystemTags added in v0.20.0

func (c *Collector) GetRequiredSystemTags() lib.TagSet

GetRequiredSystemTags returns which sample tags are needed by this collector

func (*Collector) Init

func (c *Collector) Init() error

func (*Collector) IsReady added in v0.17.0

func (c *Collector) IsReady() bool

func (*Collector) Link

func (c *Collector) Link() string

func (*Collector) Run

func (c *Collector) Run(ctx context.Context)

func (*Collector) SetRunStatus added in v0.21.1

func (c *Collector) SetRunStatus(status lib.RunStatus)
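
A hedged lifecycle sketch for the Collector, assuming src (*lib.SourceData), opts (lib.Options) and sampleContainers are supplied by the surrounding k6 engine, and the token is a placeholder:

conf := cloud.NewConfig()
conf.Token = null.StringFrom("my-api-token")

coll, err := cloud.New(conf, src, opts, "0.22.0")
if err != nil {
	log.Fatal(err)
}
if err := coll.Init(); err != nil {
	log.Fatal(err)
}

ctx, cancel := context.WithCancel(context.Background())
go coll.Run(ctx) // pushes buffered samples every MetricPushInterval

coll.Collect(sampleContainers) // called by the engine with new samples
fmt.Println("results at:", coll.Link())

cancel() // signals Run to stop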

type Config added in v0.18.0

type Config struct {
	Token           null.String `json:"token" envconfig:"CLOUD_TOKEN"`
	DeprecatedToken null.String `json:"-" envconfig:"K6CLOUD_TOKEN"`
	Name            null.String `json:"name" envconfig:"CLOUD_NAME"`
	Host            null.String `json:"host" envconfig:"CLOUD_HOST"`
	WebAppURL       null.String `json:"webAppURL" envconfig:"CLOUD_WEB_APP_URL"`
	NoCompress      null.Bool   `json:"noCompress" envconfig:"CLOUD_NO_COMPRESS"`
	ProjectID       null.Int    `json:"projectID" envconfig:"CLOUD_PROJECT_ID"`

	// The time interval between periodic API calls for sending samples to the cloud ingest service.
	MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"CLOUD_METRIC_PUSH_INTERVAL"`

	// If specified and greater than 0, sample aggregation with that period is enabled:
	// - HTTP trail samples will be collected separately and not
	//   included in the default sample buffer that's directly sent
	//   to the cloud service every MetricPushInterval.
	// - Every AggregationCalcInterval, all collected HTTP Trails will be
	//   split into AggregationPeriod-sized time buckets (time slots) and
	//   then into sub-buckets according to their tags (each sub-bucket
	//   will contain only HTTP trails with the same sample tags).
	// - If AggregationWaitPeriod is not passed for a particular time
	//   bucket, it's left undisturbed until the next AggregationCalcInterval
	//   tick comes along.
	// - If AggregationWaitPeriod is passed for a time bucket, all of its
	//   sub-buckets are traversed:
	//     - Any sub-buckets that have less than AggregationMinSamples HTTP
	//       trails in them are not aggregated, instead the HTTP trails are
	//       just added to the default sample buffer.
	//     - Sub-buckets with at least AggregationMinSamples HTTP trails
	//       are aggregated. The HTTP trails are checked for outliers
	//       (Trails with metrics outside of the AggregationOutliers) and
	//       all non-outliers are aggregated. The aggregation result and all
	//       found outliers are then added to the default sample buffer for
	//       sending to the cloud ingest service on the next MetricPushInterval.
	AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"CLOUD_AGGREGATION_PERIOD"`

	// If aggregation is enabled, this is how often new HTTP trails will be sorted into buckets and sub-buckets and aggregated.
	AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"CLOUD_AGGREGATION_CALC_INTERVAL"`

	// If aggregation is enabled, this specifies how long we'll wait for period samples to accumulate before trying to aggregate them.
	AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"CLOUD_AGGREGATION_WAIT_PERIOD"`

	// If aggregation is enabled, but fewer samples than this number have been collected for a certain AggregationPeriod once AggregationWaitPeriod has passed, they won't be aggregated.
	AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"CLOUD_AGGREGATION_MIN_SAMPLES"`

	// The radius (as a fraction) from the median at which to sample Q1 and Q3.
	// By default it's one quarter (0.25) and if set to something different, the Q in IQR
	// won't make much sense... But this would allow us to select tighter sample groups for
	// aggregation if we want.
	AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"`

	// How many IQRs below Q1 a connection or request time has to be to be considered a non-aggregatable outlier.
	AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"`

	// How many IQRs above Q3 a connection or request time has to be to be considered a non-aggregatable outlier.
	AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"`
}

Config holds all the necessary data and options for sending metrics to the Load Impact cloud.
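
For illustration, a hedged sketch of enabling the aggregation pipeline described above; the token and the concrete durations and thresholds are placeholders rather than recommended values, and NewConfig is documented below:

conf := cloud.NewConfig()
conf.Token = null.StringFrom("my-api-token")
conf.AggregationPeriod = types.NullDurationFrom(5 * time.Second) // > 0 enables aggregation
conf.AggregationCalcInterval = types.NullDurationFrom(3 * time.Second)
conf.AggregationWaitPeriod = types.NullDurationFrom(5 * time.Second)
conf.AggregationMinSamples = null.IntFrom(100)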

func NewConfig added in v0.21.0

func NewConfig() Config

NewConfig creates a new Config instance with default values for some fields.

func (Config) Apply added in v0.18.0

func (c Config) Apply(cfg Config) Config

Apply applies the non-zero values from the passed config over the receiver and returns the resulting config.
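
A hedged sketch of layering configs with Apply; the override values are placeholders:

defaults := cloud.NewConfig()
overrides := cloud.Config{
	Name:      null.StringFrom("nightly load test"),
	ProjectID: null.IntFrom(123456),
}
conf := defaults.Apply(overrides) // non-zero fields of overrides win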

type CreateTestRunResponse

type CreateTestRunResponse struct {
	ReferenceID    string  `json:"reference_id"`
	ConfigOverride *Config `json:"config"`
}

type ErrorResponse

type ErrorResponse struct {
	Response *http.Response `json:"-"`

	Code    int               `json:"code"`
	Message string            `json:"message"`
	Details map[string]string `json:"details"`
}

ErrorResponse represents an error caused by talking to the API

func (ErrorResponse) Error

func (e ErrorResponse) Error() string

type LoginResponse added in v0.19.0

type LoginResponse struct {
	Token string `json:"token"`
}

type ResultStatus added in v0.22.0

type ResultStatus int
const (
	ResultStatusPassed ResultStatus = 0
	ResultStatusFailed ResultStatus = 1
)

type Sample added in v0.17.0

type Sample struct {
	Type   string      `json:"type"`
	Metric string      `json:"metric"`
	Data   interface{} `json:"data"`
}

Sample is the generic struct that contains all types of data that we send to the cloud.

func NewSampleFromTrail added in v0.21.0

func NewSampleFromTrail(trail *netext.Trail) *Sample

NewSampleFromTrail just creates a ready-to-send Sample instance directly from a netext.Trail.
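
NewSampleFromTrail is the usual way to produce HTTP samples; for illustration, a hedged sketch of building a single-value Sample by hand, using the DataTypeSingle constant and the SampleDataSingle type documented below:

s := &cloud.Sample{
	Type:   cloud.DataTypeSingle,
	Metric: "vus",
	Data: cloud.SampleDataSingle{
		Time:  cloud.Timestamp(time.Now()),
		Type:  stats.Gauge,
		Value: 10,
	},
}
_ = s // would typically be sent via Client.PushMetric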

func (*Sample) UnmarshalJSON added in v0.21.0

func (ct *Sample) UnmarshalJSON(p []byte) error

UnmarshalJSON decodes the Data into the corresponding struct

type SampleDataAggregatedHTTPReqs added in v0.21.0

type SampleDataAggregatedHTTPReqs struct {
	Time   Timestamp         `json:"time"`
	Type   string            `json:"type"`
	Count  uint64            `json:"count"`
	Tags   *stats.SampleTags `json:"tags,omitempty"`
	Values struct {
		Duration       AggregatedMetric `json:"http_req_duration"`
		Blocked        AggregatedMetric `json:"http_req_blocked"`
		Connecting     AggregatedMetric `json:"http_req_connecting"`
		TLSHandshaking AggregatedMetric `json:"http_req_tls_handshaking"`
		Sending        AggregatedMetric `json:"http_req_sending"`
		Waiting        AggregatedMetric `json:"http_req_waiting"`
		Receiving      AggregatedMetric `json:"http_req_receiving"`
	} `json:"values"`
}

SampleDataAggregatedHTTPReqs is used in aggregated samples for HTTP requests.

func (*SampleDataAggregatedHTTPReqs) Add added in v0.21.0

func (sdagg *SampleDataAggregatedHTTPReqs) Add(trail *netext.Trail)

Add updates all aggregated values with the supplied trail data

func (*SampleDataAggregatedHTTPReqs) CalcAverages added in v0.21.0

func (sdagg *SampleDataAggregatedHTTPReqs) CalcAverages()

CalcAverages calculates and sets all `Avg` properties in the `Values` struct
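
A hedged sketch of aggregating a batch of HTTP trails, assuming trails ([]*netext.Trail) was collected elsewhere and the Type label is a placeholder:

agg := &cloud.SampleDataAggregatedHTTPReqs{
	Time: cloud.Timestamp(time.Now()),
	Type: "aggregated_trend",
}
for _, trail := range trails {
	agg.Add(trail)
}
agg.CalcAverages() // fills in the Avg fields of agg.Values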

type SampleDataMap added in v0.21.0

type SampleDataMap struct {
	Time   Timestamp          `json:"time"`
	Type   stats.MetricType   `json:"type"`
	Tags   *stats.SampleTags  `json:"tags,omitempty"`
	Values map[string]float64 `json:"values,omitempty"`
}

SampleDataMap is used by samples that contain multiple values, currently that's only iteration metrics (`iter_li_all`) and unaggregated HTTP requests (`http_req_li_all`).

type SampleDataSingle added in v0.21.0

type SampleDataSingle struct {
	Time  Timestamp         `json:"time"`
	Type  stats.MetricType  `json:"type"`
	Tags  *stats.SampleTags `json:"tags,omitempty"`
	Value float64           `json:"value"`
}

SampleDataSingle is used in all simple un-aggregated single-value samples.

type TestProgressResponse added in v0.20.0

type TestProgressResponse struct {
	RunStatusText string        `json:"run_status_text"`
	RunStatus     lib.RunStatus `json:"run_status"`
	ResultStatus  ResultStatus  `json:"result_status"`
	Progress      float64       `json:"progress"`
}

type TestRun

type TestRun struct {
	Name       string              `json:"name"`
	ProjectID  int64               `json:"project_id,omitempty"`
	VUsMax     int64               `json:"vus"`
	Thresholds map[string][]string `json:"thresholds"`
	// Duration of test in seconds. -1 for unknown length, 0 for continuous running.
	Duration int64 `json:"duration"`
}

type ThresholdResult

type ThresholdResult map[string]map[string]bool

type Timestamp added in v0.21.0

type Timestamp time.Time

Timestamp is used for sending times encoded as microsecond UNIX timestamps to the cloud servers

func (Timestamp) Equal added in v0.21.0

func (ct Timestamp) Equal(other Timestamp) bool

Equal will return true if the difference between the timestamps is less than 1 microsecond

func (Timestamp) MarshalJSON added in v0.21.0

func (ct Timestamp) MarshalJSON() ([]byte, error)

MarshalJSON encodes the microsecond UNIX timestamps as strings because JavaScript doesn't have actual integers and tends to round big numbers

func (*Timestamp) UnmarshalJSON added in v0.21.0

func (ct *Timestamp) UnmarshalJSON(p []byte) error

UnmarshalJSON decodes the string-enclosed microsecond timestamp back into the proper time.Time alias
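
A hedged round-trip sketch showing the string-wrapped microsecond encoding:

ts := cloud.Timestamp(time.Unix(1530864000, 0))
b, _ := json.Marshal(ts) // e.g. "1530864000000000"

var parsed cloud.Timestamp
_ = json.Unmarshal(b, &parsed)
fmt.Println(ts.Equal(parsed)) // true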
