Documentation ¶
Index ¶
- Constants
- Variables
- func CheckTotalValues(line string) (totalPoints, totalValues int64, err error)
- func LoadURL(url string) (tree *toml.Tree, err error)
- func NewSerializerInflux() *serializerInflux
- func RandChoice(choices [][]byte) []byte
- func Seed(seed int64)
- type ClampedRandomWalkDistribution
- type ConstantDistribution
- type Distribution
- type ExternalConfig
- func (c *ExternalConfig) GetFieldBytesValue(measurementName, tagKey []byte, failIfNotFound bool, defaultValue interface{}) interface{}
- func (c *ExternalConfig) GetFieldValue(measurementName, fieldKey string, failIfNotFound bool, defaultValue interface{}) interface{}
- func (c *ExternalConfig) GetTagBytesValue(measurementName, tagKey []byte, failIfNotFound bool, defaultValue []byte) []byte
- func (c *ExternalConfig) GetTagValue(measurementName, tagKey string, failIfNotFound bool, defaultValue string) string
- func (c *ExternalConfig) String() string
- type Field
- type Measurement
- type MonotonicRandomWalkDistribution
- type MonotonicUpDownRandomWalkDistribution
- type NormalDistribution
- type Point
- type RandomWalkDistribution
- type Serializer
- type SerializerCassandra
- type SerializerElastic
- type SerializerGraphiteLine
- type SerializerMongo
- type SerializerOpenTSDB
- type SerializerSplunkJson
- type SerializerTimescaleBin
- type SerializerTimescaleSql
- type SimulatedMeasurement
- type Simulator
- type Source
- type Tag
- type TwoStateDistribution
- type UniformDistribution
Constants ¶
const ( DefaultDateTimeStart = "2018-01-01T00:00:00Z" DefaultDateTimeEnd = "2018-01-02T00:00:00Z" UseCaseDevOps = "devops" UseCaseIot = "iot" UseCaseDashboard = "dashboard" UseCaseMetaquery = "metaquery" UseCaseWindowAggregate = "window-agg" UseCaseGroupAggregate = "group-agg" UseCaseBareAggregate = "bare-agg" UseCaseUngroupedAggregate = "ungrouped-agg" UseCaseGroupWindowTransposeHighCard = "group-window-transpose-high-card" UseCaseGroupWindowTransposeLowCard = "group-window-transpose-low-card" UseCaseMultiMeasurement = "multi-measurement" )
const DatasetSizeMarker = "dataset-size:"
Variables ¶
var DatasetSizeMarkerRE = regexp.MustCompile(DatasetSizeMarker + `(\d+),(\d+)`)
var DefaultValueGenerator = map[string]interface{}{
"type": "default",
}
var UseCaseChoices = []string{ UseCaseDevOps, UseCaseIot, UseCaseDashboard, UseCaseMetaquery, UseCaseWindowAggregate, UseCaseGroupAggregate, UseCaseBareAggregate, UseCaseUngroupedAggregate, UseCaseMultiMeasurement, }
Use case choices:
Functions ¶
func CheckTotalValues ¶
func NewSerializerInflux ¶
func NewSerializerInflux() *serializerInflux
func RandChoice ¶
Types ¶
type ClampedRandomWalkDistribution ¶
type ClampedRandomWalkDistribution struct { Step Distribution Min float64 Max float64 State float64 // optional }
ClampedRandomWalkDistribution is a stateful random walk, with minimum and maximum bounds. Initialize it with a Min, Max, and an underlying distribution, which is used to compute the new step value.
func CWD ¶
func CWD(step Distribution, min, max, state float64) *ClampedRandomWalkDistribution
func (*ClampedRandomWalkDistribution) Advance ¶
func (d *ClampedRandomWalkDistribution) Advance()
Advance computes the next value of this distribution and stores it.
func (*ClampedRandomWalkDistribution) Get ¶
func (d *ClampedRandomWalkDistribution) Get() float64
Get returns the last computed value for this distribution.
type ConstantDistribution ¶
type ConstantDistribution struct {
State float64
}
func (*ConstantDistribution) Advance ¶
func (d *ConstantDistribution) Advance()
func (*ConstantDistribution) Get ¶
func (d *ConstantDistribution) Get() float64
type Distribution ¶
type Distribution interface { Advance() Get() float64 // should be idempotent }
Distribution provides an interface to model a statistical distribution.
type ExternalConfig ¶
type ExternalConfig struct {
// contains filtered or unexported fields
}
var Config *ExternalConfig
func NewConfig ¶
func NewConfig(path string) (*ExternalConfig, error)
func (*ExternalConfig) GetFieldBytesValue ¶
func (c *ExternalConfig) GetFieldBytesValue(measurementName, tagKey []byte, failIfNotFound bool, defaultValue interface{}) interface{}
func (*ExternalConfig) GetFieldValue ¶
func (c *ExternalConfig) GetFieldValue(measurementName, fieldKey string, failIfNotFound bool, defaultValue interface{}) interface{}
func (*ExternalConfig) GetTagBytesValue ¶
func (c *ExternalConfig) GetTagBytesValue(measurementName, tagKey []byte, failIfNotFound bool, defaultValue []byte) []byte
func (*ExternalConfig) GetTagValue ¶
func (c *ExternalConfig) GetTagValue(measurementName, tagKey string, failIfNotFound bool, defaultValue string) string
func (*ExternalConfig) String ¶
func (c *ExternalConfig) String() string
type MonotonicRandomWalkDistribution ¶
type MonotonicRandomWalkDistribution struct { Step Distribution State float64 }
MonotonicRandomWalkDistribution is a stateful random walk that only increases. Initialize it with a State and an underlying distribution, which is used to compute the new step value. The sign of any value of the underlying distribution is always made positive.
func MWD ¶
func MWD(step Distribution, state float64) *MonotonicRandomWalkDistribution
func (*MonotonicRandomWalkDistribution) Advance ¶
func (d *MonotonicRandomWalkDistribution) Advance()
Advance computes the next value of this distribution and stores it.
func (*MonotonicRandomWalkDistribution) Get ¶
func (d *MonotonicRandomWalkDistribution) Get() float64
type MonotonicUpDownRandomWalkDistribution ¶
type MonotonicUpDownRandomWalkDistribution struct { Step Distribution State float64 Min float64 Max float64 // contains filtered or unexported fields }
MonotonicUpDownRandomWalkDistribution is a stateful random walk that continually increases and decreases. Initialize it with State, Min, and Max, and an underlying distribution, which is used to compute the new step value.
func MUDWD ¶
func MUDWD(step Distribution, min float64, max float64, state float64) *MonotonicUpDownRandomWalkDistribution
func (*MonotonicUpDownRandomWalkDistribution) Advance ¶
func (d *MonotonicUpDownRandomWalkDistribution) Advance()
Advance computes the next value of this distribution and stores it.
func (*MonotonicUpDownRandomWalkDistribution) Get ¶
func (d *MonotonicUpDownRandomWalkDistribution) Get() float64
type NormalDistribution ¶
type NormalDistribution struct { Mean float64 StdDev float64 // contains filtered or unexported fields }
NormalDistribution models a normal distribution.
func ND ¶
func ND(mean, stddev float64) *NormalDistribution
func (*NormalDistribution) Advance ¶
func (d *NormalDistribution) Advance()
Advance advances this distribution. Since a normal distribution is stateless, this just overwrites the internal cache value.
func (*NormalDistribution) Get ¶
func (d *NormalDistribution) Get() float64
Get returns the last computed value for this distribution.
type Point ¶
type Point struct { MeasurementName []byte TagKeys [][]byte TagValues [][]byte FieldKeys [][]byte FieldValues []interface{} Timestamp *time.Time // contains filtered or unexported fields }
Point wraps a single data point. It stores database-agnostic data representing one point in time of one measurement.
Internally, Point uses byte slices instead of strings to try to minimize overhead.
func MakeUsablePoint ¶
func MakeUsablePoint() *Point
MakeUsablePoint allocates a new Point ready for use by a Simulator.
func (*Point) AppendField ¶
func (*Point) SetMeasurementName ¶
func (*Point) SetTimestamp ¶
type RandomWalkDistribution ¶
type RandomWalkDistribution struct { Step Distribution State float64 // optional }
RandomWalkDistribution is a stateful random walk. Initialize it with an underlying distribution, which is used to compute the new step value.
func WD ¶
func WD(step Distribution, state float64) *RandomWalkDistribution
func (*RandomWalkDistribution) Advance ¶
func (d *RandomWalkDistribution) Advance()
Advance computes the next value of this distribution and stores it.
func (*RandomWalkDistribution) Get ¶
func (d *RandomWalkDistribution) Get() float64
Get returns the last computed value for this distribution.
type Serializer ¶
type SerializerCassandra ¶
type SerializerCassandra struct { }
func NewSerializerCassandra ¶
func NewSerializerCassandra() *SerializerCassandra
func (*SerializerCassandra) SerializePoint ¶
func (m *SerializerCassandra) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the Cassandra query format.
This function writes output that looks like: INSERT INTO <tablename> (series_id, ts_ns, value) VALUES (<series_id>, <timestamp_nanoseconds>, <field value>) where series_id looks like: <measurement>,<tagset>#<field name>#<time shard>
For example: INSERT INTO all_series (series_id, timestamp_ns, value) VALUES ('cpu,hostname=host_01#user#2016-01-01', 12345, 42.1)\n
func (*SerializerCassandra) SerializeSize ¶
type SerializerElastic ¶
type SerializerElastic struct {
// contains filtered or unexported fields
}
func NewSerializerElastic ¶
func NewSerializerElastic(version string) *SerializerElastic
func (*SerializerElastic) SerializePoint ¶
func (s *SerializerElastic) SerializePoint(w io.Writer, p *Point) error
func (*SerializerElastic) SerializeSize ¶
type SerializerGraphiteLine ¶
type SerializerGraphiteLine struct {
// contains filtered or unexported fields
}
func NewSerializerGraphiteLine ¶
func NewSerializerGraphiteLine() *SerializerGraphiteLine
func (*SerializerGraphiteLine) SerializePoint ¶
func (s *SerializerGraphiteLine) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the Graphite plain text line protocol.
func (*SerializerGraphiteLine) SerializeSize ¶
type SerializerMongo ¶
type SerializerMongo struct { }
func NewSerializerMongo ¶
func NewSerializerMongo() *SerializerMongo
func (*SerializerMongo) SerializePoint ¶
func (s *SerializerMongo) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the mongo_serialization FlatBuffers format.
func (*SerializerMongo) SerializeSize ¶
type SerializerOpenTSDB ¶
type SerializerOpenTSDB struct { }
func NewSerializerOpenTSDB ¶
func NewSerializerOpenTSDB() *SerializerOpenTSDB
func (*SerializerOpenTSDB) SerializePoint ¶
func (m *SerializerOpenTSDB) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the OpenTSDB bulk load protocol (the /api/put endpoint). Note that no line has a trailing comma. Downstream programs are responsible for creating batches for POSTing using a JSON array, and for adding any trailing commas (to conform to JSON). We use only millisecond-precision timestamps.
N.B. OpenTSDB only supports millisecond or second resolution timestamps. N.B. OpenTSDB millisecond timestamps must be 13 digits long. N.B. OpenTSDB only supports floating-point field values.
This function writes JSON lines that looks like: { <metric>, <timestamp>, <value>, <tags> }
For example: { "metric": "cpu.usage_user", "timestamp": 1451606400000, "value": 99.5170917755353770, "tags": { "hostname": "host_01", "region": "ap-southeast-2", "datacenter": "ap-southeast-2a" } }
func (*SerializerOpenTSDB) SerializeSize ¶
type SerializerSplunkJson ¶
type SerializerSplunkJson struct {
// contains filtered or unexported fields
}
func NewSerializerSplunkJson ¶
func NewSerializerSplunkJson() *SerializerSplunkJson
func (*SerializerSplunkJson) SerializePoint ¶
func (s *SerializerSplunkJson) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the Splunk JSON format.
This function writes output that looks like: ...
func (*SerializerSplunkJson) SerializeSize ¶
type SerializerTimescaleBin ¶
type SerializerTimescaleBin struct { }
func NewSerializerTimescaleBin ¶
func NewSerializerTimescaleBin() *SerializerTimescaleBin
func (*SerializerTimescaleBin) SerializePoint ¶
func (t *SerializerTimescaleBin) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the binary GOB-encoded format.
func (*SerializerTimescaleBin) SerializeSize ¶
type SerializerTimescaleSql ¶
type SerializerTimescaleSql struct { }
func NewSerializerTimescaleSql ¶
func NewSerializerTimescaleSql() *SerializerTimescaleSql
func (*SerializerTimescaleSql) SerializePoint ¶
func (s *SerializerTimescaleSql) SerializePoint(w io.Writer, p *Point) (err error)
SerializePoint writes Point data to the given writer, conforming to the Timescale insert format.
This function writes output that looks like: INSERT INTO <tablename> (time,<tag_name list>,<field_name list>') VALUES (<timestamp in nanoseconds>, <tag values list>, <field values>)
func (*SerializerTimescaleSql) SerializeSize ¶
type SimulatedMeasurement ¶
type SimulatedMeasurement interface { Tick(time.Duration) ToPoint(*Point) bool // returns true if the point is properly filled; false means the point should be skipped }
SimulatedMeasurement simulates one measurement (e.g. Redis for DevOps).
type Simulator ¶
type Simulator interface { Total() int64 SeenPoints() int64 SeenValues() int64 Finished() bool Next(*Point) }
Simulator simulates a use case.
type TwoStateDistribution ¶
TwoStateDistribution randomly chooses state from two values
func (*TwoStateDistribution) Advance ¶
func (d *TwoStateDistribution) Advance()
func (*TwoStateDistribution) Get ¶
func (d *TwoStateDistribution) Get() float64
type UniformDistribution ¶
type UniformDistribution struct { Low float64 High float64 // contains filtered or unexported fields }
UniformDistribution models a uniform distribution.
func UD ¶
func UD(low, high float64) *UniformDistribution
func (*UniformDistribution) Advance ¶
func (d *UniformDistribution) Advance()
Advance advances this distribution. Since a uniform distribution is stateless, this just overwrites the internal cache value.
func (*UniformDistribution) Get ¶
func (d *UniformDistribution) Get() float64
Get returns the last computed value for this distribution.