Documentation ¶
Index ¶
- Constants
- Variables
- func Dialect(cfg *SQLConf) (gorm.Dialector, error)
- func LoadHeadersFromRawRequest(rawRequest string) (http.Header, error)
- func LoadHeadersFromRawResponse(rawResponse string) (http.Header, error)
- func Min(a, b int) int
- func NewLogzioClient(conf *LogzioPumpConfig) (*lg.LogzioSender, error)
- type ApiKeyTransport
- type BaseMongoConf
- type CSVConf
- type CSVPump
- type CommonPumpConfig
- func (p *CommonPumpConfig) GetEnvPrefix() string
- func (p *CommonPumpConfig) GetFilters() analytics.AnalyticsFilters
- func (p *CommonPumpConfig) GetMaxRecordSize() int
- func (p *CommonPumpConfig) GetOmitDetailedRecording() bool
- func (p *CommonPumpConfig) GetTimeout() int
- func (p *CommonPumpConfig) SetFilters(filters analytics.AnalyticsFilters)
- func (p *CommonPumpConfig) SetMaxRecordSize(size int)
- func (p *CommonPumpConfig) SetOmitDetailedRecording(OmitDetailedRecording bool)
- func (p *CommonPumpConfig) SetTimeout(timeout int)
- func (p *CommonPumpConfig) Shutdown() error
- type DogStatsdConf
- type DogStatsdPump
- type DummyPump
- type Elasticsearch3Operator
- type Elasticsearch5Operator
- type Elasticsearch6Operator
- type Elasticsearch7Operator
- type ElasticsearchBulkConfig
- type ElasticsearchConf
- type ElasticsearchOperator
- type ElasticsearchPump
- func (e *ElasticsearchPump) GetEnvPrefix() string
- func (e *ElasticsearchPump) GetName() string
- func (e *ElasticsearchPump) Init(config interface{}) error
- func (e *ElasticsearchPump) New() Pump
- func (e *ElasticsearchPump) Shutdown() error
- func (e *ElasticsearchPump) WriteData(ctx context.Context, data []interface{}) error
- type GraylogConf
- type GraylogPump
- type GroupLoginRequest
- type HybridPump
- type Influx2Conf
- type Influx2Pump
- type InfluxConf
- type InfluxPump
- type Json
- type KafkaConf
- type KafkaPump
- type LogzioPump
- type LogzioPumpConfig
- type MoesifConf
- type MoesifPump
- func (p *MoesifPump) GetEnvPrefix() string
- func (p *MoesifPump) GetName() string
- func (p *MoesifPump) GetTimeout() int
- func (p *MoesifPump) Init(config interface{}) error
- func (p *MoesifPump) New() Pump
- func (p *MoesifPump) SetTimeout(timeout int)
- func (p *MoesifPump) Shutdown() error
- func (p *MoesifPump) WriteData(ctx context.Context, data []interface{}) error
- type MongoAggregateConf
- type MongoAggregatePump
- func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID string, ...) error
- func (m *MongoAggregatePump) GetCollectionName(orgid string) (string, error)
- func (m *MongoAggregatePump) GetEnvPrefix() string
- func (m *MongoAggregatePump) GetName() string
- func (m *MongoAggregatePump) HandleWriteErr(err error) error
- func (m *MongoAggregatePump) Init(config interface{}) error
- func (m *MongoAggregatePump) New() Pump
- func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) error
- func (m *MongoAggregatePump) WriteUptimeData(data []interface{})
- type MongoConf
- type MongoPump
- func (m *MongoPump) AccumulateSet(data []interface{}) [][]interface{}
- func (m *MongoPump) GetEnvPrefix() string
- func (m *MongoPump) GetName() string
- func (m *MongoPump) Init(config interface{}) error
- func (m *MongoPump) New() Pump
- func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error
- func (m *MongoPump) WriteUptimeData(data []interface{})
- type MongoSelectiveConf
- type MongoSelectivePump
- func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]interface{}
- func (m *MongoSelectivePump) GetCollectionName(orgid string) (string, error)
- func (m *MongoSelectivePump) GetEnvPrefix() string
- func (m *MongoSelectivePump) GetName() string
- func (m *MongoSelectivePump) Init(config interface{}) error
- func (m *MongoSelectivePump) New() Pump
- func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) error
- func (m *MongoSelectivePump) WriteUptimeData(data []interface{})
- type MongoType
- type MysqlConfig
- type NewBucket
- type PostgresConfig
- type PrometheusConf
- type PrometheusMetric
- func (pm *PrometheusMetric) Expose() error
- func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) []string
- func (pm *PrometheusMetric) Inc(values ...string) error
- func (pm *PrometheusMetric) InitVec() error
- func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error
- type PrometheusPump
- func (p *PrometheusPump) CreateBasicMetrics()
- func (p *PrometheusPump) GetEnvPrefix() string
- func (p *PrometheusPump) GetName() string
- func (p *PrometheusPump) Init(conf interface{}) error
- func (p *PrometheusPump) New() Pump
- func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) error
- type Pump
- type RetentionRule
- type SQLAggregatePump
- func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ...) error
- func (c *SQLAggregatePump) GetEnvPrefix() string
- func (c *SQLAggregatePump) GetName() string
- func (c *SQLAggregatePump) Init(conf interface{}) error
- func (c *SQLAggregatePump) New() Pump
- func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) error
- type SQLAggregatePumpConf
- type SQLConf
- type SQLPump
- type SegmentConf
- type SegmentPump
- func (s *SegmentPump) GetEnvPrefix() string
- func (s *SegmentPump) GetName() string
- func (s *SegmentPump) Init(config interface{}) error
- func (s *SegmentPump) New() Pump
- func (s *SegmentPump) ToJSONMap(obj interface{}) (map[string]interface{}, error)
- func (s *SegmentPump) WriteData(ctx context.Context, data []interface{}) error
- func (s *SegmentPump) WriteDataRecord(record analytics.AnalyticsRecord) error
- type SplunkClient
- type SplunkPump
- func (p *SplunkPump) FilterTags(filteredTags []string) []string
- func (p *SplunkPump) GetEnvPrefix() string
- func (p *SplunkPump) GetName() string
- func (p *SplunkPump) Init(config interface{}) error
- func (p *SplunkPump) New() Pump
- func (p *SplunkPump) WriteData(ctx context.Context, data []interface{}) error
- type SplunkPumpConfig
- type StatsdConf
- type StatsdPump
- type StdOutConf
- type StdOutPump
- type SyslogConf
- type SyslogPump
- func (s *SyslogPump) GetEnvPrefix() string
- func (s *SyslogPump) GetFilters() analytics.AnalyticsFilters
- func (s *SyslogPump) GetName() string
- func (s *SyslogPump) GetTimeout() int
- func (s *SyslogPump) Init(config interface{}) error
- func (s *SyslogPump) New() Pump
- func (s *SyslogPump) SetFilters(filters analytics.AnalyticsFilters)
- func (s *SyslogPump) SetTimeout(timeout int)
- func (s *SyslogPump) WriteData(ctx context.Context, data []interface{}) error
- type TimestreamPump
- func (t *TimestreamPump) BuildTimestreamInputIterator(data []interface{}) (func() (records []types.Record, hasNext bool), bool)
- func (t *TimestreamPump) GetAnalyticsRecordDimensions(decoded *analytics.AnalyticsRecord) (dimensions []types.Dimension)
- func (t *TimestreamPump) GetAnalyticsRecordMeasures(decoded *analytics.AnalyticsRecord) (measureValues []types.MeasureValue)
- func (t *TimestreamPump) GetEnvPrefix() string
- func (t *TimestreamPump) GetName() string
- func (t *TimestreamPump) Init(config interface{}) error
- func (t *TimestreamPump) MapAnalyticRecord2TimestreamMultimeasureRecord(decoded *analytics.AnalyticsRecord) types.Record
- func (t *TimestreamPump) New() Pump
- func (t *TimestreamPump) NewTimesteramWriter() (c *timestreamwrite.Client, err error)
- func (t *TimestreamPump) WriteData(ctx context.Context, data []interface{}) error
- type TimestreamPumpConf
- type TimestreamWriteRecordsAPI
- type UptimePump
Constants ¶
const (
	LogzioPumpPrefix = "logzio-pump"
	LogzioPumpName   = "Logzio Pump"
)
const (
	KiB = 1 << (10 * iota)
	MiB
	GiB
	TiB
)
const COUNTER_TYPE = "counter"
const HISTOGRAM_TYPE = "histogram"
const PUMPS_ENV_META_PREFIX = "_META"
const PUMPS_ENV_PREFIX = "TYK_PMP_PUMPS"
Variables ¶
var AvailablePumps map[string]Pump
var COMMON_TAGS_COUNT = 5
var Commit string
var SQLAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLAGGREGATE" + PUMPS_ENV_META_PREFIX
var SQLAggregatePumpPrefix = "SQL-aggregate-pump"
var SQLDefaultENV = PUMPS_ENV_PREFIX + "_SQL" + PUMPS_ENV_META_PREFIX
var SQLDefaultQueryBatchSize = 1000
var SQLPrefix = "SQL-pump"
var THRESHOLD_LEN_TAG_LIST = 1000
var VERSION = "v1.6.0"
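Example (illustrative sketch): the per-pump default env prefixes above are plain string concatenations of the `PUMPS_ENV_*` constants. Assuming only the documented names, the SQL pump's default prefix resolves as follows:

package main

import "fmt"

const (
	PUMPS_ENV_PREFIX      = "TYK_PMP_PUMPS"
	PUMPS_ENV_META_PREFIX = "_META"
)

func main() {
	// Mirrors the SQLDefaultENV declaration above.
	sqlDefaultENV := PUMPS_ENV_PREFIX + "_SQL" + PUMPS_ENV_META_PREFIX
	fmt.Println(sqlDefaultENV) // TYK_PMP_PUMPS_SQL_META
}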
Functions ¶
func Dialect ¶
func Dialect(cfg *SQLConf) (gorm.Dialector, error)
func LoadHeadersFromRawRequest ¶ added in v1.6.0
func LoadHeadersFromRawRequest(rawRequest string) (http.Header, error)
func LoadHeadersFromRawResponse ¶ added in v1.6.0
func LoadHeadersFromRawResponse(rawResponse string) (http.Header, error)
func Min ¶
func Min(a, b int) int
func NewLogzioClient ¶
func NewLogzioClient(conf *LogzioPumpConfig) (*lg.LogzioSender, error)
Types ¶
type ApiKeyTransport ¶ added in v1.2.0
type BaseMongoConf ¶ added in v1.0.0
type BaseMongoConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The full URL to your MongoDB instance. This can be a clustered instance if necessary and
	// should include the database and username / password data.
	MongoURL string `json:"mongo_url" mapstructure:"mongo_url"`
	// Set to true to enable Mongo SSL connection.
	MongoUseSSL bool `json:"mongo_use_ssl" mapstructure:"mongo_use_ssl"`
	// Allows the use of self-signed certificates when connecting to an encrypted MongoDB database.
	MongoSSLInsecureSkipVerify bool `json:"mongo_ssl_insecure_skip_verify" mapstructure:"mongo_ssl_insecure_skip_verify"`
	// Ignore hostname check when it differs from the original (for example with SSH tunneling).
	// The rest of the TLS verification will still be performed.
	MongoSSLAllowInvalidHostnames bool `json:"mongo_ssl_allow_invalid_hostnames" mapstructure:"mongo_ssl_allow_invalid_hostnames"`
	// Path to the PEM file with trusted root certificates.
	MongoSSLCAFile string `json:"mongo_ssl_ca_file" mapstructure:"mongo_ssl_ca_file"`
	// Path to the PEM file which contains both client certificate and private key. This is
	// required for Mutual TLS.
	MongoSSLPEMKeyfile string `json:"mongo_ssl_pem_keyfile" mapstructure:"mongo_ssl_pem_keyfile"`
	// Specifies the mongo DB Type. If it's 0, you are using standard MongoDB; if it's 1, you are using AWS DocumentDB.
	// Defaults to standard MongoDB (0).
	MongoDBType MongoType `json:"mongo_db_type" mapstructure:"mongo_db_type"`
	// Set to true to disable the default tyk index creation.
	OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"`
}
func (*BaseMongoConf) GetBlurredURL ¶ added in v1.2.0
func (b *BaseMongoConf) GetBlurredURL() string
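Example (hedged sketch): GetBlurredURL is presumably intended for logging the connection string without leaking credentials; the exact masked output format is an assumption, not taken from this documentation.

package main

import (
	"fmt"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func main() {
	conf := pumps.BaseMongoConf{
		MongoURL: "mongodb://user:secret@localhost:27017/tyk_analytics",
	}
	// Safer to log: the credentials portion of the URL is blurred
	// (assumed output shape: mongodb://***:***@localhost:27017/tyk_analytics).
	fmt.Println(conf.GetBlurredURL())
}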
type CSVConf ¶
type CSVConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The directory and the filename where the CSV data will be stored.
	CSVDir string `json:"csv_dir" mapstructure:"csv_dir"`
}
@PumpConf CSV
type CSVPump ¶
type CSVPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*CSVPump) GetEnvPrefix ¶ added in v1.3.0
type CommonPumpConfig ¶ added in v1.2.0
type CommonPumpConfig struct {
	OmitDetailedRecording bool
	// contains filtered or unexported fields
}
func (*CommonPumpConfig) GetEnvPrefix ¶ added in v1.3.0
func (p *CommonPumpConfig) GetEnvPrefix() string
func (*CommonPumpConfig) GetFilters ¶ added in v1.2.0
func (p *CommonPumpConfig) GetFilters() analytics.AnalyticsFilters
func (*CommonPumpConfig) GetMaxRecordSize ¶ added in v1.5.0
func (p *CommonPumpConfig) GetMaxRecordSize() int
func (*CommonPumpConfig) GetOmitDetailedRecording ¶ added in v1.2.0
func (p *CommonPumpConfig) GetOmitDetailedRecording() bool
func (*CommonPumpConfig) GetTimeout ¶ added in v1.2.0
func (p *CommonPumpConfig) GetTimeout() int
func (*CommonPumpConfig) SetFilters ¶ added in v1.2.0
func (p *CommonPumpConfig) SetFilters(filters analytics.AnalyticsFilters)
func (*CommonPumpConfig) SetMaxRecordSize ¶ added in v1.5.0
func (p *CommonPumpConfig) SetMaxRecordSize(size int)
func (*CommonPumpConfig) SetOmitDetailedRecording ¶ added in v1.2.0
func (p *CommonPumpConfig) SetOmitDetailedRecording(OmitDetailedRecording bool)
func (*CommonPumpConfig) SetTimeout ¶ added in v1.2.0
func (p *CommonPumpConfig) SetTimeout(timeout int)
func (*CommonPumpConfig) Shutdown ¶ added in v1.5.0
func (p *CommonPumpConfig) Shutdown() error
type DogStatsdConf ¶
type DogStatsdConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Prefix for your metrics to datadog.
	Namespace string `json:"namespace" mapstructure:"namespace"`
	// Address of the datadog agent including host & port.
	Address string `json:"address" mapstructure:"address"`
	// Defaults to `1` which equates to `100%` of requests. To sample at `50%`, set to `0.5`.
	SampleRate float64 `json:"sample_rate" mapstructure:"sample_rate"`
	// Enable async UDS over UDP https://github.com/Datadog/datadog-go#unix-domain-sockets-client.
	AsyncUDS bool `json:"async_uds" mapstructure:"async_uds"`
	// Integer write timeout in seconds if `async_uds: true`.
	AsyncUDSWriteTimeout int `json:"async_uds_write_timeout_seconds" mapstructure:"async_uds_write_timeout_seconds"`
	// Enable buffering of messages.
	Buffered bool `json:"buffered" mapstructure:"buffered"`
	// Max messages in a single datagram if `buffered: true`. Default 16.
	BufferedMaxMessages int `json:"buffered_max_messages" mapstructure:"buffered_max_messages"`
	// List of tags to be added to the metric. The possible options are listed in the below example.
	//
	// If no tag is specified the fallback behavior is to use the below tags:
	// - `path`
	// - `method`
	// - `response_code`
	// - `api_version`
	// - `api_name`
	// - `api_id`
	// - `org_id`
	// - `tracked`
	// - `oauth_id`
	//
	// Note that this configuration can generate significant charges due to the unbounded nature of
	// the `path` tag.
	//
	// ```{.json}
	// "dogstatsd": {
	//   "type": "dogstatsd",
	//   "meta": {
	//     "address": "localhost:8125",
	//     "namespace": "pump",
	//     "async_uds": true,
	//     "async_uds_write_timeout_seconds": 2,
	//     "buffered": true,
	//     "buffered_max_messages": 32,
	//     "sample_rate": 0.5,
	//     "tags": [
	//       "method",
	//       "response_code",
	//       "api_version",
	//       "api_name",
	//       "api_id",
	//       "org_id",
	//       "tracked",
	//       "path",
	//       "oauth_id"
	//     ]
	//   }
	// },
	// ```
	//
	// On startup, you should see the loaded configs when initializing the dogstatsd pump:
	// ```
	// [May 10 15:23:44]  INFO dogstatsd: initializing pump
	// [May 10 15:23:44]  INFO dogstatsd: namespace: pump.
	// [May 10 15:23:44]  INFO dogstatsd: sample_rate: 50%
	// [May 10 15:23:44]  INFO dogstatsd: buffered: true, max_messages: 32
	// [May 10 15:23:44]  INFO dogstatsd: async_uds: true, write_timeout: 2s
	// ```
	Tags []string `json:"tags" mapstructure:"tags"`
}
@PumpConf DogStatsd
type DogStatsdPump ¶
type DogStatsdPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*DogStatsdPump) GetEnvPrefix ¶ added in v1.3.0
func (s *DogStatsdPump) GetEnvPrefix() string
func (*DogStatsdPump) GetName ¶
func (s *DogStatsdPump) GetName() string
func (*DogStatsdPump) Init ¶
func (s *DogStatsdPump) Init(conf interface{}) error
func (*DogStatsdPump) New ¶
func (s *DogStatsdPump) New() Pump
func (*DogStatsdPump) Shutdown ¶ added in v1.5.0
func (s *DogStatsdPump) Shutdown() error
type DummyPump ¶
type DummyPump struct {
CommonPumpConfig
}
type Elasticsearch3Operator ¶
type Elasticsearch3Operator struct {
// contains filtered or unexported fields
}
type Elasticsearch5Operator ¶
type Elasticsearch5Operator struct {
// contains filtered or unexported fields
}
type Elasticsearch6Operator ¶
type Elasticsearch6Operator struct {
// contains filtered or unexported fields
}
type Elasticsearch7Operator ¶ added in v1.5.0
type Elasticsearch7Operator struct {
// contains filtered or unexported fields
}
type ElasticsearchBulkConfig ¶ added in v1.0.0
type ElasticsearchBulkConfig struct {
	// Number of workers. Defaults to 1.
	Workers int `json:"workers" mapstructure:"workers"`
	// Specifies the time in seconds to flush the data and send it to ES. Default disabled.
	FlushInterval int `json:"flush_interval" mapstructure:"flush_interval"`
	// Specifies the number of requests needed to flush the data and send it to ES. Defaults to
	// 1000 requests. If needed, it can be disabled with -1.
	BulkActions int `json:"bulk_actions" mapstructure:"bulk_actions"`
	// Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB.
	// If needed, it can be disabled with -1.
	BulkSize int `json:"bulk_size" mapstructure:"bulk_size"`
}
type ElasticsearchConf ¶
type ElasticsearchConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The name of the index that all the analytics data will be placed in. Defaults to
	// "tyk_analytics".
	IndexName string `json:"index_name" mapstructure:"index_name"`
	// If sniffing is disabled, the URL that all data will be sent to. Defaults to
	// "http://localhost:9200".
	ElasticsearchURL string `json:"elasticsearch_url" mapstructure:"elasticsearch_url"`
	// If sniffing is enabled, the "elasticsearch_url" will be used to make a request to get a
	// list of all the nodes in the cluster, and the returned addresses will then be used. Defaults to
	// `false`.
	EnableSniffing bool `json:"use_sniffing" mapstructure:"use_sniffing"`
	// The type of the document that is created in ES. Defaults to "tyk_analytics".
	DocumentType string `json:"document_type" mapstructure:"document_type"`
	// Appends the date to the end of the index name, so each day's data is split into a different
	// index name. E.g. tyk_analytics-2016.02.28. Defaults to `false`.
	RollingIndex bool `json:"rolling_index" mapstructure:"rolling_index"`
	// If set to `true` will include the following additional fields: Raw Request, Raw Response and
	// User Agent.
	ExtendedStatistics bool `json:"extended_stats" mapstructure:"extended_stats"`
	// When enabled, generate _id for outgoing records. This prevents duplicate records when
	// retrying ES.
	GenerateID bool `json:"generate_id" mapstructure:"generate_id"`
	// Allows for the base64 bits to be decoded before being passed to ES.
	DecodeBase64 bool `json:"decode_base64" mapstructure:"decode_base64"`
	// Specifies the ES version. Use "3" for ES 3.X, "5" for ES 5.X, "6" for ES 6.X, "7" for ES
	// 7.X. Defaults to "3".
	Version string `json:"version" mapstructure:"version"`
	// Disable batch writing. Defaults to false.
	DisableBulk bool `json:"disable_bulk" mapstructure:"disable_bulk"`
	// Batch writing trigger configuration. Each option is OR'd with the others:
	BulkConfig ElasticsearchBulkConfig `json:"bulk_config" mapstructure:"bulk_config"`
	// API Key ID used for APIKey auth in ES. It's sent to ES in the Authorization header as ApiKey base64(auth_api_key_id:auth_api_key).
	AuthAPIKeyID string `json:"auth_api_key_id" mapstructure:"auth_api_key_id"`
	// API Key used for APIKey auth in ES. It's sent to ES in the Authorization header as ApiKey base64(auth_api_key_id:auth_api_key).
	AuthAPIKey string `json:"auth_api_key" mapstructure:"auth_api_key"`
	// Basic auth username. It's sent to ES in the Authorization header as username:password encoded in base64.
	Username string `json:"auth_basic_username" mapstructure:"auth_basic_username"`
	// Basic auth password. It's sent to ES in the Authorization header as username:password encoded in base64.
	Password string `json:"auth_basic_password" mapstructure:"auth_basic_password"`
}
@PumpConf Elasticsearch
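Example (hedged sketch): Init takes an interface{}; assuming the pump decodes it using the mapstructure tags above (an assumption based on those tags, not stated in this documentation), a map-based config can be passed directly. Values are illustrative, and Init will try to reach the configured cluster.

package main

import (
	"log"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func main() {
	pump := (&pumps.ElasticsearchPump{}).New()

	// Keys follow the mapstructure tags on ElasticsearchConf.
	err := pump.Init(map[string]interface{}{
		"elasticsearch_url": "http://localhost:9200",
		"index_name":        "tyk_analytics",
		"version":           "7",
		"rolling_index":     true,
	})
	if err != nil {
		log.Fatal(err)
	}
}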
type ElasticsearchOperator ¶
type ElasticsearchOperator interface {
// contains filtered or unexported methods
}
type ElasticsearchPump ¶
type ElasticsearchPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*ElasticsearchPump) GetEnvPrefix ¶ added in v1.3.0
func (e *ElasticsearchPump) GetEnvPrefix() string
func (*ElasticsearchPump) GetName ¶
func (e *ElasticsearchPump) GetName() string
func (*ElasticsearchPump) Init ¶
func (e *ElasticsearchPump) Init(config interface{}) error
func (*ElasticsearchPump) New ¶
func (e *ElasticsearchPump) New() Pump
func (*ElasticsearchPump) Shutdown ¶ added in v1.5.0
func (e *ElasticsearchPump) Shutdown() error
type GraylogConf ¶
type GraylogConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Graylog host.
	GraylogHost string `json:"host" mapstructure:"host"`
	// Graylog port.
	GraylogPort int `json:"port" mapstructure:"port"`
	// List of tags to be added to the metric.
	//
	// If no tag is specified the fallback behaviour is to not send anything.
	// The possible values are:
	// - `path`
	// - `method`
	// - `response_code`
	// - `api_version`
	// - `api_name`
	// - `api_id`
	// - `org_id`
	// - `tracked`
	// - `oauth_id`
	// - `raw_request`
	// - `raw_response`
	// - `request_time`
	// - `ip_address`
	Tags []string `json:"tags" mapstructure:"tags"`
}
@PumpConf Graylog
type GraylogPump ¶
type GraylogPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*GraylogPump) GetEnvPrefix ¶ added in v1.3.0
func (p *GraylogPump) GetEnvPrefix() string
func (*GraylogPump) GetName ¶
func (p *GraylogPump) GetName() string
func (*GraylogPump) Init ¶
func (p *GraylogPump) Init(conf interface{}) error
func (*GraylogPump) New ¶
func (p *GraylogPump) New() Pump
type GroupLoginRequest ¶
type HybridPump ¶
type HybridPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
HybridPump allows sending analytics to MDCB over RPC.
func (*HybridPump) GetName ¶
func (p *HybridPump) GetName() string
func (*HybridPump) Init ¶
func (p *HybridPump) Init(config interface{}) error
func (*HybridPump) New ¶
func (p *HybridPump) New() Pump
type Influx2Conf ¶ added in v1.5.1
type Influx2Conf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// InfluxDB2 pump bucket name.
	BucketName string `mapstructure:"bucket" json:"bucket"`
	// InfluxDB2 pump organization name.
	OrgName string `mapstructure:"organization" json:"organization"`
	// InfluxDB2 pump host.
	Addr string `mapstructure:"address" json:"address"`
	// InfluxDB2 pump database token.
	Token string `mapstructure:"token" json:"token"`
	// Define which Analytics fields should be sent to InfluxDB2. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `mapstructure:"fields" json:"fields"`
	// List of tags to be added to the metric.
	Tags []string `mapstructure:"tags" json:"tags"`
	// Flush data to InfluxDB2 as soon as the pump receives it.
	Flush bool `mapstructure:"flush" json:"flush"`
	// Create the bucket if it doesn't exist.
	CreateMissingBucket bool `mapstructure:"create_missing_bucket" json:"create_missing_bucket"`
	// New bucket configuration.
	NewBucketConfig NewBucket `mapstructure:"new_bucket_config" json:"new_bucket_config"`
}
@PumpConf Influx2
type Influx2Pump ¶ added in v1.5.1
type Influx2Pump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*Influx2Pump) GetEnvPrefix ¶ added in v1.5.1
func (i *Influx2Pump) GetEnvPrefix() string
func (*Influx2Pump) GetName ¶ added in v1.5.1
func (i *Influx2Pump) GetName() string
func (*Influx2Pump) Init ¶ added in v1.5.1
func (i *Influx2Pump) Init(config interface{}) error
func (*Influx2Pump) New ¶ added in v1.5.1
func (i *Influx2Pump) New() Pump
func (*Influx2Pump) Shutdown ¶ added in v1.5.1
func (i *Influx2Pump) Shutdown() error
type InfluxConf ¶
type InfluxConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// InfluxDB pump database name.
	DatabaseName string `json:"database_name" mapstructure:"database_name"`
	// InfluxDB pump host.
	Addr string `json:"address" mapstructure:"address"`
	// InfluxDB pump database username.
	Username string `json:"username" mapstructure:"username"`
	// InfluxDB pump database password.
	Password string `json:"password" mapstructure:"password"`
	// Define which Analytics fields should be sent to InfluxDB. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `json:"fields" mapstructure:"fields"`
	// List of tags to be added to the metric.
	Tags []string `json:"tags" mapstructure:"tags"`
}
@PumpConf Influx
type InfluxPump ¶
type InfluxPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*InfluxPump) GetEnvPrefix ¶ added in v1.3.0
func (i *InfluxPump) GetEnvPrefix() string
func (*InfluxPump) GetName ¶
func (i *InfluxPump) GetName() string
func (*InfluxPump) Init ¶
func (i *InfluxPump) Init(config interface{}) error
func (*InfluxPump) New ¶
func (i *InfluxPump) New() Pump
type KafkaConf ¶
type KafkaConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The list of brokers used to discover the partitions available on the kafka cluster. E.g.
	// "localhost:9092".
	Broker []string `json:"broker" mapstructure:"broker"`
	// Unique identifier for client connections established with Kafka.
	ClientId string `json:"client_id" mapstructure:"client_id"`
	// The topic that the writer will produce messages to.
	Topic string `json:"topic" mapstructure:"topic"`
	// Timeout is the maximum amount of time the writer will wait for a connect or write to complete.
	Timeout time.Duration `json:"timeout" mapstructure:"timeout"`
	// Enable the "github.com/golang/snappy" codec to be used to compress Kafka messages. By default
	// it is `false`.
	Compressed bool `json:"compressed" mapstructure:"compressed"`
	// Can be used to set custom metadata inside the kafka message.
	MetaData map[string]string `json:"meta_data" mapstructure:"meta_data"`
	// Enables SSL connection.
	UseSSL bool `json:"use_ssl" mapstructure:"use_ssl"`
	// Controls whether the pump client verifies the kafka server's certificate chain and host
	// name.
	SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"`
	// Can be used to set a custom certificate file for authentication with kafka.
	SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"`
	// Can be used to set a custom key file for authentication with kafka.
	SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"`
	// SASL mechanism configuration. Only "plain" and "scram" are supported.
	SASLMechanism string `json:"sasl_mechanism" mapstructure:"sasl_mechanism"`
	// SASL username.
	Username string `json:"sasl_username" mapstructure:"sasl_username"`
	// SASL password.
	Password string `json:"sasl_password" mapstructure:"sasl_password"`
	// SASL algorithm. It's the algorithm specified for the scram mechanism. It can be sha-512 or sha-256.
	// Defaults to "sha-256".
	Algorithm string `json:"sasl_algorithm" mapstructure:"sasl_algorithm"`
}
@PumpConf Kafka
type KafkaPump ¶
type KafkaPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*KafkaPump) GetEnvPrefix ¶ added in v1.3.0
type LogzioPump ¶
type LogzioPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*LogzioPump) GetEnvPrefix ¶ added in v1.3.0
func (p *LogzioPump) GetEnvPrefix() string
func (*LogzioPump) GetName ¶
func (p *LogzioPump) GetName() string
func (*LogzioPump) Init ¶
func (p *LogzioPump) Init(config interface{}) error
func (*LogzioPump) New ¶
func (p *LogzioPump) New() Pump
type LogzioPumpConfig ¶
type LogzioPumpConfig struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Set the sender to check if it crosses the maximum allowed disk usage. Default value is
	// `true`.
	CheckDiskSpace bool `json:"check_disk_space" mapstructure:"check_disk_space"`
	// Set the disk queue threshold; once the threshold is crossed the sender will not enqueue the
	// received logs. Default value is `98` (percentage of disk).
	DiskThreshold int `json:"disk_threshold" mapstructure:"disk_threshold"`
	// Set the drain duration (flush logs on disk). Default value is `3s`.
	DrainDuration string `json:"drain_duration" mapstructure:"drain_duration"`
	// The directory for the queue.
	QueueDir string `json:"queue_dir" mapstructure:"queue_dir"`
	// Token for sending data to your logzio account.
	Token string `json:"token" mapstructure:"token"`
	// If you do not want to use the default Logzio URL, e.g. when using a proxy. Default is
	// `https://listener.logz.io:8071`.
	URL string `json:"url" mapstructure:"url"`
}
@PumpConf Logzio
func NewLogzioPumpConfig ¶
func NewLogzioPumpConfig() *LogzioPumpConfig
type MoesifConf ¶
type MoesifConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Moesif Application Id. You can find your Moesif Application Id from
	// [_Moesif Dashboard_](https://www.moesif.com/) -> _Top Right Menu_ -> _API Keys_. Moesif
	// recommends creating separate Application Ids for each environment such as Production,
	// Staging, and Development to keep data isolated.
	ApplicationID string `json:"application_id" mapstructure:"application_id"`
	// An option to mask specific request header fields.
	RequestHeaderMasks []string `json:"request_header_masks" mapstructure:"request_header_masks"`
	// An option to mask specific response header fields.
	ResponseHeaderMasks []string `json:"response_header_masks" mapstructure:"response_header_masks"`
	// An option to mask specific request body fields.
	RequestBodyMasks []string `json:"request_body_masks" mapstructure:"request_body_masks"`
	// An option to mask specific response body fields.
	ResponseBodyMasks []string `json:"response_body_masks" mapstructure:"response_body_masks"`
	// An option to disable logging of the request body. Default value is `false`.
	DisableCaptureRequestBody bool `json:"disable_capture_request_body" mapstructure:"disable_capture_request_body"`
	// An option to disable logging of the response body. Default value is `false`.
	DisableCaptureResponseBody bool `json:"disable_capture_response_body" mapstructure:"disable_capture_response_body"`
	// An optional field name to identify a User from a request or response header.
	UserIDHeader string `json:"user_id_header" mapstructure:"user_id_header"`
	// An optional field name to identify a Company (Account) from a request or response header.
	CompanyIDHeader string `json:"company_id_header" mapstructure:"company_id_header"`
	// Set this to `true` to enable `bulk_config`.
	EnableBulk bool `json:"enable_bulk" mapstructure:"enable_bulk"`
	// Batch writing trigger configuration.
	//   * `"event_queue_size"` - (optional) Specifies the maximum number of events to hold in the
	//     queue before sending to Moesif. In case of network issues, when not able to connect/send
	//     events to Moesif, it skips adding new events to the queue to prevent memory overflow.
	//     Type: int. Default value is `10000`.
	//   * `"batch_size"` - (optional) Specifies the maximum batch size when sending to Moesif.
	//     Type: int. Default value is `200`.
	//   * `"timer_wake_up_seconds"` - (optional) Specifies how often (every n seconds) the
	//     background thread runs to send events to Moesif. Type: int. Default value is `2` seconds.
	BulkConfig map[string]interface{} `json:"bulk_config" mapstructure:"bulk_config"`
	// An optional request header field name used to identify the User in Moesif. Default value
	// is `authorization`.
	AuthorizationHeaderName string `json:"authorization_header_name" mapstructure:"authorization_header_name"`
	// An optional field name used to parse the User from the authorization header in Moesif. Default
	// value is `sub`.
	AuthorizationUserIdField string `json:"authorization_user_id_field" mapstructure:"authorization_user_id_field"`
}
@PumpConf Moesif
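Example (illustrative): since BulkConfig is a plain map[string]interface{}, the documented keys can be set directly; the values shown are the documented defaults.

package main

import "fmt"

func main() {
	// Keys and defaults as documented for MoesifConf.BulkConfig above.
	bulkConfig := map[string]interface{}{
		"event_queue_size":      10000, // max events held in the queue before sending
		"batch_size":            200,   // max batch size per send to Moesif
		"timer_wake_up_seconds": 2,     // background flush interval in seconds
	}
	fmt.Println(bulkConfig)
}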
type MoesifPump ¶
type MoesifPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*MoesifPump) GetEnvPrefix ¶ added in v1.3.0
func (p *MoesifPump) GetEnvPrefix() string
func (*MoesifPump) GetName ¶
func (p *MoesifPump) GetName() string
func (*MoesifPump) GetTimeout ¶ added in v1.0.0
func (p *MoesifPump) GetTimeout() int
func (*MoesifPump) Init ¶
func (p *MoesifPump) Init(config interface{}) error
func (*MoesifPump) New ¶
func (p *MoesifPump) New() Pump
func (*MoesifPump) SetTimeout ¶ added in v1.0.0
func (p *MoesifPump) SetTimeout(timeout int)
func (*MoesifPump) Shutdown ¶ added in v1.6.0
func (p *MoesifPump) Shutdown() error
type MongoAggregateConf ¶
type MongoAggregateConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf
	// If set to `true` your pump will store analytics in both your organisation-defined
	// collections z_tyk_analyticz_aggregate_{ORG ID} and your org-less tyk_analytics_aggregates
	// collection. When set to `false` your pump will only store analytics in your org-defined
	// collection.
	UseMixedCollection bool `json:"use_mixed_collection" mapstructure:"use_mixed_collection"`
	// Specifies if it should store aggregated data for all the endpoints. By default, `false`,
	// which means it only stores aggregated data for `tracked endpoints`.
	TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"`
	// Specifies prefixes of tags that should be ignored.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	// Determines the threshold for the number of tags in an aggregation. If the number of tags
	// exceeds the threshold, it will print an alert.
	// Defaults to 1000.
	ThresholdLenTagList int `json:"threshold_len_tag_list" mapstructure:"threshold_len_tag_list"`
	// Determines if the aggregations should be made per minute instead of per hour.
	StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"`
	// This list determines which aggregations are going to be dropped and not stored in the collection.
	// Possible values are: "APIID", "errors", "versions", "apikeys", "oauthids", "geo", "tags",
	// "endpoints", "keyendpoints", "oauthendpoints", and "apiendpoints".
	IgnoreAggregationsList []string `json:"ignore_aggregations" mapstructure:"ignore_aggregations"`
}
@PumpConf MongoAggregate
type MongoAggregatePump ¶
type MongoAggregatePump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*MongoAggregatePump) DoAggregatedWriting ¶ added in v1.5.1
func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, orgID string, filteredData analytics.AnalyticsRecordAggregate) error
func (*MongoAggregatePump) GetCollectionName ¶
func (m *MongoAggregatePump) GetCollectionName(orgid string) (string, error)
func (*MongoAggregatePump) GetEnvPrefix ¶ added in v1.3.0
func (m *MongoAggregatePump) GetEnvPrefix() string
func (*MongoAggregatePump) GetName ¶
func (m *MongoAggregatePump) GetName() string
func (*MongoAggregatePump) HandleWriteErr ¶
func (m *MongoAggregatePump) HandleWriteErr(err error) error
func (*MongoAggregatePump) Init ¶
func (m *MongoAggregatePump) Init(config interface{}) error
func (*MongoAggregatePump) New ¶
func (m *MongoAggregatePump) New() Pump
func (*MongoAggregatePump) WriteData ¶
func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) error
func (*MongoAggregatePump) WriteUptimeData ¶
func (m *MongoAggregatePump) WriteUptimeData(data []interface{})
WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection
type MongoConf ¶
type MongoConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf
	// Specifies the mongo collection name.
	CollectionName string `json:"collection_name" mapstructure:"collection_name"`
	// Maximum insert batch size for the mongo pump. If the batch we are writing surpasses this
	// value, it will be sent in multiple batches.
	// Defaults to 10Mb.
	MaxInsertBatchSizeBytes int `json:"max_insert_batch_size_bytes" mapstructure:"max_insert_batch_size_bytes"`
	// Maximum document size. If a document exceeds this value, it will be skipped.
	// Defaults to 10Mb.
	MaxDocumentSizeBytes int `json:"max_document_size_bytes" mapstructure:"max_document_size_bytes"`
	// Size in bytes of the capped collection on 64-bit architectures.
	// Defaults to 5GB.
	CollectionCapMaxSizeBytes int `json:"collection_cap_max_size_bytes" mapstructure:"collection_cap_max_size_bytes"`
	// Enable collection capping. It's used to set a maximum size for the collection.
	CollectionCapEnable bool `json:"collection_cap_enable" mapstructure:"collection_cap_enable"`
}
@PumpConf Mongo
type MongoPump ¶
type MongoPump struct {
	IsUptime bool
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*MongoPump) AccumulateSet ¶
func (m *MongoPump) AccumulateSet(data []interface{}) [][]interface{}
func (*MongoPump) GetEnvPrefix ¶ added in v1.3.0
func (m *MongoPump) GetEnvPrefix() string
func (*MongoPump) WriteUptimeData ¶
func (m *MongoPump) WriteUptimeData(data []interface{})
WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection
type MongoSelectiveConf ¶
type MongoSelectiveConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf
	// Maximum insert batch size for the mongo selective pump. If the batch we are writing
	// surpasses this value, it will be sent in multiple batches.
	// Defaults to 10Mb.
	MaxInsertBatchSizeBytes int `json:"max_insert_batch_size_bytes" mapstructure:"max_insert_batch_size_bytes"`
	// Maximum document size. If a document exceeds this value, it will be skipped.
	// Defaults to 10Mb.
	MaxDocumentSizeBytes int `json:"max_document_size_bytes" mapstructure:"max_document_size_bytes"`
}
@PumpConf MongoSelective
type MongoSelectivePump ¶
type MongoSelectivePump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*MongoSelectivePump) AccumulateSet ¶
func (m *MongoSelectivePump) AccumulateSet(data []interface{}) [][]interface{}
func (*MongoSelectivePump) GetCollectionName ¶
func (m *MongoSelectivePump) GetCollectionName(orgid string) (string, error)
func (*MongoSelectivePump) GetEnvPrefix ¶ added in v1.3.0
func (m *MongoSelectivePump) GetEnvPrefix() string
func (*MongoSelectivePump) GetName ¶
func (m *MongoSelectivePump) GetName() string
func (*MongoSelectivePump) Init ¶
func (m *MongoSelectivePump) Init(config interface{}) error
func (*MongoSelectivePump) New ¶
func (m *MongoSelectivePump) New() Pump
func (*MongoSelectivePump) WriteData ¶
func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) error
func (*MongoSelectivePump) WriteUptimeData ¶
func (m *MongoSelectivePump) WriteUptimeData(data []interface{})
WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection
type MysqlConfig ¶ added in v1.5.0
type MysqlConfig struct {
	// Default size for string fields. Defaults to `256`.
	DefaultStringSize uint `json:"default_string_size" mapstructure:"default_string_size"`
	// Disable datetime precision, which is not supported before MySQL 5.6.
	DisableDatetimePrecision bool `json:"disable_datetime_precision" mapstructure:"disable_datetime_precision"`
	// Drop & create the index when renaming an index; renaming indexes is not supported before
	// MySQL 5.7 and MariaDB.
	DontSupportRenameIndex bool `json:"dont_support_rename_index" mapstructure:"dont_support_rename_index"`
	// Use `change` when renaming a column; renaming columns is not supported before MySQL 8 and
	// MariaDB.
	DontSupportRenameColumn bool `json:"dont_support_rename_column" mapstructure:"dont_support_rename_column"`
	// Auto-configure based on the current MySQL version.
	SkipInitializeWithVersion bool `json:"skip_initialize_with_version" mapstructure:"skip_initialize_with_version"`
}
type NewBucket ¶ added in v1.5.1
type NewBucket struct {
	// A description visible in the InfluxDB2 UI.
	Description string `mapstructure:"description" json:"description"`
	// Rules to expire or retain data. No rules means data never expires.
	RetentionRules []RetentionRule `mapstructure:"retention_rules" json:"retention_rules"`
}
Configuration required to create the bucket if it doesn't already exist. See https://docs.influxdata.com/influxdb/v2.1/api/#operation/PostBuckets
type PostgresConfig ¶ added in v1.5.0
type PostgresConfig struct {
	// Disables implicit prepared statement usage.
	PreferSimpleProtocol bool `json:"prefer_simple_protocol" mapstructure:"prefer_simple_protocol"`
}
type PrometheusConf ¶
type PrometheusConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The full URL to your Prometheus instance, {HOST}:{PORT}. For example `localhost:9090`.
	Addr string `json:"listen_address" mapstructure:"listen_address"`
	// The path to the Prometheus collection. For example `/metrics`.
	Path string `json:"path" mapstructure:"path"`
	// This enables an experimental feature that aggregates the histogram metrics request time
	// values before exposing them to Prometheus. Enabling this will reduce the CPU usage of your
	// Prometheus pump, but you will lose histogram precision. Experimental.
	AggregateObservations bool `json:"aggregate_observations" mapstructure:"aggregate_observations"`
	// Custom Prometheus metrics.
	CustomMetrics []PrometheusMetric `json:"custom_metrics" mapstructure:"custom_metrics"`
}
@PumpConf Prometheus
type PrometheusMetric ¶ added in v1.6.0
type PrometheusMetric struct {
	// The name of the custom metric. For example: `tyk_http_status_per_api_name`.
	Name string `json:"name" mapstructure:"name"`
	// Description text of the custom metric. For example: `HTTP status codes per API`.
	Help string `json:"help" mapstructure:"help"`
	// Determines the type of the metric. There are currently two available options: `counter` or
	// `histogram`. In the case of histogram, you can only modify the labels since it is always
	// going to use request_time.
	MetricType string `json:"metric_type" mapstructure:"metric_type"`
	// Defines the buckets into which observations are counted. The type is float64 array and by
	// default it is [1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400,
	// 500, 1000, 2000, 5000, 10000, 30000, 60000].
	Buckets []float64 `json:"buckets" mapstructure:"buckets"`
	// Defines the partitions in the metrics. For example: ['response_code','api_name'].
	// The available labels are: `["host","method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id","request_time", "ip_address"]`.
	Labels []string `json:"labels" mapstructure:"labels"`
	// contains filtered or unexported fields
}
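Example (hedged sketch): a custom counter metric built from the exported fields above, reusing the example values from the field docs. Calling InitVec to materialize the vector is based on its doc below; the rest is illustrative.

package main

import (
	"log"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func main() {
	pm := pumps.PrometheusMetric{
		Name:       "tyk_http_status_per_api_name",
		Help:       "HTTP status codes per API",
		MetricType: "counter", // see COUNTER_TYPE above
		Labels:     []string{"response_code", "api_name"},
	}
	// InitVec creates the underlying counter vector for the declared labels.
	if err := pm.InitVec(); err != nil {
		log.Fatal(err)
	}
}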
func (*PrometheusMetric) Expose ¶ added in v1.7.0
func (pm *PrometheusMetric) Expose() error
Expose executes prometheus library functions using the counter/histogram vector from the PrometheusMetric struct. If the PrometheusMetric is COUNTER_TYPE, it executes the prometheus client's Add function to add the counters from counterMap to the metric's label values. If the PrometheusMetric is HISTOGRAM_TYPE and the aggregate_observations config is true, it calculates the average value of the metrics in histogramMap and executes prometheus Observe. If aggregate_observations is false, it does nothing, since the metric has already been exposed.
func (*PrometheusMetric) GetLabelsValues ¶ added in v1.6.0
func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) []string
GetLabelsValues returns a list of string values based on the custom metric labels.
func (*PrometheusMetric) Inc ¶ added in v1.7.0
func (pm *PrometheusMetric) Inc(values ...string) error
Inc fills counterMap and histogramMap with the data from the record.
func (*PrometheusMetric) InitVec ¶ added in v1.6.0
func (pm *PrometheusMetric) InitVec() error
InitVec initializes the prometheus metric based on the metric_type. It can only create counter and histogram metrics; if the metric_type is anything else, it returns an error.
func (*PrometheusMetric) Observe ¶ added in v1.7.0
func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error
Observe fills histogramMap with the sum of totalRequest and hits per label value if aggregate_observations is true. If aggregate_observations is set to false (default), it executes prometheus Observe directly.
type PrometheusPump ¶
type PrometheusPump struct {
	// Per service
	TotalStatusMetrics  *prometheus.CounterVec
	PathStatusMetrics   *prometheus.CounterVec
	KeyStatusMetrics    *prometheus.CounterVec
	OauthStatusMetrics  *prometheus.CounterVec
	TotalLatencyMetrics *prometheus.HistogramVec

	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*PrometheusPump) CreateBasicMetrics ¶ added in v1.7.0
func (p *PrometheusPump) CreateBasicMetrics()
CreateBasicMetrics stores all the predefined pump metrics in the allMetrics slice.
func (*PrometheusPump) GetEnvPrefix ¶ added in v1.3.0
func (p *PrometheusPump) GetEnvPrefix() string
func (*PrometheusPump) GetName ¶
func (p *PrometheusPump) GetName() string
func (*PrometheusPump) Init ¶
func (p *PrometheusPump) Init(conf interface{}) error
func (*PrometheusPump) New ¶
func (p *PrometheusPump) New() Pump
type Pump ¶
type Pump interface {
	GetName() string
	New() Pump
	Init(interface{}) error
	WriteData(context.Context, []interface{}) error
	SetFilters(analytics.AnalyticsFilters)
	GetFilters() analytics.AnalyticsFilters
	SetTimeout(timeout int)
	GetTimeout() int
	SetOmitDetailedRecording(bool)
	GetOmitDetailedRecording() bool
	GetEnvPrefix() string
	Shutdown() error
	SetMaxRecordSize(size int)
	GetMaxRecordSize() int
}
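Example (hedged sketch): a minimal custom pump. Embedding CommonPumpConfig supplies the filter/timeout/record-size/env-prefix getters and setters plus a no-op Shutdown (see its method set above), so only the four methods below remain. LogPump is hypothetical and not part of this package.

package custompump

import (
	"context"
	"fmt"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

// LogPump is a hypothetical minimal pump that just counts records.
type LogPump struct {
	pumps.CommonPumpConfig
}

// Compile-time check that LogPump satisfies the Pump interface.
var _ pumps.Pump = (*LogPump)(nil)

func (p *LogPump) GetName() string { return "Log Pump" }

func (p *LogPump) New() pumps.Pump { return &LogPump{} }

func (p *LogPump) Init(config interface{}) error {
	// Decode pump-specific config here.
	return nil
}

func (p *LogPump) WriteData(ctx context.Context, data []interface{}) error {
	fmt.Printf("writing %d records\n", len(data))
	return nil
}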
func GetPumpByName ¶
type RetentionRule ¶ added in v1.5.1
type RetentionRule struct {
	// Duration in seconds for how long data will be kept in the database. 0 means infinite.
	EverySeconds int64 `mapstructure:"every_seconds" json:"every_seconds"`
	// Shard duration measured in seconds.
	ShardGroupDurationSeconds int64 `mapstructure:"shard_group_duration_seconds" json:"shard_group_duration_seconds"`
	// Retention rule type. For example "expire".
	Type string `mapstructure:"type" json:"type"`
}
type SQLAggregatePump ¶ added in v1.5.0
type SQLAggregatePump struct {
	CommonPumpConfig

	SQLConf *SQLAggregatePumpConf
	// contains filtered or unexported fields
}
func (*SQLAggregatePump) DoAggregatedWriting ¶ added in v1.5.1
func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ag analytics.AnalyticsRecordAggregate) error
func (*SQLAggregatePump) GetEnvPrefix ¶ added in v1.5.0
func (c *SQLAggregatePump) GetEnvPrefix() string
func (*SQLAggregatePump) GetName ¶ added in v1.5.0
func (c *SQLAggregatePump) GetName() string
func (*SQLAggregatePump) Init ¶ added in v1.5.0
func (c *SQLAggregatePump) Init(conf interface{}) error
func (*SQLAggregatePump) New ¶ added in v1.5.0
func (c *SQLAggregatePump) New() Pump
func (*SQLAggregatePump) WriteData ¶ added in v1.5.0
func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) error
WriteData aggregates and writes the passed data to the SQL database. When table sharding is enabled, startIndex and endIndex are found by checking the timestamps of the records: the main for loop iterates and finds the index where a new day starts, then the data is passed to the AggregateData function and written to the database day by day in different tables. If table sharding is not enabled, the for loop iterates once and all the data is passed at once to the AggregateData function and written to a single table.
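The day-splitting described above can be illustrated standalone (the record type and helper are simplified stand-ins, not the pump's actual code):

package main

import (
	"fmt"
	"time"
)

type record struct{ TimeStamp time.Time }

// splitByDay returns [start, end) index pairs of consecutive records that
// share the same day, mirroring the loop described for WriteData.
func splitByDay(data []record) [][2]int {
	var chunks [][2]int
	start := 0
	for i := 1; i <= len(data); i++ {
		if i == len(data) || data[i].TimeStamp.Format("20060102") != data[start].TimeStamp.Format("20060102") {
			chunks = append(chunks, [2]int{start, i})
			start = i
		}
	}
	return chunks
}

func main() {
	day1 := time.Date(2022, 5, 10, 9, 0, 0, 0, time.UTC)
	day2 := day1.AddDate(0, 0, 1)
	fmt.Println(splitByDay([]record{{day1}, {day1.Add(time.Hour)}, {day2}})) // [[0 2] [2 3]]
}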
type SQLAggregatePumpConf ¶ added in v1.5.0
type SQLAggregatePumpConf struct {
	// TYKCONFIGEXPAND
	SQLConf `mapstructure:",squash"`

	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Specifies if it should store aggregated data for all the endpoints. By default, `false`,
	// which means it only stores aggregated data for `tracked endpoints`.
	TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"`
	// Specifies prefixes of tags that should be ignored.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	ThresholdLenTagList int      `json:"threshold_len_tag_list" mapstructure:"threshold_len_tag_list"`
	// Determines if the aggregations should be made per minute instead of per hour.
	StoreAnalyticsPerMinute bool     `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"`
	IgnoreAggregationsList  []string `json:"ignore_aggregations" mapstructure:"ignore_aggregations"`
}
@PumpConf SQLAggregate
type SQLConf ¶ added in v1.5.0
type SQLConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The supported and tested types are `sqlite` and `postgres`.
	Type string `json:"type" mapstructure:"type"`
	// Specifies the connection string to the database.
	ConnectionString string `json:"connection_string" mapstructure:"connection_string"`
	// Postgres configurations.
	Postgres PostgresConfig `json:"postgres" mapstructure:"postgres"`
	// Mysql configurations.
	Mysql MysqlConfig `json:"mysql" mapstructure:"mysql"`
	// Specifies if all the analytics records are going to be stored in one table or in multiple
	// tables (one per day). By default, `false`. If `false`, all the records are going to be
	// stored in the `tyk_aggregated` table. If `true`, all the records of the day are going to
	// be stored in the `tyk_aggregated_YYYYMMDD` table, where `YYYYMMDD` changes depending on
	// the date.
	TableSharding bool `json:"table_sharding" mapstructure:"table_sharding"`
	// Specifies the SQL log verbosity. The possible values are: `info`, `error` and `warning`. By
	// default, the value is `silent`, which means that it won't log any SQL query.
	LogLevel string `json:"log_level" mapstructure:"log_level"`
	// Specifies the number of records that are going to be written in each batch. Type int. By
	// default, it writes 1000 records max per batch.
	BatchSize int `json:"batch_size" mapstructure:"batch_size"`
}
@PumpConf SQL
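Example (illustrative): with table_sharding enabled, the per-day table name follows the YYYYMMDD convention documented above; a sketch of deriving such a name in Go:

package main

import (
	"fmt"
	"time"
)

func main() {
	day := time.Date(2022, 5, 10, 0, 0, 0, 0, time.UTC)
	// "20060102" is Go's reference layout for YYYYMMDD.
	fmt.Printf("tyk_aggregated_%s\n", day.Format("20060102")) // tyk_aggregated_20220510
}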
type SQLPump ¶ added in v1.5.0
type SQLPump struct {
	CommonPumpConfig
	IsUptime bool

	SQLConf *SQLConf
	// contains filtered or unexported fields
}
func (*SQLPump) GetEnvPrefix ¶ added in v1.5.0
func (c *SQLPump) GetEnvPrefix() string
func (*SQLPump) WriteUptimeData ¶ added in v1.5.0
func (c *SQLPump) WriteUptimeData(data []interface{})
type SegmentConf ¶
type SegmentPump ¶
type SegmentPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*SegmentPump) GetEnvPrefix ¶ added in v1.3.0
func (s *SegmentPump) GetEnvPrefix() string
func (*SegmentPump) GetName ¶
func (s *SegmentPump) GetName() string
func (*SegmentPump) Init ¶
func (s *SegmentPump) Init(config interface{}) error
func (*SegmentPump) New ¶
func (s *SegmentPump) New() Pump
func (*SegmentPump) ToJSONMap ¶
func (s *SegmentPump) ToJSONMap(obj interface{}) (map[string]interface{}, error)
func (*SegmentPump) WriteData ¶
func (s *SegmentPump) WriteData(ctx context.Context, data []interface{}) error
func (*SegmentPump) WriteDataRecord ¶
func (s *SegmentPump) WriteDataRecord(record analytics.AnalyticsRecord) error
type SplunkClient ¶
type SplunkClient struct {
	Token         string
	CollectorURL  string
	TLSSkipVerify bool
	// contains filtered or unexported fields
}
SplunkClient contains Splunk client methods.
func NewSplunkClient ¶
func NewSplunkClient(token string, collectorURL string, skipVerify bool, certFile string, keyFile string, serverName string) (c *SplunkClient, err error)
NewSplunkClient initializes a new SplunkClient.
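Example (illustrative): constructing a client with the documented signature; the token and URL are placeholders.

package main

import (
	"log"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func main() {
	client, err := pumps.NewSplunkClient(
		"collector-token", // token
		"https://splunk:8088/services/collector/event", // collectorURL
		false, // skipVerify
		"",    // certFile
		"",    // keyFile
		"",    // serverName
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}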
type SplunkPump ¶
type SplunkPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
SplunkPump is a Tyk Pump driver for Splunk.
func (*SplunkPump) FilterTags ¶ added in v1.5.0
func (p *SplunkPump) FilterTags(filteredTags []string) []string
FilterTags filters the tags based on the configured rules.
func (*SplunkPump) GetEnvPrefix ¶ added in v1.3.0
func (p *SplunkPump) GetEnvPrefix() string
func (*SplunkPump) Init ¶
func (p *SplunkPump) Init(config interface{}) error
Init performs the initialization of the SplunkClient.
type SplunkPumpConfig ¶
type SplunkPumpConfig struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Token used by the pump to authenticate with the Splunk collector.
	CollectorToken string `json:"collector_token" mapstructure:"collector_token"`
	// Endpoint the Pump will send analytics to. Should look something like:
	// `https://splunk:8088/services/collector/event`.
	CollectorURL string `json:"collector_url" mapstructure:"collector_url"`
	// Controls whether the pump client verifies the Splunk server's certificate chain and host name.
	SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"`
	// SSL cert file location.
	SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"`
	// SSL cert key location.
	SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"`
	// SSL Server name used in the TLS connection.
	SSLServerName string `json:"ssl_server_name" mapstructure:"ssl_server_name"`
	// Controls whether the pump client should hide the API key. In case you still need a
	// substring of the value, check the next option. Default value is `false`.
	ObfuscateAPIKeys bool `json:"obfuscate_api_keys" mapstructure:"obfuscate_api_keys"`
	// Defines the number of characters to keep from the end of the API key. `obfuscate_api_keys`
	// should be set to `true`. Default value is `0`.
	ObfuscateAPIKeysLength int `json:"obfuscate_api_keys_length" mapstructure:"obfuscate_api_keys_length"`
	// Define which Analytics fields should participate in the Splunk event. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `json:"fields" mapstructure:"fields"`
	// Choose which tags to be ignored by the Splunk Pump. Keep in mind that the tag name and
	// value are hyphenated. Default value is `[]`.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	// If this is set to `true`, the pump is going to send the analytics records in batches to
	// Splunk. Default value is `false`.
	EnableBatch bool `json:"enable_batch" mapstructure:"enable_batch"`
	// Max content length in bytes to be sent in batch requests. It should match the
	// `max_content_length` configured in Splunk. If the purged analytics records don't reach this
	// number of bytes, they're sent anyway in each `purge_loop`. Default value is 838860800
	// (~800 MB), the same default value as the Splunk config.
	BatchMaxContentLength int `json:"batch_max_content_length" mapstructure:"batch_max_content_length"`
}
SplunkPumpConfig contains the driver configuration parameters. @PumpConf Splunk
type StatsdConf ¶
type StatsdConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Address of statsd including host & port.
	Address string `json:"address" mapstructure:"address"`
	// Define which Analytics fields should have their own metric calculation.
	Fields []string `json:"fields" mapstructure:"fields"`
	// List of tags to be added to the metric.
	Tags []string `json:"tags" mapstructure:"tags"`
	// Allows a separate method field instead of having it embedded in the path field.
	SeparatedMethod bool `json:"separated_method" mapstructure:"separated_method"`
}
@PumpConf Statsd
type StatsdPump ¶
type StatsdPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*StatsdPump) GetEnvPrefix ¶ added in v1.3.0
func (s *StatsdPump) GetEnvPrefix() string
func (*StatsdPump) GetName ¶
func (s *StatsdPump) GetName() string
func (*StatsdPump) Init ¶
func (s *StatsdPump) Init(config interface{}) error
func (*StatsdPump) New ¶
func (s *StatsdPump) New() Pump
type StdOutConf ¶ added in v1.4.0
type StdOutConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Format of the analytics logs. Default is `text` if `json` is not explicitly specified. When
	// JSON logging is used, all pump logs to stdout will be JSON.
	Format string `json:"format" mapstructure:"format"`
	// Root name of the JSON object the analytics record is nested in.
	LogFieldName string `json:"log_field_name" mapstructure:"log_field_name"`
}
@PumpConf StdOut
type StdOutPump ¶ added in v1.4.0
type StdOutPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*StdOutPump) GetEnvPrefix ¶ added in v1.4.0
func (s *StdOutPump) GetEnvPrefix() string
func (*StdOutPump) GetName ¶ added in v1.4.0
func (s *StdOutPump) GetName() string
func (*StdOutPump) Init ¶ added in v1.4.0
func (s *StdOutPump) Init(config interface{}) error
func (*StdOutPump) New ¶ added in v1.4.0
func (s *StdOutPump) New() Pump
type SyslogConf ¶ added in v1.2.0
type SyslogConf struct {
	EnvPrefix string `json:"meta_env_prefix" mapstructure:"meta_env_prefix"`
	// Possible values are `udp, tcp, tls` in string form.
	Transport string `json:"transport" mapstructure:"transport"`
	// Host & Port combination of your syslog daemon, e.g. `"localhost:5140"`.
	NetworkAddr string `json:"network_addr" mapstructure:"network_addr"`
	// The severity level, an integer from 0-7, based on the standard:
	// [Syslog Severity Levels](https://en.wikipedia.org/wiki/Syslog#Severity_level).
	LogLevel int `json:"log_level" mapstructure:"log_level"`
	// Prefix tag
	//
	// When working with FluentD, you should provide a
	// [FluentD Parser](https://docs.fluentd.org/input/syslog) based on the OS you are using so
	// that FluentD can correctly read the logs.
	//
	// ```{.json}
	// "syslog": {
	//   "name": "syslog",
	//   "meta": {
	//     "transport": "udp",
	//     "network_addr": "localhost:5140",
	//     "log_level": 6,
	//     "tag": "syslog-pump"
	//   }
	// }
	// ```
	Tag string `json:"tag" mapstructure:"tag"`
}
@PumpConf Syslog
type SyslogPump ¶ added in v1.2.0
type SyslogPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*SyslogPump) GetEnvPrefix ¶ added in v1.3.0
func (s *SyslogPump) GetEnvPrefix() string
func (*SyslogPump) GetFilters ¶ added in v1.2.0
func (s *SyslogPump) GetFilters() analytics.AnalyticsFilters
func (*SyslogPump) GetName ¶ added in v1.2.0
func (s *SyslogPump) GetName() string
func (*SyslogPump) GetTimeout ¶ added in v1.2.0
func (s *SyslogPump) GetTimeout() int
func (*SyslogPump) Init ¶ added in v1.2.0
func (s *SyslogPump) Init(config interface{}) error
func (*SyslogPump) New ¶ added in v1.2.0
func (s *SyslogPump) New() Pump
func (*SyslogPump) SetFilters ¶ added in v1.2.0
func (s *SyslogPump) SetFilters(filters analytics.AnalyticsFilters)
func (*SyslogPump) SetTimeout ¶ added in v1.2.0
func (s *SyslogPump) SetTimeout(timeout int)
type TimestreamPump ¶ added in v1.6.0
type TimestreamPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}
func (*TimestreamPump) BuildTimestreamInputIterator ¶ added in v1.6.0
func (t *TimestreamPump) BuildTimestreamInputIterator(data []interface{}) (func() (records []types.Record, hasNext bool), bool)
func (*TimestreamPump) GetAnalyticsRecordDimensions ¶ added in v1.6.0
func (t *TimestreamPump) GetAnalyticsRecordDimensions(decoded *analytics.AnalyticsRecord) (dimensions []types.Dimension)
func (*TimestreamPump) GetAnalyticsRecordMeasures ¶ added in v1.6.0
func (t *TimestreamPump) GetAnalyticsRecordMeasures(decoded *analytics.AnalyticsRecord) (measureValues []types.MeasureValue)
func (*TimestreamPump) GetEnvPrefix ¶ added in v1.6.0
func (t *TimestreamPump) GetEnvPrefix() string
func (*TimestreamPump) GetName ¶ added in v1.6.0
func (t *TimestreamPump) GetName() string
func (*TimestreamPump) Init ¶ added in v1.6.0
func (t *TimestreamPump) Init(config interface{}) error
func (*TimestreamPump) MapAnalyticRecord2TimestreamMultimeasureRecord ¶ added in v1.6.0
func (t *TimestreamPump) MapAnalyticRecord2TimestreamMultimeasureRecord(decoded *analytics.AnalyticsRecord) types.Record
func (*TimestreamPump) New ¶ added in v1.6.0
func (t *TimestreamPump) New() Pump
func (*TimestreamPump) NewTimesteramWriter ¶ added in v1.6.0
func (t *TimestreamPump) NewTimesteramWriter() (c *timestreamwrite.Client, err error)
type TimestreamPumpConf ¶ added in v1.6.0
type TimestreamPumpConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The aws region that contains the timestream database.
	AWSRegion string `mapstructure:"aws_region"`
	// The table name where the data is going to be written.
	TableName string `mapstructure:"timestream_table_name"`
	// The timestream database name that contains the table being written to.
	DatabaseName string `mapstructure:"timestream_database_name"`
	// A filter of all the dimensions that will be written to the table. The possible options are
	// ["Method","Host","Path","RawPath","APIKey","APIVersion","APIName","APIID","OrgID","OauthID"].
	Dimensions []string `mapstructure:"dimensions"`
	// A filter of all the measures that will be written to the table. The possible options are
	// ["ContentLength","ResponseCode","RequestTime","NetworkStats.OpenConnections",
	// "NetworkStats.ClosedConnection","NetworkStats.BytesIn","NetworkStats.BytesOut",
	// "Latency.Total","Latency.Upstream","GeoData.City.GeoNameID","IPAddress",
	// "GeoData.Location.Latitude","GeoData.Location.Longitude","UserAgent","RawRequest","RawResponse",
	// "RateLimit.Limit","Ratelimit.Remaining","Ratelimit.Reset",
	// "GeoData.Country.ISOCode","GeoData.City.Names","GeoData.Location.TimeZone"].
	Measures []string `mapstructure:"measures"`
	// Set to true in order to save any of the `RateLimit` measures. Default value is `false`.
	WriteRateLimit bool `mapstructure:"write_rate_limit"`
	// If set to true, the pump will try to read geo information from the headers if
	// values aren't found on the analytics record. Default value is `false`.
	ReadGeoFromRequest bool `mapstructure:"read_geo_from_request"`
	// Set to true in order to save numerical values with value zero. Default value is `false`.
	WriteZeroValues bool `mapstructure:"write_zero_values"`
	// A name mapping for both Dimension and Measure names. It's not required.
	NameMappings map[string]string `mapstructure:"field_name_mappings"`
}
@PumpConf Timestream
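Example (hedged sketch): a Timestream meta config expressed as the map Init would decode, assuming the mapstructure tags above drive the decoding; all values are illustrative.

package main

import "fmt"

func main() {
	// Keys follow the mapstructure tags on TimestreamPumpConf.
	meta := map[string]interface{}{
		"aws_region":               "us-east-1",
		"timestream_database_name": "tyk-analytics",
		"timestream_table_name":    "tyk-requests",
		"dimensions":               []string{"Method", "Host", "Path", "APIName"},
		"measures":                 []string{"ResponseCode", "RequestTime", "Latency.Total"},
	}
	fmt.Println(meta)
}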
type TimestreamWriteRecordsAPI ¶ added in v1.6.0
type TimestreamWriteRecordsAPI interface {
WriteRecords(ctx context.Context, params *timestreamwrite.WriteRecordsInput, optFns ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error)
}
type UptimePump ¶ added in v1.5.0
Source Files ¶
- common.go
- csv.go
- dogstatsd.go
- dummy.go
- elasticsearch.go
- graylog.go
- hybrid.go
- influx.go
- influx2.go
- init.go
- kafka.go
- logzio.go
- moesif.go
- mongo.go
- mongo_aggregate.go
- mongo_selective.go
- prometheus.go
- pump.go
- segment.go
- splunk.go
- sql.go
- sql_aggregate.go
- statsd.go
- stdout.go
- syslog.go
- timestream.go
- version.go