pumps

package
v1.11.1
Published: Oct 8, 2024 License: MPL-2.0 Imports: 87 Imported by: 1

Documentation

Index

Constants

const (
	GraphSQLPrefix = "GraphSQL-Pump"
	GraphSQLTable  = "tyk_analytics_graph"
)
const (
	LogzioPumpPrefix = "logzio-pump"
	LogzioPumpName   = "Logzio Pump"
)
const (
	KiB = 1 << (10 * iota)
	MiB
	GiB
	TiB
)
const (
	AWSDBError    = 303
	CosmosDBError = 115
)
const PUMPS_ENV_META_PREFIX = "_META"
const PUMPS_ENV_PREFIX = "TYK_PMP_PUMPS"

Variables

var (
	DefaultRPCCallTimeout = 10
	ErrRPCLogin           = errors.New("RPC login incorrect")
)
var (
	ThresholdLenTagList = 1000
	CommonTagsCount     = 5
)
var (
	SQLPrefix                = "SQL-pump"
	SQLDefaultENV            = PUMPS_ENV_PREFIX + "_SQL" + PUMPS_ENV_META_PREFIX
	SQLDefaultQueryBatchSize = 1000
)
var (
	SQLAggregatePumpPrefix = "SQL-aggregate-pump"
	SQLAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLAGGREGATE" + PUMPS_ENV_META_PREFIX
)
var (
	SQSPrefix     = "sqs-pump"
	SQSDefaultENV = PUMPS_ENV_PREFIX + "_SQS" + PUMPS_ENV_META_PREFIX
)
var (
	Version   = "v1.11.0"
	BuiltBy   string
	Commit    string
	BuildDate string
)
var AvailablePumps map[string]Pump
var GraphSQLDefaultENV = PUMPS_ENV_PREFIX + "_GRAPH_SQL" + PUMPS_ENV_META_PREFIX
var SQLGraphAggregateDefaultENV = PUMPS_ENV_PREFIX + "_SQLGRAPHAGGREGATE" + PUMPS_ENV_META_PREFIX
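
These defaults compose as PUMPS_ENV_PREFIX + "_<PUMP>" + PUMPS_ENV_META_PREFIX, and individual fields are then overridden by appending the field name to the resulting prefix. A hedged sketch (the TYPE and CONNECTIONSTRING suffixes are assumptions based on the SQL pump's `type` and `connection_string` options):

// SQLDefaultENV evaluates to "TYK_PMP_PUMPS_SQL_META", so for example:
// TYK_PMP_PUMPS_SQL_META_TYPE=postgres
// TYK_PMP_PUMPS_SQL_META_CONNECTIONSTRING="host=localhost user=tyk dbname=tyk_analytics"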

Functions

func Dialect added in v1.5.0

func Dialect(cfg *SQLConf) (gorm.Dialector, error)

func LoadHeadersFromRawRequest added in v1.6.0

func LoadHeadersFromRawRequest(rawRequest string) (http.Header, error)

func LoadHeadersFromRawResponse added in v1.6.0

func LoadHeadersFromRawResponse(rawResponse string) (http.Header, error)
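
Both helpers parse the raw HTTP payloads carried in analytics records back into http.Header values. A usage sketch, assuming the input is the base64-encoded payload that Tyk stores in an analytics record's RawRequest field (the request bytes here are illustrative):

import (
	"encoding/base64"
	"fmt"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func printTraceHeader() {
	// Illustrative raw request, base64-encoded the way analytics records store it (assumption).
	raw := base64.StdEncoding.EncodeToString(
		[]byte("GET / HTTP/1.1\r\nHost: example.com\r\nX-Trace-Id: abc123\r\n\r\n"))
	headers, err := pumps.LoadHeadersFromRawRequest(raw)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(headers.Get("X-Trace-Id")) // abc123
}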

func Min added in v1.6.0

func Min(a, b int) int

func NewLogzioClient

func NewLogzioClient(conf *LogzioPumpConfig) (*lg.LogzioSender, error)

Types

type ApiKeyTransport added in v1.2.0

type ApiKeyTransport struct {
	APIKey   string
	APIKeyID string
}

func (*ApiKeyTransport) RoundTrip added in v1.2.0

func (t *ApiKeyTransport) RoundTrip(r *http.Request) (*http.Response, error)

RoundTrip for ApiKeyTransport auth
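
As a usage sketch (the client wiring and placeholder credentials are illustrative; only ApiKeyTransport and RoundTrip come from this package), the transport can be plugged into a standard http.Client so that every outgoing Elasticsearch request carries the ApiKey Authorization header described in ElasticsearchConf:

import (
	"net/http"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func newESClient() *http.Client {
	return &http.Client{
		Transport: &pumps.ApiKeyTransport{
			APIKey:   "my-api-key",    // placeholder: ElasticsearchConf.AuthAPIKey
			APIKeyID: "my-api-key-id", // placeholder: ElasticsearchConf.AuthAPIKeyID
		},
	}
}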

type BaseMongoConf added in v1.0.0

type BaseMongoConf struct {
	// Prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_MONGO_META` for Mongo Pump
	// `TYK_PMP_PUMPS_UPTIME_META` for Uptime Pump
	// `TYK_PMP_PUMPS_MONGOAGGREGATE_META` for Mongo Aggregate Pump
	// `TYK_PMP_PUMPS_MONGOSELECTIVE_META` for Mongo Selective Pump
	// `TYK_PMP_PUMPS_MONGOGRAPH_META` for Mongo Graph Pump.
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The full URL to your MongoDB instance. This can be a clustered instance if necessary and
	// should include the database and username/password data.
	MongoURL string `json:"mongo_url" mapstructure:"mongo_url"`
	// Set to true to enable Mongo SSL connection.
	MongoUseSSL bool `json:"mongo_use_ssl" mapstructure:"mongo_use_ssl"`
	// Allows the use of self-signed certificates when connecting to an encrypted MongoDB database.
	MongoSSLInsecureSkipVerify bool `json:"mongo_ssl_insecure_skip_verify" mapstructure:"mongo_ssl_insecure_skip_verify"`
	// Ignore hostname check when it differs from the original (for example with SSH tunneling).
	// The rest of the TLS verification will still be performed.
	MongoSSLAllowInvalidHostnames bool `json:"mongo_ssl_allow_invalid_hostnames" mapstructure:"mongo_ssl_allow_invalid_hostnames"`
	// Path to the PEM file with trusted root certificates
	MongoSSLCAFile string `json:"mongo_ssl_ca_file" mapstructure:"mongo_ssl_ca_file"`
	// Path to the PEM file which contains both client certificate and private key. This is
	// required for Mutual TLS.
	MongoSSLPEMKeyfile string `json:"mongo_ssl_pem_keyfile" mapstructure:"mongo_ssl_pem_keyfile"`
	// Specifies the MongoDB type. If it's 0, you are using standard MongoDB; if it's 1, AWS DocumentDB; if it's 2, CosmosDB.
	// Defaults to standard MongoDB (0).
	MongoDBType MongoType `json:"mongo_db_type" mapstructure:"mongo_db_type"`
	// Set to true to disable the default tyk index creation.
	OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"`
	// Set the consistency mode for the session, it defaults to `Strong`. The valid values are: strong, monotonic, eventual.
	MongoSessionConsistency string `json:"mongo_session_consistency" mapstructure:"mongo_session_consistency"`
	// MongoDriverType is the type of the driver (library) to use. The valid values are: `mongo-go` and `mgo`.
	// Since v1.9, the default driver is "mongo-go". Check out this guide to [learn about MongoDB drivers supported by Tyk Pump](https://github.com/TykTechnologies/tyk-pump#driver-type).
	MongoDriverType string `json:"driver" mapstructure:"driver"`
	// MongoDirectConnection informs whether to establish connections only with the specified seed servers,
	// or to obtain information for the whole cluster and establish connections with further servers too.
	// If true, the client will only connect to the host provided in the ConnectionString
	// and won't attempt to discover other hosts in the cluster. Useful when network restrictions
	// prevent discovery, such as with SSH tunneling. Default is false.
	MongoDirectConnection bool `json:"mongo_direct_connection" mapstructure:"mongo_direct_connection"`
}

func (*BaseMongoConf) GetBlurredURL added in v1.2.0

func (b *BaseMongoConf) GetBlurredURL() string

type CSVConf

type CSVConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_CSV_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The directory and the filename where the CSV data will be stored.
	CSVDir string `json:"csv_dir" mapstructure:"csv_dir"`
}

@PumpConf CSV
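
A minimal configuration sketch, mirroring the JSON shape of the dogstatsd example further down (the pump key and directory value are illustrative):

"csv": {
  "type": "csv",
  "meta": {
    "csv_dir": "./analytics-csv"
  }
}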

type CSVPump

type CSVPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*CSVPump) GetEnvPrefix added in v1.3.0

func (c *CSVPump) GetEnvPrefix() string

func (*CSVPump) GetName

func (c *CSVPump) GetName() string

func (*CSVPump) Init

func (c *CSVPump) Init(conf interface{}) error

func (*CSVPump) New

func (c *CSVPump) New() Pump

func (*CSVPump) WriteData

func (c *CSVPump) WriteData(ctx context.Context, data []interface{}) error

type CommonPumpConfig added in v1.2.0

type CommonPumpConfig struct {
	OmitDetailedRecording bool
	// contains filtered or unexported fields
}

func (*CommonPumpConfig) GetDecodedRequest added in v1.8.0

func (p *CommonPumpConfig) GetDecodedRequest() bool

func (*CommonPumpConfig) GetDecodedResponse added in v1.8.0

func (p *CommonPumpConfig) GetDecodedResponse() bool

func (*CommonPumpConfig) GetEnvPrefix added in v1.3.0

func (p *CommonPumpConfig) GetEnvPrefix() string

func (*CommonPumpConfig) GetFilters added in v1.2.0

func (*CommonPumpConfig) GetIgnoreFields added in v1.7.0

func (p *CommonPumpConfig) GetIgnoreFields() []string

func (*CommonPumpConfig) GetMaxRecordSize added in v1.5.0

func (p *CommonPumpConfig) GetMaxRecordSize() int

func (*CommonPumpConfig) GetOmitDetailedRecording added in v1.2.0

func (p *CommonPumpConfig) GetOmitDetailedRecording() bool

func (*CommonPumpConfig) GetTimeout added in v1.2.0

func (p *CommonPumpConfig) GetTimeout() int

func (*CommonPumpConfig) SetDecodingRequest added in v1.8.0

func (p *CommonPumpConfig) SetDecodingRequest(decoding bool)

func (*CommonPumpConfig) SetDecodingResponse added in v1.8.0

func (p *CommonPumpConfig) SetDecodingResponse(decoding bool)

func (*CommonPumpConfig) SetFilters added in v1.2.0

func (p *CommonPumpConfig) SetFilters(filters analytics.AnalyticsFilters)

func (*CommonPumpConfig) SetIgnoreFields added in v1.7.0

func (p *CommonPumpConfig) SetIgnoreFields(fields []string)

func (*CommonPumpConfig) SetLogLevel added in v1.7.0

func (p *CommonPumpConfig) SetLogLevel(level logrus.Level)

func (*CommonPumpConfig) SetMaxRecordSize added in v1.5.0

func (p *CommonPumpConfig) SetMaxRecordSize(size int)

func (*CommonPumpConfig) SetOmitDetailedRecording added in v1.2.0

func (p *CommonPumpConfig) SetOmitDetailedRecording(OmitDetailedRecording bool)

func (*CommonPumpConfig) SetTimeout added in v1.2.0

func (p *CommonPumpConfig) SetTimeout(timeout int)

func (*CommonPumpConfig) Shutdown added in v1.5.0

func (p *CommonPumpConfig) Shutdown() error
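
CommonPumpConfig supplies default implementations for most of the Pump interface, so a custom pump only needs to embed it and add the pump-specific methods. A minimal sketch (EchoPump is hypothetical and not part of this package):

import (
	"context"
	"fmt"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

// EchoPump prints every analytics record it receives; the remaining Pump
// methods are inherited from the embedded CommonPumpConfig.
type EchoPump struct {
	pumps.CommonPumpConfig
}

func (e *EchoPump) New() pumps.Pump             { return &EchoPump{} }
func (e *EchoPump) GetName() string             { return "Echo Pump" }
func (e *EchoPump) Init(conf interface{}) error { return nil }

func (e *EchoPump) WriteData(ctx context.Context, data []interface{}) error {
	for _, record := range data {
		fmt.Printf("%+v\n", record)
	}
	return nil
}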

type CustomMetrics added in v1.7.0

type CustomMetrics []PrometheusMetric

func (*CustomMetrics) Set added in v1.7.0

func (metrics *CustomMetrics) Set(data string) error

type DogStatsdConf

type DogStatsdConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_DOGSTATSD_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Prefix for your metrics to datadog.
	Namespace string `json:"namespace" mapstructure:"namespace"`
	// Address of the datadog agent including host & port.
	Address string `json:"address" mapstructure:"address"`
	// Defaults to `1` which equates to `100%` of requests. To sample at `50%`, set to `0.5`.
	SampleRate float64 `json:"sample_rate" mapstructure:"sample_rate"`
	// Enable async UDS over UDP https://github.com/Datadog/datadog-go#unix-domain-sockets-client.
	AsyncUDS bool `json:"async_uds" mapstructure:"async_uds"`
	// Integer write timeout in seconds if `async_uds: true`.
	AsyncUDSWriteTimeout int `json:"async_uds_write_timeout_seconds" mapstructure:"async_uds_write_timeout_seconds"`
	// Enable buffering of messages.
	Buffered bool `json:"buffered" mapstructure:"buffered"`
	// Max messages in single datagram if `buffered: true`. Default 16.
	BufferedMaxMessages int `json:"buffered_max_messages" mapstructure:"buffered_max_messages"`
	// List of tags to be added to the metric. The possible options are listed in the below example.
	//
	// If no tag is specified the fallback behavior is to use the below tags:
	// - `path`
	// - `method`
	// - `response_code`
	// - `api_version`
	// - `api_name`
	// - `api_id`
	// - `org_id`
	// - `tracked`
	// - `oauth_id`
	//
	// Note that this configuration can generate significant charges due to the unbounded nature of
	// the `path` tag.
	//
	// ```{.json}
	// "dogstatsd": {
	//   "type": "dogstatsd",
	//   "meta": {
	//     "address": "localhost:8125",
	//     "namespace": "pump",
	//     "async_uds": true,
	//     "async_uds_write_timeout_seconds": 2,
	//     "buffered": true,
	//     "buffered_max_messages": 32,
	//     "sample_rate": 0.5,
	//     "tags": [
	//       "method",
	//       "response_code",
	//       "api_version",
	//       "api_name",
	//       "api_id",
	//       "org_id",
	//       "tracked",
	//       "path",
	//       "oauth_id"
	//     ]
	//   }
	// },
	// ```
	//
	// On startup, you should see the loaded configs when initializing the dogstatsd pump
	// ```
	// [May 10 15:23:44]  INFO dogstatsd: initializing pump
	// [May 10 15:23:44]  INFO dogstatsd: namespace: pump.
	// [May 10 15:23:44]  INFO dogstatsd: sample_rate: 50%
	// [May 10 15:23:44]  INFO dogstatsd: buffered: true, max_messages: 32
	// [May 10 15:23:44]  INFO dogstatsd: async_uds: true, write_timeout: 2s
	// ```
	Tags []string `json:"tags" mapstructure:"tags"`
}

@PumpConf DogStatsd

type DogStatsdPump

type DogStatsdPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*DogStatsdPump) GetEnvPrefix added in v1.3.0

func (s *DogStatsdPump) GetEnvPrefix() string

func (*DogStatsdPump) GetName

func (s *DogStatsdPump) GetName() string

func (*DogStatsdPump) Init

func (s *DogStatsdPump) Init(conf interface{}) error

func (*DogStatsdPump) New

func (s *DogStatsdPump) New() Pump

func (*DogStatsdPump) Shutdown added in v1.5.0

func (s *DogStatsdPump) Shutdown() error

func (*DogStatsdPump) WriteData

func (s *DogStatsdPump) WriteData(ctx context.Context, data []interface{}) error

type DummyPump

type DummyPump struct {
	CommonPumpConfig
}

func (*DummyPump) GetName

func (p *DummyPump) GetName() string

func (*DummyPump) Init

func (p *DummyPump) Init(conf interface{}) error

func (*DummyPump) New

func (p *DummyPump) New() Pump

func (*DummyPump) WriteData

func (p *DummyPump) WriteData(ctx context.Context, data []interface{}) error

type Elasticsearch3Operator

type Elasticsearch3Operator struct {
	// contains filtered or unexported fields
}

type Elasticsearch5Operator

type Elasticsearch5Operator struct {
	// contains filtered or unexported fields
}

type Elasticsearch6Operator

type Elasticsearch6Operator struct {
	// contains filtered or unexported fields
}

type Elasticsearch7Operator added in v1.5.0

type Elasticsearch7Operator struct {
	// contains filtered or unexported fields
}

type ElasticsearchBulkConfig added in v1.0.0

type ElasticsearchBulkConfig struct {
	// Number of workers. Defaults to 1.
	Workers int `json:"workers" mapstructure:"workers"`
	// Specifies the time in seconds to flush the data and send it to ES. Default disabled.
	FlushInterval int `json:"flush_interval" mapstructure:"flush_interval"`
	// Specifies the number of requests needed to flush the data and send it to ES. Defaults to
	// 1000 requests. Can be disabled with -1 if needed.
	BulkActions int `json:"bulk_actions" mapstructure:"bulk_actions"`
	// Specifies the size (in bytes) needed to flush the data and send it to ES. Defaults to 5MB.
	// Can be disabled with -1 if needed.
	BulkSize int `json:"bulk_size" mapstructure:"bulk_size"`
}

type ElasticsearchConf

type ElasticsearchConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_ELASTICSEARCH_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The name of the index that all the analytics data will be placed in. Defaults to
	// "tyk_analytics".
	IndexName string `json:"index_name" mapstructure:"index_name"`
	// If sniffing is disabled, the URL that all data will be sent to. Defaults to
	// "http://localhost:9200".
	ElasticsearchURL string `json:"elasticsearch_url" mapstructure:"elasticsearch_url"`
	// If sniffing is enabled, the "elasticsearch_url" will be used to make a request to get a
	// list of all the nodes in the cluster, the returned addresses will then be used. Defaults to
	// `false`.
	EnableSniffing bool `json:"use_sniffing" mapstructure:"use_sniffing"`
	// The type of the document that is created in ES. Defaults to "tyk_analytics".
	DocumentType string `json:"document_type" mapstructure:"document_type"`
	// Appends the date to the end of the index name, so each day's data is split into a different
	// index name. E.g. tyk_analytics-2016.02.28. Defaults to `false`.
	RollingIndex bool `json:"rolling_index" mapstructure:"rolling_index"`
	// If set to `true`, the following additional fields will be included: Raw Request, Raw Response and
	// User Agent.
	ExtendedStatistics bool `json:"extended_stats" mapstructure:"extended_stats"`
	// When enabled, generate _id for outgoing records. This prevents duplicate records when
	// retrying ES.
	GenerateID bool `json:"generate_id" mapstructure:"generate_id"`
	// Allows the base64-encoded data to be decoded before being passed to ES.
	DecodeBase64 bool `json:"decode_base64" mapstructure:"decode_base64"`
	// Specifies the ES version. Use "3" for ES 3.X, "5" for ES 5.X, "6" for ES 6.X, "7" for ES
	// 7.X . Defaults to "3".
	Version string `json:"version" mapstructure:"version"`
	// Disable batch writing. Defaults to false.
	DisableBulk bool `json:"disable_bulk" mapstructure:"disable_bulk"`
	// Batch writing trigger configuration. Each option is ORed with the others:
	BulkConfig ElasticsearchBulkConfig `json:"bulk_config" mapstructure:"bulk_config"`
	// API Key ID used for APIKey auth in ES. It's sent to ES in the Authorization header as ApiKey base64(auth_api_key_id:auth_api_key).
	AuthAPIKeyID string `json:"auth_api_key_id" mapstructure:"auth_api_key_id"`
	// API Key used for APIKey auth in ES. It's sent to ES in the Authorization header as ApiKey base64(auth_api_key_id:auth_api_key).
	AuthAPIKey string `json:"auth_api_key" mapstructure:"auth_api_key"`
	// Basic auth username. It's sent to ES in the Authorization header as username:password encoded in base64.
	Username string `json:"auth_basic_username" mapstructure:"auth_basic_username"`
	// Basic auth password. It's sent to ES in the Authorization header as username:password encoded in base64.
	Password string `json:"auth_basic_password" mapstructure:"auth_basic_password"`
	// Enables SSL connection.
	UseSSL bool `json:"use_ssl" mapstructure:"use_ssl"`
	// Controls whether the pump client verifies the Elasticsearch server's certificate chain and hostname.
	SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"`
	// Can be used to set a custom certificate file for authentication with Elasticsearch.
	SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"`
	// Can be used to set a custom key file for authentication with Elasticsearch.
	SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"`
}

@PumpConf Elasticsearch
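
A configuration sketch in the same JSON shape as the dogstatsd example above (values are illustrative; any omitted field keeps its documented default):

"elasticsearch": {
  "type": "elasticsearch",
  "meta": {
    "index_name": "tyk_analytics",
    "elasticsearch_url": "http://localhost:9200",
    "version": "7",
    "generate_id": true,
    "bulk_config": {
      "workers": 2,
      "flush_interval": 60
    }
  }
}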

type ElasticsearchOperator

type ElasticsearchOperator interface {
	// contains filtered or unexported methods
}

type ElasticsearchPump

type ElasticsearchPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*ElasticsearchPump) GetEnvPrefix added in v1.3.0

func (e *ElasticsearchPump) GetEnvPrefix() string

func (*ElasticsearchPump) GetName

func (e *ElasticsearchPump) GetName() string

func (*ElasticsearchPump) GetTLSConfig added in v1.7.0

func (e *ElasticsearchPump) GetTLSConfig() (*tls.Config, error)

GetTLSConfig sets the TLS config for the pump

func (*ElasticsearchPump) Init

func (e *ElasticsearchPump) Init(config interface{}) error

func (*ElasticsearchPump) New

func (e *ElasticsearchPump) New() Pump

func (*ElasticsearchPump) Shutdown added in v1.5.0

func (e *ElasticsearchPump) Shutdown() error

func (*ElasticsearchPump) WriteData

func (e *ElasticsearchPump) WriteData(ctx context.Context, data []interface{}) error

type GraphMongoPump added in v1.7.0

type GraphMongoPump struct {
	CommonPumpConfig
	MongoPump
}

func (*GraphMongoPump) GetEnvPrefix added in v1.7.0

func (g *GraphMongoPump) GetEnvPrefix() string

func (*GraphMongoPump) GetName added in v1.7.0

func (g *GraphMongoPump) GetName() string

func (*GraphMongoPump) Init added in v1.7.0

func (g *GraphMongoPump) Init(config interface{}) error

func (*GraphMongoPump) New added in v1.7.0

func (g *GraphMongoPump) New() Pump

func (*GraphMongoPump) SetDecodingRequest added in v1.8.0

func (g *GraphMongoPump) SetDecodingRequest(decoding bool)

func (*GraphMongoPump) SetDecodingResponse added in v1.8.0

func (g *GraphMongoPump) SetDecodingResponse(decoding bool)

func (*GraphMongoPump) WriteData added in v1.7.0

func (g *GraphMongoPump) WriteData(ctx context.Context, data []interface{}) error

type GraphSQLAggregatePump added in v1.8.0

type GraphSQLAggregatePump struct {
	SQLConf *SQLAggregatePumpConf

	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*GraphSQLAggregatePump) DoAggregatedWriting added in v1.8.0

func (s *GraphSQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID, apiID string, ag *analytics.GraphRecordAggregate) error

func (*GraphSQLAggregatePump) GetEnvPrefix added in v1.9.0

func (s *GraphSQLAggregatePump) GetEnvPrefix() string

func (*GraphSQLAggregatePump) GetName added in v1.8.0

func (s *GraphSQLAggregatePump) GetName() string

func (*GraphSQLAggregatePump) Init added in v1.8.0

func (s *GraphSQLAggregatePump) Init(conf interface{}) error

func (*GraphSQLAggregatePump) New added in v1.8.0

func (s *GraphSQLAggregatePump) New() Pump

func (*GraphSQLAggregatePump) WriteData added in v1.8.0

func (s *GraphSQLAggregatePump) WriteData(ctx context.Context, data []interface{}) error

type GraphSQLConf added in v1.8.0

type GraphSQLConf struct {
	// TableName is a configuration field unique to the sql-graph pump. It specifies
	// the name of the SQL table to be created/used by the pump when sharding is not
	// enabled; when sharding is enabled, it specifies the table prefix.
	TableName string `json:"table_name" mapstructure:"table_name"`

	SQLConf `mapstructure:",squash"`
}

type GraphSQLPump added in v1.8.0

type GraphSQLPump struct {
	Conf *GraphSQLConf

	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*GraphSQLPump) GetEnvPrefix added in v1.8.0

func (g *GraphSQLPump) GetEnvPrefix() string

func (*GraphSQLPump) GetName added in v1.8.0

func (g *GraphSQLPump) GetName() string

func (*GraphSQLPump) Init added in v1.8.0

func (g *GraphSQLPump) Init(conf interface{}) error

func (*GraphSQLPump) New added in v1.8.0

func (g *GraphSQLPump) New() Pump

func (*GraphSQLPump) SetLogLevel added in v1.8.0

func (g *GraphSQLPump) SetLogLevel(level logrus.Level)

func (*GraphSQLPump) WriteData added in v1.8.0

func (g *GraphSQLPump) WriteData(ctx context.Context, data []interface{}) error

type GraylogConf

type GraylogConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_GRAYLOG_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Graylog host.
	GraylogHost string `json:"host" mapstructure:"host"`
	// Graylog port.
	GraylogPort int `json:"port" mapstructure:"port"`
	// List of tags to be added to the metric. The possible options are listed in the below example.
	//
	// If no tag is specified, the fallback behaviour is to send nothing.
	// The possible values are:
	// - `path`
	// - `method`
	// - `response_code`
	// - `api_version`
	// - `api_name`
	// - `api_id`
	// - `org_id`
	// - `tracked`
	// - `oauth_id`
	// - `raw_request`
	// - `raw_response`
	// - `request_time`
	// - `ip_address`
	Tags []string `json:"tags" mapstructure:"tags"`
}

@PumpConf Graylog

type GraylogPump

type GraylogPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*GraylogPump) GetEnvPrefix added in v1.3.0

func (p *GraylogPump) GetEnvPrefix() string

func (*GraylogPump) GetName

func (p *GraylogPump) GetName() string

func (*GraylogPump) Init

func (p *GraylogPump) Init(conf interface{}) error

func (*GraylogPump) New

func (p *GraylogPump) New() Pump

func (*GraylogPump) WriteData

func (p *GraylogPump) WriteData(ctx context.Context, data []interface{}) error

type HybridPump

type HybridPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

HybridPump sends analytics to MDCB over RPC

func (*HybridPump) GetName

func (p *HybridPump) GetName() string

func (*HybridPump) Init

func (p *HybridPump) Init(config interface{}) error

func (*HybridPump) New

func (p *HybridPump) New() Pump

func (*HybridPump) RPCLogin added in v1.8.0

func (p *HybridPump) RPCLogin() error

func (*HybridPump) Shutdown added in v1.8.0

func (p *HybridPump) Shutdown() error

func (*HybridPump) WriteData

func (p *HybridPump) WriteData(ctx context.Context, data []interface{}) error

type HybridPumpConf added in v1.8.0

type HybridPumpConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_HYBRID_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`

	// MDCB URL connection string
	ConnectionString string `mapstructure:"connection_string"`
	// Your organization ID to connect to the MDCB installation.
	RPCKey string `mapstructure:"rpc_key"`
	// This is the API key of a user used to authenticate and authorize Hybrid Pump access through MDCB.
	// The user should be a standard Dashboard user with minimal privileges so as to reduce any risk if the user is compromised.
	APIKey string `mapstructure:"api_key"`

	// Specifies prefixes of tags that should be ignored if `aggregated` is set to `true`.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`

	// Hybrid pump RPC calls timeout in seconds. Defaults to `10` seconds.
	CallTimeout int `mapstructure:"call_timeout"`
	// Hybrid pump connection pool size. Defaults to `5`.
	RPCPoolSize int `mapstructure:"rpc_pool_size"`

	// Send aggregated analytics data to Tyk MDCB
	Aggregated bool `mapstructure:"aggregated"`
	// Specifies whether aggregated data should be stored for all endpoints if `aggregated` is set to `true`. Defaults to `false`,
	// which means that aggregated data is only stored for `tracked endpoints`.
	TrackAllPaths bool `mapstructure:"track_all_paths"`
	// Determines if the aggregations should be made per minute (true) or per hour (false) if `aggregated` is set to `true`.
	StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"`

	// Use SSL to connect to Tyk MDCB
	UseSSL bool `mapstructure:"use_ssl"`
	// Skip SSL verification
	SSLInsecureSkipVerify bool `mapstructure:"ssl_insecure_skip_verify"`
	// contains filtered or unexported fields
}

@PumpConf Hybrid
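
A configuration sketch (values are placeholders; the JSON shape mirrors the dogstatsd example above):

"hybrid": {
  "type": "hybrid",
  "meta": {
    "connection_string": "mdcb.example.com:9091",
    "rpc_key": "<org-id>",
    "api_key": "<dashboard-user-api-key>",
    "aggregated": true,
    "call_timeout": 10,
    "rpc_pool_size": 5
  }
}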

func (*HybridPumpConf) CheckDefaults added in v1.8.0

func (conf *HybridPumpConf) CheckDefaults()

type Influx2Conf added in v1.5.1

type Influx2Conf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_INFLUX2_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// InfluxDB2 pump bucket name.
	BucketName string `mapstructure:"bucket" json:"bucket"`
	// InfluxDB2 pump organization name.
	OrgName string `mapstructure:"organization" json:"organization"`
	// InfluxDB2 pump host.
	Addr string `mapstructure:"address" json:"address"`
	// InfluxDB2 pump database token.
	Token string `mapstructure:"token" json:"token"`
	// Define which Analytics fields should be sent to InfluxDB2. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `mapstructure:"fields" json:"fields"`
	// List of tags to be added to the metric.
	Tags []string `mapstructure:"tags" json:"tags"`
	// Flush data to InfluxDB2 as soon as the pump receives it
	Flush bool `mapstructure:"flush" json:"flush"`
	// Create the bucket if it doesn't exist
	CreateMissingBucket bool `mapstructure:"create_missing_bucket" json:"create_missing_bucket"`
	// New bucket configuration
	NewBucketConfig NewBucket `mapstructure:"new_bucket_config" json:"new_bucket_config"`
}

@PumpConf Influx2

type Influx2Pump added in v1.5.1

type Influx2Pump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*Influx2Pump) GetEnvPrefix added in v1.5.1

func (i *Influx2Pump) GetEnvPrefix() string

func (*Influx2Pump) GetName added in v1.5.1

func (i *Influx2Pump) GetName() string

func (*Influx2Pump) Init added in v1.5.1

func (i *Influx2Pump) Init(config interface{}) error

func (*Influx2Pump) New added in v1.5.1

func (i *Influx2Pump) New() Pump

func (*Influx2Pump) Shutdown added in v1.5.1

func (i *Influx2Pump) Shutdown() error

func (*Influx2Pump) WriteData added in v1.5.1

func (i *Influx2Pump) WriteData(ctx context.Context, data []interface{}) error

type InfluxConf

type InfluxConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_INFLUX_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// InfluxDB pump database name.
	DatabaseName string `json:"database_name" mapstructure:"database_name"`
	// InfluxDB pump host.
	Addr string `json:"address" mapstructure:"address"`
	// InfluxDB pump database username.
	Username string `json:"username" mapstructure:"username"`
	// InfluxDB pump database password.
	Password string `json:"password" mapstructure:"password"`
	// Define which Analytics fields should be sent to InfluxDB. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `json:"fields" mapstructure:"fields"`
	// List of tags to be added to the metric.
	Tags []string `json:"tags" mapstructure:"tags"`
}

@PumpConf Influx

type InfluxPump

type InfluxPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*InfluxPump) GetEnvPrefix added in v1.3.0

func (i *InfluxPump) GetEnvPrefix() string

func (*InfluxPump) GetName

func (i *InfluxPump) GetName() string

func (*InfluxPump) Init

func (i *InfluxPump) Init(config interface{}) error

func (*InfluxPump) New

func (i *InfluxPump) New() Pump

func (*InfluxPump) WriteData

func (i *InfluxPump) WriteData(ctx context.Context, data []interface{}) error

type Json

type Json map[string]interface{}

type KafkaConf

type KafkaConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_KAFKA_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The list of brokers used to discover the partitions available on the kafka cluster. E.g.
	// "localhost:9092".
	Broker []string `json:"broker" mapstructure:"broker"`
	// Unique identifier for client connections established with Kafka.
	ClientId string `json:"client_id" mapstructure:"client_id"`
	// The topic that the writer will produce messages to.
	Topic string `json:"topic" mapstructure:"topic"`
	// Timeout is the maximum number of seconds to wait for a connect or write to complete.
	Timeout interface{} `json:"timeout" mapstructure:"timeout"`
	// Enable the "github.com/golang/snappy" codec to compress Kafka messages. Defaults to
	// `false`.
	Compressed bool `json:"compressed" mapstructure:"compressed"`
	// Can be used to set custom metadata inside the kafka message.
	MetaData map[string]string `json:"meta_data" mapstructure:"meta_data"`
	// Enables SSL connection.
	UseSSL bool `json:"use_ssl" mapstructure:"use_ssl"`
	// Controls whether the pump client verifies the kafka server's certificate chain and host
	// name.
	SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"`
	// Can be used to set custom certificate file for authentication with kafka.
	SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"`
	// Can be used to set custom key file for authentication with kafka.
	SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"`
	// SASL mechanism configuration. Only "plain" and "scram" are supported.
	SASLMechanism string `json:"sasl_mechanism" mapstructure:"sasl_mechanism"`
	// SASL username.
	Username string `json:"sasl_username" mapstructure:"sasl_username"`
	// SASL password.
	Password string `json:"sasl_password" mapstructure:"sasl_password"`
	// SASL algorithm. It's the algorithm specified for scram mechanism. It could be sha-512 or sha-256.
	// Defaults to "sha-256".
	Algorithm string `json:"sasl_algorithm" mapstructure:"sasl_algorithm"`
}

@PumpConf Kafka
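
A configuration sketch with SASL/SCRAM enabled (broker address, topic, and credentials are illustrative):

"kafka": {
  "type": "kafka",
  "meta": {
    "broker": ["localhost:9092"],
    "client_id": "tyk-pump",
    "topic": "tyk-analytics",
    "timeout": 5,
    "compressed": true,
    "use_ssl": true,
    "sasl_mechanism": "scram",
    "sasl_username": "user",
    "sasl_password": "password",
    "sasl_algorithm": "sha-512"
  }
}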

type KafkaPump

type KafkaPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*KafkaPump) GetEnvPrefix added in v1.3.0

func (k *KafkaPump) GetEnvPrefix() string

func (*KafkaPump) GetName

func (k *KafkaPump) GetName() string

func (*KafkaPump) Init

func (k *KafkaPump) Init(config interface{}) error

func (*KafkaPump) New

func (k *KafkaPump) New() Pump

func (*KafkaPump) WriteData

func (k *KafkaPump) WriteData(ctx context.Context, data []interface{}) error

type KinesisConf added in v1.11.0

type KinesisConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_KINESIS_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// A name to identify the stream. The stream name is scoped to the AWS account used by the application
	// that creates the stream. It is also scoped by AWS Region.
	// That is, two streams in two different AWS accounts can have the same name.
	// Two streams in the same AWS account but in two different Regions can also have the same name.
	StreamName string `mapstructure:"stream_name"`
	// AWS Region the Kinesis stream targets
	Region string `mapstructure:"region"`
	// Each PutRecords request (the function used in this pump) can support up to 500 records.
	// Each record in the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, including partition keys.
	// Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second.
	BatchSize int `mapstructure:"batch_size"`
}

@PumpConf Kinesis
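
A configuration sketch (stream name and region are illustrative; batch_size must respect the PutRecords limits noted above):

"kinesis": {
  "type": "kinesis",
  "meta": {
    "stream_name": "tyk-analytics-stream",
    "region": "eu-west-1",
    "batch_size": 100
  }
}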

type KinesisPump added in v1.11.0

type KinesisPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

KinesisPump is a Tyk Pump that sends analytics records to AWS Kinesis.

func (*KinesisPump) GetEnvPrefix added in v1.11.0

func (p *KinesisPump) GetEnvPrefix() string

func (*KinesisPump) GetName added in v1.11.0

func (p *KinesisPump) GetName() string

GetName returns the name of the pump.

func (*KinesisPump) Init added in v1.11.0

func (p *KinesisPump) Init(config interface{}) error

Init initializes the pump with configuration settings.

func (*KinesisPump) New added in v1.11.0

func (p *KinesisPump) New() Pump

func (*KinesisPump) WriteData added in v1.11.0

func (p *KinesisPump) WriteData(ctx context.Context, records []interface{}) error

WriteData writes the analytics records to AWS Kinesis in batches.

type LogzioPump

type LogzioPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*LogzioPump) GetEnvPrefix added in v1.3.0

func (p *LogzioPump) GetEnvPrefix() string

func (*LogzioPump) GetName

func (p *LogzioPump) GetName() string

func (*LogzioPump) Init

func (p *LogzioPump) Init(config interface{}) error

func (*LogzioPump) New

func (p *LogzioPump) New() Pump

func (*LogzioPump) WriteData

func (p *LogzioPump) WriteData(ctx context.Context, data []interface{}) error

type LogzioPumpConfig

type LogzioPumpConfig struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_LOGZIO_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Set the sender to check if it crosses the maximum allowed disk usage. Default value is
	// `true`.
	CheckDiskSpace bool `json:"check_disk_space" mapstructure:"check_disk_space"`
	// Set disk queue threshold, once the threshold is crossed the sender will not enqueue the
	// received logs. Default value is `98` (percentage of disk).
	DiskThreshold int `json:"disk_threshold" mapstructure:"disk_threshold"`
	// Set drain duration (flush logs on disk). Default value is `3s`.
	DrainDuration string `json:"drain_duration" mapstructure:"drain_duration"`
	// The directory for the queue.
	QueueDir string `json:"queue_dir" mapstructure:"queue_dir"`
	// Token for sending data to your logzio account.
	Token string `json:"token" mapstructure:"token"`
	// Use this if you do not want to use the default Logz.io URL, i.e. when using a proxy. Default is
	// `https://listener.logz.io:8071`.
	URL string `json:"url" mapstructure:"url"`
}

@PumpConf Logzio

func NewLogzioPumpConfig

func NewLogzioPumpConfig() *LogzioPumpConfig

type MoesifConf

type MoesifConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_MOESIF_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Moesif Application Id. You can find your Moesif Application Id from
	// [_Moesif Dashboard_](https://www.moesif.com/) -> _Top Right Menu_ -> _API Keys_ . Moesif
	// recommends creating separate Application Ids for each environment such as Production,
	// Staging, and Development to keep data isolated.
	ApplicationID string `json:"application_id" mapstructure:"application_id"`
	// An option to mask a specific request header field.
	RequestHeaderMasks []string `json:"request_header_masks" mapstructure:"request_header_masks"`
	// An option to mask a specific response header field.
	ResponseHeaderMasks []string `json:"response_header_masks" mapstructure:"response_header_masks"`
	// An option to mask a specific request body field.
	RequestBodyMasks []string `json:"request_body_masks" mapstructure:"request_body_masks"`
	// An option to mask a specific response body field.
	ResponseBodyMasks []string `json:"response_body_masks" mapstructure:"response_body_masks"`
	// An option to disable logging of request body. Default value is `false`.
	DisableCaptureRequestBody bool `json:"disable_capture_request_body" mapstructure:"disable_capture_request_body"`
	// An option to disable logging of response body. Default value is `false`.
	DisableCaptureResponseBody bool `json:"disable_capture_response_body" mapstructure:"disable_capture_response_body"`
	// An optional field name to identify User from a request or response header.
	UserIDHeader string `json:"user_id_header" mapstructure:"user_id_header"`
	// An optional field name to identify Company (Account) from a request or response header.
	CompanyIDHeader string `json:"company_id_header" mapstructure:"company_id_header"`
	// Set this to `true` to enable `bulk_config`.
	EnableBulk bool `json:"enable_bulk" mapstructure:"enable_bulk"`
	// Batch writing trigger configuration.
	//   * `"event_queue_size"` - (optional) Specifies the maximum
	// number of events to hold in the queue before sending to Moesif. In case of network issues, when
	// it is not able to connect/send events to Moesif, it skips adding new events to the queue to prevent
	// memory overflow. Type: int. Default value is `10000`.
	//   * `"batch_size"` - (optional) Specifies the maximum batch size
	// when sending to Moesif. Type: int. Default value is `200`.
	//   * `"timer_wake_up_seconds"` - (optional) Specifies how often (every n
	// seconds) the background thread runs to send events to Moesif. Type: int. Default value
	// is `2` seconds.
	BulkConfig map[string]interface{} `json:"bulk_config" mapstructure:"bulk_config"`
	// An optional request header field name used to identify the User in Moesif. Default value
	// is `authorization`.
	AuthorizationHeaderName string `json:"authorization_header_name" mapstructure:"authorization_header_name"`
	// An optional field name used to parse the User from the authorization header in Moesif. Default
	// value is `sub`.
	AuthorizationUserIdField string `json:"authorization_user_id_field" mapstructure:"authorization_user_id_field"`
}

@PumpConf Moesif
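
A configuration sketch with bulk writing enabled, using the bulk_config keys documented above (the application ID and header name are placeholders):

"moesif": {
  "type": "moesif",
  "meta": {
    "application_id": "<your-moesif-application-id>",
    "user_id_header": "X-User-Id",
    "enable_bulk": true,
    "bulk_config": {
      "event_queue_size": 10000,
      "batch_size": 200,
      "timer_wake_up_seconds": 2
    }
  }
}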

type MoesifPump

type MoesifPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*MoesifPump) GetEnvPrefix added in v1.3.0

func (p *MoesifPump) GetEnvPrefix() string

func (*MoesifPump) GetName

func (p *MoesifPump) GetName() string

func (*MoesifPump) GetTimeout added in v1.0.0

func (p *MoesifPump) GetTimeout() int

func (*MoesifPump) Init

func (p *MoesifPump) Init(config interface{}) error

func (*MoesifPump) New

func (p *MoesifPump) New() Pump

func (*MoesifPump) SetTimeout added in v1.0.0

func (p *MoesifPump) SetTimeout(timeout int)

func (*MoesifPump) Shutdown added in v1.6.0

func (p *MoesifPump) Shutdown() error

func (*MoesifPump) WriteData

func (p *MoesifPump) WriteData(ctx context.Context, data []interface{}) error

type MongoAggregateConf

type MongoAggregateConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf
	// If set to `true`, your pump will store analytics in both your organisation-defined
	// collection (z_tyk_analyticz_aggregate_{ORG ID}) and the org-less tyk_analytics_aggregates
	// collection. When set to `false`, your pump will only store analytics in your organisation-defined
	// collection.
	UseMixedCollection bool `json:"use_mixed_collection" mapstructure:"use_mixed_collection"`
	// Specifies whether aggregated data should be stored for all endpoints. Defaults to `false`,
	// which means that aggregated data is only stored for `tracked endpoints`.
	TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"`
	// Specifies prefixes of tags that should be ignored.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	// Determines the threshold for the number of tags in an aggregation. If the number of tags exceeds the threshold,
	// an alert will be printed.
	// Defaults to 1000.
	ThresholdLenTagList int `json:"threshold_len_tag_list" mapstructure:"threshold_len_tag_list"`
	// Determines if the aggregations should be made per minute (true) or per hour (false).
	StoreAnalyticsPerMinute bool `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"`
	// Determines the time window (in minutes) over which aggregations are made. It defaults to the maximum value of 60; the minimum is 1.
	// If StoreAnalyticsPerMinute is set to true, this field will be skipped.
	AggregationTime int `json:"aggregation_time" mapstructure:"aggregation_time"`
	// Determines whether self healing is activated or not.
	// Self healing allows the pump to handle Mongo document max-size errors by creating a new document when the max size is reached.
	// It also halves the AggregationTime field to avoid the same error in the future.
	EnableAggregateSelfHealing bool `json:"enable_aggregate_self_healing" mapstructure:"enable_aggregate_self_healing"`
	// This list determines which aggregations are going to be dropped and not stored in the collection.
	// Possible values are: "APIID","errors","versions","apikeys","oauthids","geo","tags","endpoints","keyendpoints",
	// "oauthendpoints", and "apiendpoints".
	IgnoreAggregationsList []string `json:"ignore_aggregations" mapstructure:"ignore_aggregations"`
}

@PumpConf MongoAggregate
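
A configuration sketch (the "mongo-pump-aggregate" key is the conventional pump name and the values are illustrative; mongo_url and the other BaseMongoConf fields are documented above):

"mongo-pump-aggregate": {
  "type": "mongo-pump-aggregate",
  "meta": {
    "mongo_url": "mongodb://localhost:27017/tyk_analytics",
    "use_mixed_collection": true,
    "aggregation_time": 30,
    "enable_aggregate_self_healing": true,
    "ignore_aggregations": ["tags", "endpoints"]
  }
}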

type MongoAggregatePump

type MongoAggregatePump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*MongoAggregatePump) DoAggregatedWriting added in v1.5.1

func (m *MongoAggregatePump) DoAggregatedWriting(ctx context.Context, filteredData *analytics.AnalyticsRecordAggregate, mixed bool) error

func (*MongoAggregatePump) GetCollectionName

func (m *MongoAggregatePump) GetCollectionName(orgid string) (string, error)

func (*MongoAggregatePump) GetEnvPrefix added in v1.3.0

func (m *MongoAggregatePump) GetEnvPrefix() string

func (*MongoAggregatePump) GetName

func (m *MongoAggregatePump) GetName() string

func (*MongoAggregatePump) Init

func (m *MongoAggregatePump) Init(config interface{}) error

func (*MongoAggregatePump) New

func (m *MongoAggregatePump) New() Pump

func (*MongoAggregatePump) SetAggregationTime added in v1.7.0

func (m *MongoAggregatePump) SetAggregationTime()

SetAggregationTime sets the aggregation time for the pump

func (*MongoAggregatePump) SetDecodingRequest added in v1.8.0

func (m *MongoAggregatePump) SetDecodingRequest(decoding bool)

func (*MongoAggregatePump) SetDecodingResponse added in v1.8.0

func (m *MongoAggregatePump) SetDecodingResponse(decoding bool)

func (*MongoAggregatePump) ShouldSelfHeal added in v1.7.0

func (m *MongoAggregatePump) ShouldSelfHeal(err error) bool

ShouldSelfHeal returns true if the pump should self heal

func (*MongoAggregatePump) WriteData

func (m *MongoAggregatePump) WriteData(ctx context.Context, data []interface{}) error

func (*MongoAggregatePump) WriteUptimeData

func (m *MongoAggregatePump) WriteUptimeData(data []interface{})

WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection

type MongoConf

type MongoConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf

	// Specifies the mongo collection name.
	CollectionName string `json:"collection_name" mapstructure:"collection_name"`
	// Maximum insert batch size for the mongo pump. If the batch we are writing surpasses this value, it will be sent in multiple batches.
	// Defaults to 10MB.
	MaxInsertBatchSizeBytes int `json:"max_insert_batch_size_bytes" mapstructure:"max_insert_batch_size_bytes"`
	// Maximum document size. If a document exceeds this value, it will be skipped.
	// Defaults to 10MB.
	MaxDocumentSizeBytes int `json:"max_document_size_bytes" mapstructure:"max_document_size_bytes"`
	// Size in bytes of the capped collection on 64-bit architectures.
	// Defaults to 5GB.
	CollectionCapMaxSizeBytes int `json:"collection_cap_max_size_bytes" mapstructure:"collection_cap_max_size_bytes"`
	// Enable collection capping. It's used to set a maximum size for the collection.
	CollectionCapEnable bool `json:"collection_cap_enable" mapstructure:"collection_cap_enable"`
}

@PumpConf Mongo

type MongoPump

type MongoPump struct {
	IsUptime bool

	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*MongoPump) AccumulateSet

func (m *MongoPump) AccumulateSet(data []interface{}, isForGraphRecords bool) [][]model.DBObject

AccumulateSet groups data items into chunks based on the max batch size limit while handling graph analytics records separately. It returns a 2D array of DBObjects.

func (*MongoPump) GetEnvPrefix added in v1.3.0

func (m *MongoPump) GetEnvPrefix() string

func (*MongoPump) GetName

func (m *MongoPump) GetName() string

func (*MongoPump) Init

func (m *MongoPump) Init(config interface{}) error

func (*MongoPump) New

func (m *MongoPump) New() Pump

func (*MongoPump) SetDecodingRequest added in v1.8.0

func (m *MongoPump) SetDecodingRequest(decoding bool)

func (*MongoPump) SetDecodingResponse added in v1.8.0

func (m *MongoPump) SetDecodingResponse(decoding bool)

func (*MongoPump) WriteData

func (m *MongoPump) WriteData(ctx context.Context, data []interface{}) error

func (*MongoPump) WriteUptimeData

func (m *MongoPump) WriteUptimeData(data []interface{})

WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection

type MongoSelectiveConf

type MongoSelectiveConf struct {
	// TYKCONFIGEXPAND
	BaseMongoConf
	// Maximum insert batch size for the mongo selective pump. If the batch we are writing surpasses this value, it will be sent in multiple batches.
	// Defaults to 10MB.
	MaxInsertBatchSizeBytes int `json:"max_insert_batch_size_bytes" mapstructure:"max_insert_batch_size_bytes"`
	// Maximum document size. If a document exceeds this value, it will be skipped.
	// Defaults to 10MB.
	MaxDocumentSizeBytes int `json:"max_document_size_bytes" mapstructure:"max_document_size_bytes"`
}

@PumpConf MongoSelective

type MongoSelectivePump

type MongoSelectivePump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*MongoSelectivePump) AccumulateSet

func (m *MongoSelectivePump) AccumulateSet(data []interface{}, collectionName string) [][]model.DBObject

AccumulateSet organizes analytics data into a set of chunks based on their size.

func (*MongoSelectivePump) GetCollectionName

func (m *MongoSelectivePump) GetCollectionName(orgid string) (string, error)

func (*MongoSelectivePump) GetEnvPrefix added in v1.3.0

func (m *MongoSelectivePump) GetEnvPrefix() string

func (*MongoSelectivePump) GetName

func (m *MongoSelectivePump) GetName() string

func (*MongoSelectivePump) Init

func (m *MongoSelectivePump) Init(config interface{}) error

func (*MongoSelectivePump) New

func (m *MongoSelectivePump) New() Pump

func (*MongoSelectivePump) SetDecodingRequest added in v1.8.0

func (m *MongoSelectivePump) SetDecodingRequest(decoding bool)

func (*MongoSelectivePump) SetDecodingResponse added in v1.8.0

func (m *MongoSelectivePump) SetDecodingResponse(decoding bool)

func (*MongoSelectivePump) WriteData

func (m *MongoSelectivePump) WriteData(ctx context.Context, data []interface{}) error

func (*MongoSelectivePump) WriteUptimeData

func (m *MongoSelectivePump) WriteUptimeData(data []interface{})

WriteUptimeData will pull the data from the in-memory store and drop it into the specified MongoDB collection

type MongoType added in v1.0.0

type MongoType int
const (
	StandardMongo MongoType = iota
	AWSDocumentDB
	CosmosDB
)

type MysqlConfig added in v1.5.0

type MysqlConfig struct {
	// Default size for string fields. Defaults to `256`.
	DefaultStringSize uint `json:"default_string_size" mapstructure:"default_string_size"`
	// Disable datetime precision, which is not supported before MySQL 5.6.
	DisableDatetimePrecision bool `json:"disable_datetime_precision" mapstructure:"disable_datetime_precision"`
	// Drop & create the index when renaming, since renaming an index is not supported before MySQL 5.7 and in MariaDB.
	DontSupportRenameIndex bool `json:"dont_support_rename_index" mapstructure:"dont_support_rename_index"`
	// Use `change` when renaming a column, since renaming a column is not supported before MySQL 8 and in MariaDB.
	DontSupportRenameColumn bool `json:"dont_support_rename_column" mapstructure:"dont_support_rename_column"`
	// Auto-configure based on the current MySQL version.
	SkipInitializeWithVersion bool `json:"skip_initialize_with_version" mapstructure:"skip_initialize_with_version"`
}

type NewBucket added in v1.5.1

type NewBucket struct {
	// A description visible on the InfluxDB2 UI
	Description string `mapstructure:"description" json:"description"`
	// Rules to expire or retain data. No rules means data never expires.
	RetentionRules []RetentionRule `mapstructure:"retention_rules" json:"retention_rules"`
}

Configuration required to create the Bucket if it doesn't already exist. See https://docs.influxdata.com/influxdb/v2.1/api/#operation/PostBuckets
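
A configuration sketch showing how NewBucket and RetentionRule fit into the Influx2 pump meta (values are illustrative; 2592000 seconds is 30 days):

"influx2": {
  "type": "influx2",
  "meta": {
    "address": "http://localhost:8086",
    "organization": "tyk",
    "bucket": "tyk-analytics",
    "token": "<influxdb2-token>",
    "create_missing_bucket": true,
    "new_bucket_config": {
      "description": "Tyk analytics (created by tyk-pump)",
      "retention_rules": [
        {
          "every_seconds": 2592000,
          "type": "expire"
        }
      ]
    }
  }
}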

type PostgresConfig added in v1.5.0

type PostgresConfig struct {
	// Disables implicit prepared statement usage.
	PreferSimpleProtocol bool `json:"prefer_simple_protocol" mapstructure:"prefer_simple_protocol"`
}

type PrometheusConf

type PrometheusConf struct {
	// Prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_PROMETHEUS_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The full URL to your Prometheus instance, {HOST}:{PORT}. For example `localhost:9090`.
	Addr string `json:"listen_address" mapstructure:"listen_address"`
	// The path to the Prometheus collection. For example `/metrics`.
	Path string `json:"path" mapstructure:"path"`
	// This will enable an experimental feature that will aggregate the histogram metrics request time values before exposing them to Prometheus.
	// Enabling this will reduce the CPU usage of your Prometheus pump, but you will lose histogram precision. Experimental.
	// Metrics to exclude from exposition. Currently, excludes only the base metrics.
	DisabledMetrics []string `json:"disabled_metrics" mapstructure:"disabled_metrics"`
	// Specifies if it should expose aggregated metrics for all the endpoints. By default, `false`,
	// which means that all API endpoints will be counted as 'unknown' unless the API uses the track endpoint plugin.
	TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"`
	// Custom Prometheus metrics.
	CustomMetrics CustomMetrics `json:"custom_metrics" mapstructure:"custom_metrics"`
}

@PumpConf Prometheus

type PrometheusMetric added in v1.6.0

type PrometheusMetric struct {
	// The name of the custom metric. For example: `tyk_http_status_per_api_name`
	Name string `json:"name" mapstructure:"name"`
	// Description text of the custom metric. For example: `HTTP status codes per API`
	Help string `json:"help" mapstructure:"help"`
	// Determines the type of the metric. There are currently two available options: `counter` or `histogram`.
	// In the case of histogram, you can only modify the labels, since it is always going to use the request_time.
	MetricType string `json:"metric_type" mapstructure:"metric_type"`
	// Defines the buckets into which observations are counted. The type is a float64 array and the default is [1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 1000, 2000, 5000, 10000, 30000, 60000]
	Buckets []float64 `json:"buckets" mapstructure:"buckets"`
	// Defines the partitions in the metrics. For example: ['response_code','api_name'].
	// The available labels are: `["host","method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id","request_time", "ip_address", "alias"]`.
	Labels []string `json:"labels" mapstructure:"labels"`
	// contains filtered or unexported fields
}
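
A custom_metrics sketch for the Prometheus pump meta, assembled from the field descriptions above (the metric name, help text, and labels are taken from the examples in those descriptions):

"custom_metrics": [
  {
    "name": "tyk_http_status_per_api_name",
    "help": "HTTP status codes per API",
    "metric_type": "counter",
    "labels": ["response_code", "api_name"]
  }
]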

func (*PrometheusMetric) Expose added in v1.7.0

func (pm *PrometheusMetric) Expose() error

Expose executes Prometheus library functions using the counter/histogram vector from the PrometheusMetric struct. If the PrometheusMetric is counterType, it executes the Prometheus client Add function to add the counters from counterMap to the label values of the metric. If the PrometheusMetric is histogramType and the aggregate_observations config is true, it calculates the average value of the metrics in the histogramMap and executes Prometheus Observe. If aggregate_observations is false, it does nothing, since the metric has already been exposed.

func (*PrometheusMetric) GetLabelsValues added in v1.6.0

func (pm *PrometheusMetric) GetLabelsValues(decoded analytics.AnalyticsRecord) []string

GetLabelsValues return a list of string values based on the custom metric labels.

func (*PrometheusMetric) Inc added in v1.7.0

func (pm *PrometheusMetric) Inc(values ...string) error

Inc is going to fill counterMap and histogramMap with the data from record.

func (*PrometheusMetric) InitVec added in v1.6.0

func (pm *PrometheusMetric) InitVec() error

InitVec inits the Prometheus metric based on the metric_type. It can only create counters and histograms; if the metric_type is anything else, it returns an error.

func (*PrometheusMetric) Observe added in v1.7.0

func (pm *PrometheusMetric) Observe(requestTime int64, values ...string) error

Observe will fill histogramMap with the sum of totalRequest and hits per label value if aggregate_observations is true. If aggregate_observations is set to false (default), it will execute Prometheus Observe directly.

type PrometheusPump

type PrometheusPump struct {

	// Per service
	TotalStatusMetrics  *prometheus.CounterVec
	PathStatusMetrics   *prometheus.CounterVec
	KeyStatusMetrics    *prometheus.CounterVec
	OauthStatusMetrics  *prometheus.CounterVec
	TotalLatencyMetrics *prometheus.HistogramVec

	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*PrometheusPump) CreateBasicMetrics added in v1.7.0

func (p *PrometheusPump) CreateBasicMetrics()

CreateBasicMetrics stores all the predefined pump metrics in the allMetrics slice

func (*PrometheusPump) GetEnvPrefix added in v1.3.0

func (p *PrometheusPump) GetEnvPrefix() string

func (*PrometheusPump) GetName

func (p *PrometheusPump) GetName() string

func (*PrometheusPump) Init

func (p *PrometheusPump) Init(conf interface{}) error

func (*PrometheusPump) InitCustomMetrics added in v1.7.0

func (p *PrometheusPump) InitCustomMetrics()

InitCustomMetrics initialises custom Prometheus metrics based on p.conf.CustomMetrics and adds them to p.allMetrics

func (*PrometheusPump) New

func (p *PrometheusPump) New() Pump

func (*PrometheusPump) WriteData

func (p *PrometheusPump) WriteData(ctx context.Context, data []interface{}) error

type Pump

type Pump interface {
	GetName() string
	New() Pump
	Init(interface{}) error
	WriteData(context.Context, []interface{}) error
	SetFilters(analytics.AnalyticsFilters)
	GetFilters() analytics.AnalyticsFilters
	SetTimeout(timeout int)
	GetTimeout() int
	SetOmitDetailedRecording(bool)
	GetOmitDetailedRecording() bool
	GetEnvPrefix() string
	Shutdown() error
	SetMaxRecordSize(size int)
	GetMaxRecordSize() int
	SetLogLevel(logrus.Level)
	SetIgnoreFields([]string)
	GetIgnoreFields() []string
	SetDecodingResponse(bool)
	GetDecodedResponse() bool
	SetDecodingRequest(bool)
	GetDecodedRequest() bool
}

func GetPumpByName

func GetPumpByName(name string) (Pump, error)
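
A lifecycle sketch tying the interface together (the "csv" pump name and the config map are assumptions for illustration; error handling is trimmed):

import (
	"context"
	"log"

	"github.com/TykTechnologies/tyk-pump/pumps"
)

func runOnce(records []interface{}) {
	pump, err := pumps.GetPumpByName("csv")
	if err != nil {
		log.Fatal(err)
	}
	p := pump.New()
	if err := p.Init(map[string]interface{}{"csv_dir": "./analytics-csv"}); err != nil {
		log.Fatal(err)
	}
	if err := p.WriteData(context.Background(), records); err != nil {
		log.Println("write failed:", err)
	}
	_ = p.Shutdown()
}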

type ResurfacePump added in v1.8.0

type ResurfacePump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*ResurfacePump) Flush added in v1.9.0

func (rp *ResurfacePump) Flush() error

func (*ResurfacePump) GetEnvPrefix added in v1.8.0

func (rp *ResurfacePump) GetEnvPrefix() string

func (*ResurfacePump) GetName added in v1.8.0

func (rp *ResurfacePump) GetName() string

func (*ResurfacePump) Init added in v1.8.0

func (rp *ResurfacePump) Init(config interface{}) error

func (*ResurfacePump) New added in v1.8.0

func (rp *ResurfacePump) New() Pump

func (*ResurfacePump) Shutdown added in v1.9.0

func (rp *ResurfacePump) Shutdown() error

func (*ResurfacePump) WriteData added in v1.8.0

func (rp *ResurfacePump) WriteData(ctx context.Context, data []interface{}) error

type ResurfacePumpConfig added in v1.8.0

type ResurfacePumpConfig struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	URL       string `mapstructure:"capture_url"`
	Rules     string
	Queue     []string
}

type RetentionRule added in v1.5.1

type RetentionRule struct {
	// Duration in seconds for how long data will be kept in the database. 0 means infinite.
	EverySeconds int64 `mapstructure:"every_seconds" json:"every_seconds"`
	// Shard duration measured in seconds.
	ShardGroupDurationSeconds int64 `mapstructure:"shard_group_duration_seconds" json:"shard_group_duration_seconds"`
	// Retention rule type. For example, "expire".
	Type string `mapstructure:"type" json:"type"`
}

type SQLAggregatePump added in v1.5.0

type SQLAggregatePump struct {
	CommonPumpConfig
	SQLConf *SQLAggregatePumpConf
	// contains filtered or unexported fields
}

func (*SQLAggregatePump) DoAggregatedWriting added in v1.5.1

func (c *SQLAggregatePump) DoAggregatedWriting(ctx context.Context, table, orgID string, ag analytics.AnalyticsRecordAggregate) error

func (*SQLAggregatePump) GetEnvPrefix added in v1.5.0

func (c *SQLAggregatePump) GetEnvPrefix() string

func (*SQLAggregatePump) GetName added in v1.5.0

func (c *SQLAggregatePump) GetName() string

func (*SQLAggregatePump) Init added in v1.5.0

func (c *SQLAggregatePump) Init(conf interface{}) error

func (*SQLAggregatePump) New added in v1.5.0

func (c *SQLAggregatePump) New() Pump

func (*SQLAggregatePump) SetDecodingRequest added in v1.8.0

func (c *SQLAggregatePump) SetDecodingRequest(decoding bool)

func (*SQLAggregatePump) SetDecodingResponse added in v1.8.0

func (c *SQLAggregatePump) SetDecodingResponse(decoding bool)

func (*SQLAggregatePump) WriteData added in v1.5.0

func (c *SQLAggregatePump) WriteData(ctx context.Context, data []interface{}) error

WriteData aggregates the passed data and writes it to the SQL database. When table sharding is enabled, startIndex and endIndex are found by checking the timestamps of the records: the main loop finds the index where a new day starts, and the data is then passed to the AggregateData function and written to the database day by day, each day into its own table. If table sharding is not enabled, the loop runs only once, and all the data is passed to the AggregateData function at once and written to a single table.
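
Conceptually, the per-day shard name is derived from the record timestamp, along the lines of this sketch (the real helper lives inside the package; `tableForDay` is illustrative, and assumes the `time` import):

	// tableForDay derives a per-day shard name such as "tyk_aggregated_20241008"
	// from a base table name and a record timestamp (illustrative only).
	func tableForDay(base string, ts time.Time) string {
		return base + "_" + ts.Format("20060102")
	}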

type SQLAggregatePumpConf added in v1.5.0

type SQLAggregatePumpConf struct {
	// TYKCONFIGEXPAND
	SQLConf `mapstructure:",squash"`

	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_SQLAGGREGATE_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Specifies if it should store aggregated data for all the endpoints. By default, `false`,
	// which means that it only stores aggregated data for `tracked endpoints`.
	TrackAllPaths bool `json:"track_all_paths" mapstructure:"track_all_paths"`
	// Specifies prefixes of tags that should be ignored.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	ThresholdLenTagList int      `json:"threshold_len_tag_list" mapstructure:"threshold_len_tag_list"`
	// Determines if the aggregations should be made per minute instead of per hour.
	StoreAnalyticsPerMinute bool     `json:"store_analytics_per_minute" mapstructure:"store_analytics_per_minute"`
	IgnoreAggregationsList  []string `json:"ignore_aggregations" mapstructure:"ignore_aggregations"`
	// Set to true to disable the default tyk index creation.
	OmitIndexCreation bool `json:"omit_index_creation" mapstructure:"omit_index_creation"`
}

@PumpConf SQLAggregate

type SQLConf added in v1.5.0

type SQLConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_SQL_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The supported and tested types are `sqlite` and `postgres`.
	Type string `json:"type" mapstructure:"type"`
	// Specifies the connection string to the database.
	ConnectionString string `json:"connection_string" mapstructure:"connection_string"`
	// Postgres configurations.
	Postgres PostgresConfig `json:"postgres" mapstructure:"postgres"`
	// Mysql configurations.
	Mysql MysqlConfig `json:"mysql" mapstructure:"mysql"`
	// Specifies if all the analytics records are going to be stored in one table or in multiple
	// tables (one per day). By default, `false`. If `false`, all the records are stored in the
	// `tyk_aggregated` table. If `true`, the records for each day are stored in a
	// `tyk_aggregated_YYYYMMDD` table, where `YYYYMMDD` changes depending on the date.
	TableSharding bool `json:"table_sharding" mapstructure:"table_sharding"`
	// Specifies the SQL log verbosity. The possible values are: `info`, `error` and `warning`. By
	// default, the value is `silent`, which means that it won't log any SQL query.
	LogLevel string `json:"log_level" mapstructure:"log_level"`
	// Specifies the number of records to be written in each batch. Type int. By default,
	// it writes at most 1000 records per batch.
	BatchSize int `json:"batch_size" mapstructure:"batch_size"`
}

@PumpConf SQL
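
As a sketch of wiring this configuration to GORM via the package's Dialect helper (assumes the `gorm.io/gorm` import; the DSN is a placeholder):

	// openSQL builds a *gorm.DB from an SQLConf using the Dialect helper.
	func openSQL() (*gorm.DB, error) {
		cfg := &pumps.SQLConf{
			Type:             "postgres", // `sqlite` and `postgres` are the supported types
			ConnectionString: "host=localhost user=tyk dbname=tyk_analytics", // placeholder DSN
		}
		dialector, err := pumps.Dialect(cfg) // returns an error for unsupported types
		if err != nil {
			return nil, err
		}
		return gorm.Open(dialector, &gorm.Config{})
	}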

type SQLPump added in v1.5.0

type SQLPump struct {
	CommonPumpConfig
	IsUptime bool

	SQLConf *SQLConf
	// contains filtered or unexported fields
}

func (*SQLPump) GetEnvPrefix added in v1.5.0

func (c *SQLPump) GetEnvPrefix() string

func (*SQLPump) GetName added in v1.5.0

func (c *SQLPump) GetName() string

func (*SQLPump) Init added in v1.5.0

func (c *SQLPump) Init(conf interface{}) error

func (*SQLPump) New added in v1.5.0

func (c *SQLPump) New() Pump

func (*SQLPump) SetDecodingRequest added in v1.8.0

func (c *SQLPump) SetDecodingRequest(decoding bool)

func (*SQLPump) SetDecodingResponse added in v1.8.0

func (c *SQLPump) SetDecodingResponse(decoding bool)

func (*SQLPump) WriteData added in v1.5.0

func (c *SQLPump) WriteData(ctx context.Context, data []interface{}) error

func (*SQLPump) WriteUptimeData added in v1.5.0

func (c *SQLPump) WriteUptimeData(data []interface{})

type SQSConf added in v1.9.0

type SQSConf struct {
	// EnvPrefix specifies the prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_SQS_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`

	// QueueName specifies the name of the AWS Simple Queue Service (SQS) queue for message delivery.
	QueueName string `mapstructure:"aws_queue_name"`

	// AWSRegion sets the AWS region where the SQS queue is located.
	AWSRegion string `mapstructure:"aws_region"`

	// AWSSecret is the AWS secret key used for authentication.
	AWSSecret string `mapstructure:"aws_secret"`

	// AWSKey is the AWS access key ID used for authentication.
	AWSKey string `mapstructure:"aws_key"`

	// AWSToken is the AWS session token used for authentication.
	// This is only required when using temporary credentials.
	AWSToken string `mapstructure:"aws_token"`

	// AWSEndpoint is the custom endpoint URL for AWS SQS, if applicable.
	AWSEndpoint string `mapstructure:"aws_endpoint"`

	// AWSMessageGroupID specifies the message group ID for ordered processing within the SQS queue.
	AWSMessageGroupID string `mapstructure:"aws_message_group_id"`

	// AWSMessageIDDeduplicationEnabled enables/disables message deduplication based on unique IDs.
	AWSMessageIDDeduplicationEnabled bool `mapstructure:"aws_message_id_deduplication_enabled"`

	// AWSDelaySeconds configures the delay (in seconds) before messages become available for processing.
	AWSDelaySeconds int32 `mapstructure:"aws_delay_seconds"`

	// AWSSQSBatchLimit sets the maximum number of messages in a single batch when sending to the SQS queue.
	AWSSQSBatchLimit int `mapstructure:"aws_sqs_batch_limit"`
}

SQSConf represents the configuration structure for the Tyk Pump SQS (Simple Queue Service) pump.

type SQSPump added in v1.9.0

type SQSPump struct {
	SQSClient   SQSSendMessageBatchAPI
	SQSQueueURL *string
	SQSConf     *SQSConf

	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*SQSPump) GetEnvPrefix added in v1.9.0

func (s *SQSPump) GetEnvPrefix() string

func (*SQSPump) GetName added in v1.9.0

func (s *SQSPump) GetName() string

func (*SQSPump) Init added in v1.9.0

func (s *SQSPump) Init(config interface{}) error

func (*SQSPump) New added in v1.9.0

func (s *SQSPump) New() Pump

func (*SQSPump) NewSQSPublisher added in v1.9.0

func (s *SQSPump) NewSQSPublisher() (c *sqs.Client, err error)

func (*SQSPump) WriteData added in v1.9.0

func (s *SQSPump) WriteData(ctx context.Context, data []interface{}) error

type SQSSendMessageBatchAPI added in v1.9.0

type SQSSendMessageBatchAPI interface {
	GetQueueUrl(ctx context.Context,
		params *sqs.GetQueueUrlInput,
		optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error)

	SendMessageBatch(ctx context.Context,
		params *sqs.SendMessageBatchInput,
		optFns ...func(*sqs.Options)) (*sqs.SendMessageBatchOutput, error)
}
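
Because SQSPump depends on this interface rather than on *sqs.Client directly, a test double can stand in for AWS. A hypothetical stub, assuming the `github.com/aws/aws-sdk-go-v2/service/sqs` import:

	// fakeSQS satisfies SQSSendMessageBatchAPI without touching AWS.
	type fakeSQS struct{}

	func (fakeSQS) GetQueueUrl(ctx context.Context, params *sqs.GetQueueUrlInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error) {
		url := "https://sqs.example.local/000000000000/test-queue" // hypothetical URL
		return &sqs.GetQueueUrlOutput{QueueUrl: &url}, nil
	}

	func (fakeSQS) SendMessageBatch(ctx context.Context, params *sqs.SendMessageBatchInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageBatchOutput, error) {
		return &sqs.SendMessageBatchOutput{}, nil // a real test would record the inputs
	}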

type SegmentConf

type SegmentConf struct {
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	WriteKey  string `json:"segment_write_key" mapstructure:"segment_write_key"`
}

type SegmentPump

type SegmentPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*SegmentPump) GetEnvPrefix added in v1.3.0

func (s *SegmentPump) GetEnvPrefix() string

func (*SegmentPump) GetName

func (s *SegmentPump) GetName() string

func (*SegmentPump) Init

func (s *SegmentPump) Init(config interface{}) error

func (*SegmentPump) New

func (s *SegmentPump) New() Pump

func (*SegmentPump) ToJSONMap

func (s *SegmentPump) ToJSONMap(obj interface{}) (map[string]interface{}, error)

func (*SegmentPump) WriteData

func (s *SegmentPump) WriteData(ctx context.Context, data []interface{}) error

func (*SegmentPump) WriteDataRecord

func (s *SegmentPump) WriteDataRecord(record analytics.AnalyticsRecord) error

type SplunkClient

type SplunkClient struct {
	Token         string
	CollectorURL  string
	TLSSkipVerify bool
	// contains filtered or unexported fields
}

SplunkClient contains Splunk client methods.

func NewSplunkClient

func NewSplunkClient(token string, collectorURL string, skipVerify bool, certFile string, keyFile string, serverName string) (c *SplunkClient, err error)

NewSplunkClient initializes a new SplunkClient.
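
For example (the token and URL below are placeholders; assumes the `log` import):

	client, err := pumps.NewSplunkClient(
		"11111111-2222-3333-4444-555555555555",         // HEC token (placeholder)
		"https://splunk:8088/services/collector/event", // collector URL
		false, // skipVerify
		"",    // certFile
		"",    // keyFile
		"",    // serverName
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client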

type SplunkPump

type SplunkPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

SplunkPump is a Tyk Pump driver for Splunk.

func (*SplunkPump) FilterTags added in v1.5.0

func (p *SplunkPump) FilterTags(filteredTags []string) []string

FilterTags filters the tags based on the configured rules.

func (*SplunkPump) GetEnvPrefix added in v1.3.0

func (p *SplunkPump) GetEnvPrefix() string

func (*SplunkPump) GetName

func (p *SplunkPump) GetName() string

GetName returns the pump name.

func (*SplunkPump) Init

func (p *SplunkPump) Init(config interface{}) error

Init performs the initialization of the SplunkClient.

func (*SplunkPump) New

func (p *SplunkPump) New() Pump

New initializes a new pump.

func (*SplunkPump) WriteData

func (p *SplunkPump) WriteData(ctx context.Context, data []interface{}) error

WriteData prepares an appropriate data structure and sends it to the HTTP Event Collector.

type SplunkPumpConfig

type SplunkPumpConfig struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_SPLUNK_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Token used to authenticate against the Splunk HTTP Event Collector.
	CollectorToken string `json:"collector_token" mapstructure:"collector_token"`
	// Endpoint the Pump will send analytics to. It should look something like:
	// `https://splunk:8088/services/collector/event`.
	CollectorURL string `json:"collector_url" mapstructure:"collector_url"`
	// Controls whether the pump client verifies the Splunk server's certificate chain and host name.
	SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify" mapstructure:"ssl_insecure_skip_verify"`
	// SSL cert file location.
	SSLCertFile string `json:"ssl_cert_file" mapstructure:"ssl_cert_file"`
	// SSL cert key location.
	SSLKeyFile string `json:"ssl_key_file" mapstructure:"ssl_key_file"`
	// SSL Server name used in the TLS connection.
	SSLServerName string `json:"ssl_server_name" mapstructure:"ssl_server_name"`
	// Controls whether the pump client should hide the API key. In case you still need a
	// substring of the value, check the next option. Default value is `false`.
	ObfuscateAPIKeys bool `json:"obfuscate_api_keys" mapstructure:"obfuscate_api_keys"`
	// Defines the number of characters to keep from the end of the API key. `obfuscate_api_keys`
	// should be set to `true`. Default value is `0`.
	ObfuscateAPIKeysLength int `json:"obfuscate_api_keys_length" mapstructure:"obfuscate_api_keys_length"`
	// Define which Analytics fields should participate in the Splunk event. Check the available
	// fields in the example below. Default value is `["method",
	// "path", "response_code", "api_key", "time_stamp", "api_version", "api_name", "api_id",
	// "org_id", "oauth_id", "raw_request", "request_time", "raw_response", "ip_address"]`.
	Fields []string `json:"fields" mapstructure:"fields"`
	// Choose which tags should be ignored by the Splunk Pump. Keep in mind that the tag name and
	// value are hyphenated. Default value is `[]`.
	IgnoreTagPrefixList []string `json:"ignore_tag_prefix_list" mapstructure:"ignore_tag_prefix_list"`
	// If this is set to `true`, the pump sends the analytics records to Splunk in batches.
	// Default value is `false`.
	EnableBatch bool `json:"enable_batch" mapstructure:"enable_batch"`
	// Max content length in bytes to be sent in batch requests. It should match the
	// `max_content_length` configured in Splunk. If the purged analytics records don't reach
	// this size, they are sent anyway on each `purge_loop`. Default value is 838860800
	// (~ 800 MB), the same default value as the Splunk config.
	BatchMaxContentLength int `json:"batch_max_content_length" mapstructure:"batch_max_content_length"`
	// MaxRetries represents the maximum number of retries to attempt if sending requests to the
	// Splunk HEC fails. Default value is `0`.
	MaxRetries uint64 `json:"max_retries" mapstructure:"max_retries"`
}

SplunkPumpConfig contains the driver configuration parameters. @PumpConf Splunk

type StatsdConf

type StatsdConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_STATSD_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Address of statsd including host & port.
	Address string `json:"address" mapstructure:"address"`
	// Define which Analytics fields should have their own metric calculation.
	Fields []string `json:"fields" mapstructure:"fields"`
	// List of tags to be added to the metric.
	Tags []string `json:"tags" mapstructure:"tags"`
	// Allows having a separate method field instead of embedding it in the path field.
	SeparatedMethod bool `json:"separated_method" mapstructure:"separated_method"`
}

@PumpConf Statsd

type StatsdPump

type StatsdPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*StatsdPump) GetEnvPrefix added in v1.3.0

func (s *StatsdPump) GetEnvPrefix() string

func (*StatsdPump) GetName

func (s *StatsdPump) GetName() string

func (*StatsdPump) Init

func (s *StatsdPump) Init(config interface{}) error

func (*StatsdPump) New

func (s *StatsdPump) New() Pump

func (*StatsdPump) WriteData

func (s *StatsdPump) WriteData(ctx context.Context, data []interface{}) error

type StdOutConf added in v1.4.0

type StdOutConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_STDOUT_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// Format of the analytics logs. Defaults to `text` unless `json` is explicitly specified.
	// When JSON logging is used, all pump logs to stdout will be JSON.
	Format string `json:"format" mapstructure:"format"`
	// Root name of the JSON object the analytics record is nested in.
	LogFieldName string `json:"log_field_name" mapstructure:"log_field_name"`
}

@PumpConf StdOut

type StdOutPump added in v1.4.0

type StdOutPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*StdOutPump) GetEnvPrefix added in v1.4.0

func (s *StdOutPump) GetEnvPrefix() string

func (*StdOutPump) GetName added in v1.4.0

func (s *StdOutPump) GetName() string

func (*StdOutPump) Init added in v1.4.0

func (s *StdOutPump) Init(config interface{}) error

func (*StdOutPump) New added in v1.4.0

func (s *StdOutPump) New() Pump

func (*StdOutPump) WriteData added in v1.4.0

func (s *StdOutPump) WriteData(ctx context.Context, data []interface{}) error

WriteData writes the actual data to stdout.

type SyslogConf added in v1.2.0

type SyslogConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_SYSLOG_META`
	EnvPrefix string `json:"meta_env_prefix" mapstructure:"meta_env_prefix"`
	// Possible values are `udp`, `tcp` and `tls`.
	Transport string `json:"transport" mapstructure:"transport"`
	// Host and port combination of your syslog daemon, e.g. `"localhost:5140"`.
	NetworkAddr string `json:"network_addr" mapstructure:"network_addr"`
	// The severity level, an integer from 0-7, based on the standard
	// [Syslog Severity Levels](https://en.wikipedia.org/wiki/Syslog#Severity_level).
	LogLevel int `json:"log_level" mapstructure:"log_level"`
	// Prefix tag
	//
	// When working with FluentD, you should provide a
	// [FluentD Parser](https://docs.fluentd.org/input/syslog) based on the OS you are using so
	// that FluentD can correctly read the logs.
	//
	// ```{.json}
	// "syslog": {
	//   "name": "syslog",
	//   "meta": {
	//     "transport": "udp",
	//     "network_addr": "localhost:5140",
	//     "log_level": 6,
	//     "tag": "syslog-pump"
	//   }
	// }
	// ```
	Tag string `json:"tag" mapstructure:"tag"`
}

@PumpConf Syslog

type SyslogPump added in v1.2.0

type SyslogPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*SyslogPump) GetEnvPrefix added in v1.3.0

func (s *SyslogPump) GetEnvPrefix() string

func (*SyslogPump) GetFilters added in v1.2.0

func (s *SyslogPump) GetFilters() analytics.AnalyticsFilters

func (*SyslogPump) GetName added in v1.2.0

func (s *SyslogPump) GetName() string

func (*SyslogPump) GetTimeout added in v1.2.0

func (s *SyslogPump) GetTimeout() int

func (*SyslogPump) Init added in v1.2.0

func (s *SyslogPump) Init(config interface{}) error

func (*SyslogPump) New added in v1.2.0

func (s *SyslogPump) New() Pump

func (*SyslogPump) SetFilters added in v1.2.0

func (s *SyslogPump) SetFilters(filters analytics.AnalyticsFilters)

func (*SyslogPump) SetTimeout added in v1.2.0

func (s *SyslogPump) SetTimeout(timeout int)

func (*SyslogPump) WriteData added in v1.2.0

func (s *SyslogPump) WriteData(ctx context.Context, data []interface{}) error

WriteData writes the actual data to syslog.

type TimestreamPump added in v1.6.0

type TimestreamPump struct {
	CommonPumpConfig
	// contains filtered or unexported fields
}

func (*TimestreamPump) BuildTimestreamInputIterator added in v1.6.0

func (t *TimestreamPump) BuildTimestreamInputIterator(data []interface{}) (func() (records []types.Record, hasNext bool), bool)

func (*TimestreamPump) GetAnalyticsRecordDimensions added in v1.6.0

func (t *TimestreamPump) GetAnalyticsRecordDimensions(decoded *analytics.AnalyticsRecord) (dimensions []types.Dimension)

func (*TimestreamPump) GetAnalyticsRecordMeasures added in v1.6.0

func (t *TimestreamPump) GetAnalyticsRecordMeasures(decoded *analytics.AnalyticsRecord) (measureValues []types.MeasureValue)

func (*TimestreamPump) GetEnvPrefix added in v1.6.0

func (t *TimestreamPump) GetEnvPrefix() string

func (*TimestreamPump) GetName added in v1.6.0

func (t *TimestreamPump) GetName() string

func (*TimestreamPump) Init added in v1.6.0

func (t *TimestreamPump) Init(config interface{}) error

func (*TimestreamPump) MapAnalyticRecord2TimestreamMultimeasureRecord added in v1.6.0

func (t *TimestreamPump) MapAnalyticRecord2TimestreamMultimeasureRecord(decoded *analytics.AnalyticsRecord) types.Record

func (*TimestreamPump) New added in v1.6.0

func (t *TimestreamPump) New() Pump

func (*TimestreamPump) NewTimestreamWriter added in v1.7.0

func (t *TimestreamPump) NewTimestreamWriter() (c *timestreamwrite.Client, err error)

func (*TimestreamPump) WriteData added in v1.6.0

func (t *TimestreamPump) WriteData(ctx context.Context, data []interface{}) error

type TimestreamPumpConf added in v1.6.0

type TimestreamPumpConf struct {
	// The prefix for the environment variables that will be used to override the configuration.
	// Defaults to `TYK_PMP_PUMPS_TIMESTREAM_META`
	EnvPrefix string `mapstructure:"meta_env_prefix"`
	// The AWS region that contains the Timestream database.
	AWSRegion string `mapstructure:"aws_region"`
	// The table name where the data is going to be written.
	TableName string `mapstructure:"timestream_table_name"`
	// The Timestream database name that contains the table being written to.
	DatabaseName string `mapstructure:"timestream_database_name"`
	// A filter of all the dimensions that will be written to the table. The possible options are
	// ["Method","Host","Path","RawPath","APIKey","APIVersion","APIName","APIID","OrgID","OauthID"]
	Dimensions []string `mapstructure:"dimensions"`
	// A filter of all the measures that will be written to the table. The possible options are
	// ["ContentLength","ResponseCode","RequestTime","NetworkStats.OpenConnections",
	// "NetworkStats.ClosedConnection","NetworkStats.BytesIn","NetworkStats.BytesOut",
	// "Latency.Total","Latency.Upstream","GeoData.City.GeoNameID","IPAddress",
	// "GeoData.Location.Latitude","GeoData.Location.Longitude","UserAgent","RawRequest","RawResponse",
	// "RateLimit.Limit","Ratelimit.Remaining","Ratelimit.Reset",
	// "GeoData.Country.ISOCode","GeoData.City.Names","GeoData.Location.TimeZone"]
	Measures []string `mapstructure:"measures"`
	// Set to `true` in order to save any of the `RateLimit` measures. Default value is `false`.
	WriteRateLimit bool `mapstructure:"write_rate_limit"`
	// If set to `true`, the pump will try to read geo information from the headers if
	// values aren't found on the analytics record. Default value is `false`.
	ReadGeoFromRequest bool `mapstructure:"read_geo_from_request"`
	// Set to `true` in order to save numerical values with value zero. Default value is `false`.
	WriteZeroValues bool `mapstructure:"write_zero_values"`
	// A name mapping for both Dimensions and Measures names. It's not required.
	NameMappings map[string]string `mapstructure:"field_name_mappings"`
}

@PumpConf Timestream

type TimestreamWriteRecordsAPI added in v1.6.0

type TimestreamWriteRecordsAPI interface {
	WriteRecords(ctx context.Context, params *timestreamwrite.WriteRecordsInput, optFns ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error)
}
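
As with SQSSendMessageBatchAPI above, this interface makes the pump testable with a stub. A hypothetical example, assuming the `github.com/aws/aws-sdk-go-v2/service/timestreamwrite` import:

	// fakeTimestream satisfies TimestreamWriteRecordsAPI without calling AWS.
	type fakeTimestream struct{}

	func (fakeTimestream) WriteRecords(ctx context.Context, params *timestreamwrite.WriteRecordsInput, optFns ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) {
		return &timestreamwrite.WriteRecordsOutput{}, nil
	}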

type UptimePump added in v1.5.0

type UptimePump interface {
	GetName() string
	Init(interface{}) error
	WriteUptimeData(data []interface{})
}
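
A minimal no-op implementation, mainly to show the required method set (the type name is hypothetical):

	// discardUptimePump is a hypothetical UptimePump that drops uptime data.
	type discardUptimePump struct{}

	func (discardUptimePump) GetName() string                    { return "discard-uptime" }
	func (discardUptimePump) Init(conf interface{}) error        { return nil }
	func (discardUptimePump) WriteUptimeData(data []interface{}) {}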
