config

package
v1.6.1

Published: Aug 1, 2023 License: MIT Imports: 7 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AzureSink

type AzureSink struct {
	BaseSink              `yaml:",inline"`
	Container             string   `json:"container" yaml:"container" env:"CONTAINER"`                                     // The container name
	Prefix                string   `json:"prefix" yaml:"prefix" env:"PREFIX"`                                              // The prefix to add
	Parallelism           uint16   `json:"parallelism" yaml:"parallelism" env:"PARALLELISM"`                               // The BlockBlob upload parallelism
	BlockSize             int64    `json:"blockSize" yaml:"blockSize" env:"BLOCKSIZE"`                                     // The Block Size for upload
	BlobServiceURL        string   `json:"blobServiceURL" yaml:"blobServiceURL" env:"BLOBSERVICEURL"`                      // The blob service URL
	StorageAccounts       []string `json:"storageAccounts" yaml:"storageAccounts" env:"STORAGEACCOUNTS"`                   // The list of storage accounts
	StorageAccountWeights []uint   `json:"storageAccountWeights" yaml:"storageAccountWeights" env:"STORAGEACCOUNTWEIGHTS"` // The list of weighting factors for each storage account
}

AzureSink represents a sink to Azure Blob Storage
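
As a rough sketch, an Azure sink can spread uploads across several storage accounts; the weights presumably bias how often each account is picked. All values below are placeholders, and the snippet assumes this package is imported as config:

azure := config.AzureSink{
	Container:             "events",     // placeholder container name
	Prefix:                "compacted/", // placeholder prefix
	Parallelism:           4,            // BlockBlob upload parallelism
	BlockSize:             4 << 20,      // 4 MiB blocks
	StorageAccounts:       []string{"accounta", "accountb"},
	StorageAccountWeights: []uint{3, 1}, // accounta is presumably picked about three times as often
}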

type Badger added in v1.2.5

type Badger struct {
	SyncWrites          *bool         `json:"syncWrites" yaml:"syncWrites" env:"SYNCWRITES"`                            // Whether to sync writes to disk before acknowledging; defaults to true
	ValueLogMaxEntries  *uint32       `json:"valueLogMaxEntries" yaml:"valueLogMaxEntries" env:"VALUELOGMAXENTRIES"`    // Approximate maximum number of entries a value log file can hold; defaults to 5000
	MaxTableSize        *int64        `json:"maxTableSize" yaml:"maxTableSize" env:"MAXTABLESIZE"`                      // Maximum size in bytes for each LSM table or file
	LevelOneSize        *int64        `json:"levelOneSize" yaml:"levelOneSize" env:"LEVELONESIZE"`                      // Maximum total size in bytes for Level 1; defaults to 1 million
	LevelSizeMultiplier *int          `json:"levelSizeMultiplier" yaml:"levelSizeMultiplier" env:"LEVELSIZEMULTIPLIER"` // The ratio between the maximum sizes of contiguous levels in the LSM; defaults to 10
	MaxLevels           *int          `json:"maxLevels" yaml:"maxLevels" env:"MAXLEVELS"`                               // Maximum number of levels of compaction allowed in the LSM; defaults to 7
	BlockCacheSize      *int64        `json:"blockCacheSize" yaml:"blockCacheSize" env:"BLOCKCACHESIZE"`                // How much data the block cache should hold in memory; defaults to 256 MB
	Default             BadgerDefault `json:"default" yaml:"default" env:"DEFAULT"`                                     // The Badger preset to optimize for: storage, ingestion, or the default Badger provides
}

Badger configures the underlying Badger key-value store. It exposes tuning options to optimize for various use cases, such as bigger files, point queries or range queries.

type BadgerDefault added in v1.2.5

type BadgerDefault string

BadgerDefault represents a default way of configuring badger

const (
	BadgerStorage       BadgerDefault = "storage"
	BadgerIngestion     BadgerDefault = "ingestion"
	BadgerDefaultOption BadgerDefault = "default"
)

Various Badger options
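
As an illustration, the Badger options can be tuned per deployment. The sketch below (field values are hypothetical, not recommended defaults) starts from the ingestion-optimized preset and overrides two knobs; it assumes this package is imported as config:

syncWrites := false
blockCacheSize := int64(512 << 20) // 512 MiB block cache

badger := config.Badger{
	SyncWrites:     &syncWrites, // do not sync every write before acknowledging
	BlockCacheSize: &blockCacheSize,
	Default:        config.BadgerIngestion, // ingestion-optimized preset
}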

type BaseSink added in v1.6.0

type BaseSink struct {
	Encoder string `json:"encoder" yaml:"encoder"` // The default encoder for the compaction
	Filter  string `json:"filter" yaml:"filter"`   // The default filter for the compaction
}

type BigQuerySink

type BigQuerySink struct {
	BaseSink `yaml:",inline"`
	Project  string `json:"project" yaml:"project" env:"PROJECT"` // The project ID
	Dataset  string `json:"dataset" yaml:"dataset" env:"DATASET"` // The dataset ID
	Table    string `json:"table" yaml:"table" env:"TABLE"`       // The table ID
}

BigQuerySink represents a sink to Google Big Query

type Compaction

type Compaction struct {
	Sinks    []Sink `yaml:"sinks"`
	NameFunc string `json:"nameFunc" yaml:"nameFunc" env:"NAMEFUNC"` // The Lua script to compute the file name for a given row
	Interval int    `json:"interval" yaml:"interval" env:"INTERVAL"` // The compaction interval, in seconds
}

Compaction represents a configuration for compaction sinks
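
A hedged sketch of a compaction configuration that writes compacted blocks to a single S3 sink every 300 seconds; the bucket, prefix and nameFunc script are placeholders, and the snippet assumes this package is imported as config:

compact := config.Compaction{
	Interval: 300,                                       // compact every 5 minutes
	NameFunc: "function(row) return row['event'] end",   // placeholder Lua script computing a file name for a row
	Sinks: []config.Sink{{
		S3: &config.S3Sink{
			Region: "ap-southeast-1",
			Bucket: "my-archive-bucket",
			Prefix: "compacted/",
		},
	}},
}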

type Computed

type Computed struct {
	Name     string      `json:"name"`
	Type     typeof.Type `json:"type"`
	Func     string      `json:"func"`
	FuncName string      `json:"funcname"`
}

Computed represents a computed column

type Config

type Config struct {
	URI      string     `json:"uri" yaml:"uri" env:"URI"`
	Env      string     `json:"env" yaml:"env" env:"ENV"`             // The environment (eg: prd, stg)
	AppName  string     `json:"appName" yaml:"appName" env:"APPNAME"` // app name used for monitoring
	Domain   string     `json:"domain" yaml:"domain" env:"DOMAIN"`
	Readers  Readers    `json:"readers" yaml:"readers" env:"READERS"`
	Writers  Writers    `json:"writers" yaml:"writers" env:"WRITERS"`
	Storage  Storage    `json:"storage" yaml:"storage" env:"STORAGE"`
	Tables   Tables     `json:"tables" yaml:"tables"`
	Statsd   *StatsD    `json:"statsd,omitempty" yaml:"statsd" env:"STATSD"`
	Computed []Computed `json:"computed" yaml:"computed" env:"COMPUTED"`
	K8s      *K8s       `json:"k8s,omitempty" yaml:"k8s" env:"K8S"`
}

Config is the global configuration
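
Since every field carries json, yaml and env tags, a Config can be populated from a YAML document. A minimal sketch, assuming gopkg.in/yaml.v2 (or a compatible decoder) and that this package is imported as config; all names and values are placeholders:

raw := []byte(`
env: dev
domain: example.local
readers:
  presto:
    port: 8042
    schema: data
storage:
  dir: /data
tables:
  eventlog:
    ttl: 3600
    hashBy: event
    sortBy: tsi
`)

var cfg config.Config
if err := yaml.Unmarshal(raw, &cfg); err != nil {
	panic(err) // in real code, handle the error
}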

type Configurer

type Configurer interface {
	Configure(*Config) error
}

Configurer is an interface for any component which will provide the config.
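
For example, a provider that fills in defaults could satisfy Configurer as follows (a minimal sketch; the defaults type and its values are illustrative, and the snippet assumes this package is imported as config):

type defaults struct{}

// Configure fills in values that earlier providers did not supply.
func (defaults) Configure(c *config.Config) error {
	if c.Env == "" {
		c.Env = "dev"
	}
	if c.Storage.Directory == "" {
		c.Storage.Directory = "/data"
	}
	return nil
}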

type ConsoleSink added in v1.6.1

type ConsoleSink struct {
	BaseSink `yaml:",inline"`
}

ConsoleSink represents a sink to local STDOUT

type FileSink

type FileSink struct {
	BaseSink  `yaml:",inline"`
	Directory string `json:"dir" yaml:"dir" env:"DIR"`
}

FileSink represents a sink to the local file system

type Func

type Func func() *Config

Func represents a config function

func Load

func Load(ctx context.Context, d time.Duration, configurers ...Configurer) Func

Load iterates through all the providers and fills the config object. The order of providers is important, as the last provider can override the previous ones. It also sets a watch on the config for hot reloading.
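
A hedged usage sketch: staticDefaults and envOverrides stand for any Configurer implementations (such as the one sketched under Configurer above), later providers override earlier ones, and the duration presumably controls how often the watch re-applies them:

loadConfig := config.Load(context.Background(), 30*time.Second, staticDefaults{}, envOverrides{})
cfg := loadConfig() // returns the most recently loaded *config.Config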

type GCSSink

type GCSSink struct {
	BaseSink `yaml:",inline"`
	Bucket   string `json:"bucket" yaml:"bucket" env:"BUCKET"` // The name of the bucket
	Prefix   string `json:"prefix" yaml:"prefix" env:"PREFIX"` // The prefix to add
}

GCSSink represents a sink to Google Cloud Storage

type GRPC

type GRPC struct {
	Port           int32 `json:"port" yaml:"port" env:"PORT"` // The port for the gRPC listener (default: 8080)
	MaxRecvMsgSize int   `json:"maxRecvMsgSize" yaml:"maxRecvMsgSize" env:"MAXRECVMSGSIZE"`
	MaxSendMsgSize int   `json:"maxSendMsgSize" yaml:"maxSendMsgSize" env:"MAXSENDMSGSIZE"`
}

GRPC represents the configuration for gRPC ingress

type K8s added in v1.1.12

type K8s struct {
	ProbePort int32 `json:"probePort" yaml:"probePort" env:"PROBEPORT"` // The port which is used for liveness and readiness probes (default: 8080)
}

type NATS added in v1.6.1

type NATS struct {
	Host  string        `json:"host" yaml:"host" env:"HOST"`
	Port  int32         `json:"port" yaml:"port" env:"PORT"`
	Split []SplitWriter `json:"split" yaml:"split" env:"SPLIT"`
}

NATS represents NATS consumer configuration

type Presto

type Presto struct {
	Port   int32  `json:"port" yaml:"port" env:"PORT"`
	Schema string `json:"schema" yaml:"schema" env:"SCHEMA"`
}

Presto represents the Presto configuration

type PubSubSink added in v1.2.5

type PubSubSink struct {
	BaseSink `yaml:",inline"`
	Project  string `json:"project" yaml:"project" env:"PROJECT"`
	Topic    string `json:"topic" yaml:"topic" env:"TOPIC"`
}

PubSubSink represents a sink to Google Pub/Sub

type Readers

type Readers struct {
	Presto *Presto `json:"presto" yaml:"presto" env:"PRESTO"`
}

Readers are ways to read the data

type S3SQS

type S3SQS struct {
	Region            string `json:"region" yaml:"region" env:"REGION"`
	Queue             string `json:"queue" yaml:"queue" env:"QUEUE"`
	WaitTimeout       int64  `json:"waitTimeout,omitempty" yaml:"waitTimeout" env:"WAITTIMEOUT"`                   // in seconds
	VisibilityTimeout int64  `json:"visibilityTimeout,omitempty" yaml:"visibilityTimeout" env:"VISIBILITYTIMEOUT"` // in seconds
	Retries           int    `json:"retries" yaml:"retries" env:"RETRIES"`
	ConcurrencyThread int    `json:"concurrencyThread" yaml:"concurrencyThread" env:"concurrencyThread"`
}

S3SQS represents the AWS S3 SQS configuration

type S3Sink

type S3Sink struct {
	BaseSink    `yaml:",inline"`
	Region      string `json:"region" yaml:"region" env:"REGION"`                // The region of the AWS bucket
	Bucket      string `json:"bucket" yaml:"bucket" env:"BUCKET"`                // The name of the AWS bucket
	Prefix      string `json:"prefix" yaml:"prefix" env:"PREFIX"`                // The prefix to add
	Endpoint    string `json:"endpoint" yaml:"endpoint" env:"ENDPOINT"`          // The custom endpoint to use
	SSE         string `json:"sse" yaml:"sse" env:"SSE"`                         // The server side encryption to use
	AccessKey   string `json:"accessKey" yaml:"accessKey" env:"ACCESSKEY"`       // The optional static access key
	SecretKey   string `json:"secretKey" yaml:"secretKey" env:"SECRETKEY"`       // The optional static secret key
	Concurrency int    `json:"concurrency" yaml:"concurrency" env:"CONCURRENCY"` // The S3 upload concurrency
}

S3Sink represents a sink for AWS S3 and compatible stores.

type Sink added in v1.6.0

type Sink struct {
	Console  *ConsoleSink  `json:"console" yaml:"console"`
	S3       *S3Sink       `json:"s3" yaml:"s3"`              // The S3 writer configuration
	Azure    *AzureSink    `json:"azure" yaml:"azure"`        // The Azure writer configuration
	BigQuery *BigQuerySink `json:"bigquery" yaml:"bigquery" ` // The Big Query writer configuration
	GCS      *GCSSink      `json:"gcs" yaml:"gcs" `           // The Google Cloud Storage writer configuration
	File     *FileSink     `json:"file" yaml:"file" `         // The local file system writer configuration
	Talaria  *TalariaSink  `json:"talaria" yaml:"talaria" `   // The Talaria writer configuration
	PubSub   *PubSubSink   `json:"pubsub" yaml:"pubsub" `     // The Google Pub/Sub writer configuration
}

Sink represents a configuration for a writer sink
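
Each Sink entry selects a destination by setting the corresponding pointer; for instance, a sketch of a sink writing to Google Cloud Storage (bucket and prefix are placeholders, and the snippet assumes this package is imported as config):

sink := config.Sink{
	GCS: &config.GCSSink{
		Bucket: "my-bucket",
		Prefix: "talaria/",
	},
}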

type SplitWriter added in v1.6.1

type SplitWriter struct {
	Subject    string `json:"subject" yaml:"subject" env:"SUBJECT"`
	Table      string `json:"table" yaml:"table" env:"TABLE"`
	QueueGroup string `json:"queueGroup" yaml:"queueGroup" env:"QUEUE_GROUP"`
}

type StatsD

type StatsD struct {
	Host string `json:"host" yaml:"host" env:"HOST"`
	Port int64  `json:"port" yaml:"port" env:"PORT"`
}

StatsD represents the configuration for the StatsD client

type Storage

type Storage struct {
	Badger
	Directory string `json:"dir" yaml:"dir" env:"DIR"`
}

Storage is the location to write the data

type Streams added in v1.2.5

type Streams []Sink

Streams is a list of sinks to stream data to

type Table added in v1.2.5

type Table struct {
	TTL     int64       `json:"ttl,omitempty" yaml:"ttl" env:"TTL"`          // The ttl (in seconds) for the storage, defaults to 1 hour.
	HashBy  string      `json:"hashBy,omitempty" yaml:"hashBy" env:"HASHBY"` // The column to use as key (metric), defaults to 'event'.
	SortBy  string      `json:"sortBy,omitempty" yaml:"sortBy" env:"SORTBY"` // The column to use as time, defaults to 'tsi'.
	Schema  string      `json:"schema" yaml:"schema" env:"SCHEMA"`           // The schema of the table
	Compact *Compaction `json:"compact" yaml:"compact" env:"COMPACT"`        // The compaction configuration for the table
	Streams Streams     `json:"streams" yaml:"streams" env:"STREAMS"`        // The streams to stream data to for data in this table
}

Table is the config for the timeseries table

type Tables

type Tables map[string]Table

Tables maps each table name to its table config
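
A hedged sketch of a Tables entry combining retention, keys and a compaction configuration; the table name and all values are placeholders, and the snippet assumes this package is imported as config:

tables := config.Tables{
	"eventlog": config.Table{
		TTL:    3600,    // keep rows for one hour
		HashBy: "event", // the column used as the key
		SortBy: "tsi",   // the column used as time
		Compact: &config.Compaction{
			Interval: 300,
			Sinks:    []config.Sink{{File: &config.FileSink{Directory: "/output"}}},
		},
	},
}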

type TalariaSink added in v1.2.5

type TalariaSink struct {
	BaseSink              `yaml:",inline"`
	Endpoint              string         `json:"endpoint" yaml:"endpoint" env:"ENDPOINT"`                               // The second Talaria endpoint
	CircuitTimeout        *time.Duration `json:"timeout" yaml:"timeout" env:"TIMEOUT"`                                  // The timeout (in seconds) for requests to the second Talaria
	MaxConcurrent         *int           `json:"concurrency" yaml:"concurrency" env:"CONCURRENCY"`                      // The number of concurrent requests permissible
	ErrorPercentThreshold *int           `json:"errorThreshold" yaml:"errorThreshold" env:"ERROR_THRESHOLD"`            // The percentage of failed requests tolerated
	MaxCallRecvMsgSize    *int           `json:"maxCallRecvMsgSize" yaml:"maxCallRecvMsgSize" env:"MAXCALLRECVMSGSIZE"` // The maximum gRPC message size per server response
	MaxCallSendMsgSize    *int           `json:"maxCallSendMsgSize" yaml:"maxCallSendMsgSize" env:"MAXCALLSENDMSGSIZE"` // The maximum gRPC message size per client request
}

TalariaSink represents a sink to an instance of Talaria

type Writers

type Writers struct {
	GRPC  *GRPC  `json:"grpc,omitempty" yaml:"grpc" env:"GRPC"`    // The GRPC ingress
	S3SQS *S3SQS `json:"s3sqs,omitempty" yaml:"s3sqs" env:"S3SQS"` // The S3SQS ingress
	NATS  *NATS  `json:"nats,omitempty" yaml:"nats" env:"NATS"`    // The NATS ingress
}

Writers are the ingress sources used to write data
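
For illustration, a Writers section enabling both gRPC and NATS ingress might look like the sketch below (host, subject and table names are placeholders, and the snippet assumes this package is imported as config):

writers := config.Writers{
	GRPC: &config.GRPC{Port: 8080},
	NATS: &config.NATS{
		Host:  "nats.local",
		Port:  4222,
		Split: []config.SplitWriter{{Subject: "events", Table: "eventlog"}},
	},
}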
