Documentation ¶
Overview ¶
A data pipeline processing engine.
See the README for more complete examples and guides.
Code Organization:
The pipeline package provides an API for how nodes can be connected to form a pipeline. The individual implementations of each node exist in this kapacitor package. The reason for the separation is to keep the exported API from the pipeline package clean as it is consumed via the TICKscripts (a DSL for Kapacitor).
Other Concepts:
Stream vs Batch -- Use of the word 'stream' indicates data arrives a single data point at a time. Use of the word 'batch' indicates data arrives in sets or batches of data points.
Task -- A task represents a concrete workload to perform. It consists of a pipeline and an identifying name. Basic CRUD operations can be performed on tasks.
Task Master -- Responsible for executing a task in a specific environment.
Replay -- Replays static datasets against tasks.
Index ¶
- Constants
- Variables
- func ConvertResultTimes(r *Result)
- func CreateDBRPMap(dbrps []DBRP) map[DBRP]bool
- func DeleteStatistics(key string)
- func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, now time.Time, ...) (bool, error)
- func NewStatistics(name string, tags map[string]string) (string, *kexpvar.Map)
- func ReplayBatchFromChan(clck clock.Clock, batches []<-chan models.Batch, collectors []BatchCollector, ...) <-chan error
- func ReplayBatchFromIO(clck clock.Clock, data []io.ReadCloser, collectors []BatchCollector, ...) <-chan error
- func ReplayStreamFromChan(clck clock.Clock, points <-chan models.Point, collector StreamCollector, ...) <-chan error
- func ReplayStreamFromIO(clck clock.Clock, data io.ReadCloser, collector StreamCollector, recTime bool, ...) <-chan error
- func Uptime() time.Duration
- func WriteBatchForRecording(w io.Writer, b models.Batch) error
- func WritePointForRecording(w io.Writer, p models.Point, precision string) error
- type AlertData
- type AlertHandler
- type AlertLevel
- type AlertNode
- type BatchCollector
- type BatchNode
- type BatchQueries
- type CombineNode
- type Command
- type CommandInfo
- type Commander
- type DBRP
- type DefaultNode
- type DeleteNode
- type DerivativeNode
- type Edge
- func (e *Edge) Abort()
- func (e *Edge) Close()
- func (e *Edge) CollectBatch(b models.Batch) error
- func (e *Edge) CollectPoint(p models.Point) error
- func (e *Edge) Next() (p models.PointInterface, ok bool)
- func (e *Edge) NextBatch() (b models.Batch, ok bool)
- func (e *Edge) NextPoint() (p models.Point, ok bool)
- type EvalNode
- type ExecutingTask
- func (et *ExecutingTask) BatchCount() (int, error)
- func (et *ExecutingTask) BatchQueries(start, stop time.Time) ([]BatchQueries, error)
- func (et *ExecutingTask) EDot(labels bool) []byte
- func (et *ExecutingTask) ExecutionStats() (ExecutionStats, error)
- func (et *ExecutingTask) GetOutput(name string) (Output, error)
- func (et *ExecutingTask) Snapshot() (*TaskSnapshot, error)
- func (et *ExecutingTask) StartBatching() error
- func (et *ExecutingTask) StopStats()
- func (et *ExecutingTask) Wait() error
- type ExecutionStats
- type FlattenNode
- type FromNode
- type GroupByNode
- type HTTPOutNode
- type InfluxDBOutNode
- type InfluxQLNode
- type JoinNode
- type LogNode
- type LogService
- type MaxDuration
- type NoOpNode
- type Node
- type NoopMetaClient
- func (m *NoopMetaClient) Authenticate(username, password string) (ui *meta.UserInfo, err error)
- func (m *NoopMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error)
- func (m *NoopMetaClient) CreateDatabaseWithRetentionPolicy(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error)
- func (m *NoopMetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
- func (m *NoopMetaClient) Database(name string) *meta.DatabaseInfo
- func (m *NoopMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error)
- func (m *NoopMetaClient) Users() ([]meta.UserInfo, error)
- func (m *NoopMetaClient) WaitForLeader(d time.Duration) error
- type Output
- type Query
- type QueryNode
- type Result
- type SampleNode
- type ShiftNode
- type Socket
- type StatsData
- type StatsNode
- type StreamCollector
- type StreamNode
- type Task
- type TaskMaster
- func (tm *TaskMaster) BatchCollectors(id string) []BatchCollector
- func (tm *TaskMaster) Close() error
- func (tm *TaskMaster) CreateTICKScope() *stateful.Scope
- func (tm *TaskMaster) DelFork(id string)
- func (tm *TaskMaster) Drain()
- func (tm *TaskMaster) ExecutingDot(id string, labels bool) string
- func (tm *TaskMaster) ExecutionStats(id string) (ExecutionStats, error)
- func (tm *TaskMaster) IsExecuting(id string) bool
- func (tm *TaskMaster) New(id string) *TaskMaster
- func (tm *TaskMaster) NewFork(taskName string, dbrps []DBRP, measurements []string) (*Edge, error)
- func (tm *TaskMaster) NewTask(id, script string, tt TaskType, dbrps []DBRP, snapshotInterval time.Duration, ...) (*Task, error)
- func (tm *TaskMaster) NewTemplate(id, script string, tt TaskType) (*Template, error)
- func (tm *TaskMaster) Open() (err error)
- func (tm *TaskMaster) SnapshotTask(id string) (*TaskSnapshot, error)
- func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error)
- func (tm *TaskMaster) StopTask(id string) error
- func (tm *TaskMaster) StopTasks()
- func (tm *TaskMaster) Stream(name string) (StreamCollector, error)
- func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyLevel imodels.ConsistencyLevel, ...) error
- type TaskMasterLookup
- type TaskSnapshot
- type TaskType
- type Template
- type TimeDimension
- type UDFNode
- type UDFProcess
- func (p *UDFProcess) Abort(err error)
- func (p *UDFProcess) BatchIn() chan<- models.Batch
- func (p *UDFProcess) BatchOut() <-chan models.Batch
- func (p *UDFProcess) Close() error
- func (p *UDFProcess) Info() (udf.Info, error)
- func (p *UDFProcess) Init(options []*udf.Option) error
- func (p *UDFProcess) Open() error
- func (p *UDFProcess) PointIn() chan<- models.Point
- func (p *UDFProcess) PointOut() <-chan models.Point
- func (p *UDFProcess) Restore(snapshot []byte) error
- func (p *UDFProcess) Snapshot() ([]byte, error)
- type UDFService
- type UDFSocket
- func (s *UDFSocket) Abort(err error)
- func (s *UDFSocket) BatchIn() chan<- models.Batch
- func (s *UDFSocket) BatchOut() <-chan models.Batch
- func (s *UDFSocket) Close() error
- func (s *UDFSocket) Info() (udf.Info, error)
- func (s *UDFSocket) Init(options []*udf.Option) error
- func (s *UDFSocket) Open() error
- func (s *UDFSocket) PointIn() chan<- models.Point
- func (s *UDFSocket) PointOut() <-chan models.Point
- func (s *UDFSocket) Restore(snapshot []byte) error
- func (s *UDFSocket) Snapshot() ([]byte, error)
- type UnionNode
- type WhereNode
- type WindowNode
Constants ¶
const ( // List of names for top-level exported vars ClusterIDVarName = "cluster_id" ServerIDVarName = "server_id" HostVarName = "host" ProductVarName = "product" VersionVarName = "version" NumTasksVarName = "num_tasks" NumEnabledTasksVarName = "num_enabled_tasks" NumSubscriptionsVarName = "num_subscriptions" UptimeVarName = "uptime" // The name of the product Product = "kapacitor" )
const (
MainTaskMaster = "main"
)
Variables ¶
var ( // Global expvars NumTasksVar = &kexpvar.Int{} NumEnabledTasksVar = &kexpvar.Int{} NumSubscriptionsVar = &kexpvar.Int{} ClusterIDVar = &kexpvar.String{} ServerIDVar = &kexpvar.String{} HostVar = &kexpvar.String{} ProductVar = &kexpvar.String{} VersionVar = &kexpvar.String{} )
var ErrAborted = errors.New("edged aborted")
var ErrEmptyEmit = errors.New("error call to emit produced no results")
var ErrTaskMasterClosed = errors.New("TaskMaster is closed")
var ErrTaskMasterOpen = errors.New("TaskMaster is open")
var ErrWrongTaskType = errors.New("wrong task type")
Functions ¶
func ConvertResultTimes ¶ added in v0.10.1
func ConvertResultTimes(r *Result)
func CreateDBRPMap ¶
func EvalPredicate ¶
func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, now time.Time, fields models.Fields, tags models.Tags) (bool, error)
EvalPredicate - Evaluate a given expression as a boolean predicate against a set of fields and tags
func NewStatistics ¶
NewStatistics creates an expvar-based map. Within there "name" is the Measurement name, "tags" are the tags, and values are placed at the key "values". The "values" map is returned so that statistics can be set.
func ReplayBatchFromChan ¶ added in v1.0.0
func ReplayBatchFromChan(clck clock.Clock, batches []<-chan models.Batch, collectors []BatchCollector, recTime bool) <-chan error
Replay batch data from a channel source.
func ReplayBatchFromIO ¶ added in v1.0.0
func ReplayBatchFromIO(clck clock.Clock, data []io.ReadCloser, collectors []BatchCollector, recTime bool) <-chan error
Replay batch data from an IO source.
func ReplayStreamFromChan ¶ added in v1.0.0
func ReplayStreamFromChan(clck clock.Clock, points <-chan models.Point, collector StreamCollector, recTime bool) <-chan error
Replay stream data from a channel source.
func ReplayStreamFromIO ¶ added in v1.0.0
func ReplayStreamFromIO(clck clock.Clock, data io.ReadCloser, collector StreamCollector, recTime bool, precision string) <-chan error
Replay stream data from an IO source.
Types ¶
type AlertHandler ¶
type AlertHandler func(ad *AlertData)
type AlertLevel ¶
type AlertLevel int
const ( OKAlert AlertLevel = iota InfoAlert WarnAlert CritAlert )
func (AlertLevel) MarshalText ¶
func (l AlertLevel) MarshalText() ([]byte, error)
func (AlertLevel) String ¶
func (l AlertLevel) String() string
func (*AlertLevel) UnmarshalText ¶ added in v0.10.1
func (l *AlertLevel) UnmarshalText(text []byte) error
type BatchCollector ¶
type BatchNode ¶
type BatchNode struct {
// contains filtered or unexported fields
}
type BatchQueries ¶ added in v1.0.0
type CombineNode ¶ added in v1.0.0
type CombineNode struct {
// contains filtered or unexported fields
}
type CommandInfo ¶ added in v0.13.0
Necessary information to create a new command
func (CommandInfo) NewCommand ¶ added in v0.13.0
func (ci CommandInfo) NewCommand() Command
Create a new Command using golang exec package and the information.
type DefaultNode ¶ added in v0.13.0
type DefaultNode struct {
// contains filtered or unexported fields
}
type DeleteNode ¶ added in v1.0.0
type DeleteNode struct {
// contains filtered or unexported fields
}
type DerivativeNode ¶
type DerivativeNode struct {
// contains filtered or unexported fields
}
type Edge ¶
type Edge struct {
// contains filtered or unexported fields
}
func (*Edge) Abort ¶ added in v0.2.1
func (e *Edge) Abort()
Abort all next and collect calls. Items in flight may or may not be processed.
type ExecutingTask ¶
type ExecutingTask struct { Task *Task // contains filtered or unexported fields }
A task that is ready for execution.
func NewExecutingTask ¶
func NewExecutingTask(tm *TaskMaster, t *Task) (*ExecutingTask, error)
Create a new ExecutingTask from a defined Task within the context of a TaskMaster.
func (*ExecutingTask) BatchCount ¶
func (et *ExecutingTask) BatchCount() (int, error)
func (*ExecutingTask) BatchQueries ¶
func (et *ExecutingTask) BatchQueries(start, stop time.Time) ([]BatchQueries, error)
Get the batch queries that the batcher will run between times `start` and `stop`.
func (*ExecutingTask) EDot ¶ added in v0.2.3
func (et *ExecutingTask) EDot(labels bool) []byte
Return a graphviz .dot formatted byte array. Label edges with relevant execution information.
func (*ExecutingTask) ExecutionStats ¶ added in v0.11.0
func (et *ExecutingTask) ExecutionStats() (ExecutionStats, error)
func (*ExecutingTask) GetOutput ¶
func (et *ExecutingTask) GetOutput(name string) (Output, error)
Get a named output.
func (*ExecutingTask) Snapshot ¶ added in v0.10.0
func (et *ExecutingTask) Snapshot() (*TaskSnapshot, error)
func (*ExecutingTask) StartBatching ¶
func (et *ExecutingTask) StartBatching() error
Instruct source batch node to start querying and sending batches of data
func (*ExecutingTask) StopStats ¶ added in v0.11.0
func (et *ExecutingTask) StopStats()
Stop all stats nodes
func (*ExecutingTask) Wait ¶ added in v0.11.0
func (et *ExecutingTask) Wait() error
Wait till the task finishes and return any error
type ExecutionStats ¶ added in v0.11.0
type FlattenNode ¶ added in v1.0.0
type FlattenNode struct {
// contains filtered or unexported fields
}
type GroupByNode ¶
type GroupByNode struct {
// contains filtered or unexported fields
}
type HTTPOutNode ¶
type HTTPOutNode struct {
// contains filtered or unexported fields
}
func (*HTTPOutNode) Endpoint ¶
func (h *HTTPOutNode) Endpoint() string
type InfluxDBOutNode ¶
type InfluxDBOutNode struct {
// contains filtered or unexported fields
}
type InfluxQLNode ¶ added in v0.11.0
type InfluxQLNode struct {
// contains filtered or unexported fields
}
type MaxDuration ¶ added in v0.11.0
type MaxDuration struct {
// contains filtered or unexported fields
}
MaxDuration is a 64-bit int variable representing a duration in nanoseconds, that satisfies the expvar.Var interface. When setting a value it will only be set if it is greater than the current value.
func (*MaxDuration) IntValue ¶ added in v0.11.0
func (v *MaxDuration) IntValue() int64
func (*MaxDuration) Set ¶ added in v0.11.0
func (v *MaxDuration) Set(next int64)
Set sets value if it is greater than current value. If set was successful and a setter exists, will pass on value to setter.
func (*MaxDuration) String ¶ added in v0.11.0
func (v *MaxDuration) String() string
func (*MaxDuration) StringValue ¶ added in v1.0.0
func (v *MaxDuration) StringValue() string
type Node ¶
type Node interface { pipeline.Node // wait for the node to finish processing and return any errors Wait() error // contains filtered or unexported methods }
A node that can be in an executor.
type NoopMetaClient ¶ added in v1.0.0
type NoopMetaClient struct{}
func (*NoopMetaClient) Authenticate ¶ added in v1.0.0
func (m *NoopMetaClient) Authenticate(username, password string) (ui *meta.UserInfo, err error)
func (*NoopMetaClient) CreateDatabase ¶ added in v1.0.0
func (m *NoopMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error)
func (*NoopMetaClient) CreateDatabaseWithRetentionPolicy ¶ added in v1.0.0
func (m *NoopMetaClient) CreateDatabaseWithRetentionPolicy(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error)
func (*NoopMetaClient) CreateRetentionPolicy ¶ added in v1.0.0
func (m *NoopMetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
func (*NoopMetaClient) Database ¶ added in v1.0.0
func (m *NoopMetaClient) Database(name string) *meta.DatabaseInfo
func (*NoopMetaClient) RetentionPolicy ¶ added in v1.0.0
func (m *NoopMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error)
func (*NoopMetaClient) Users ¶ added in v1.0.0
func (m *NoopMetaClient) Users() ([]meta.UserInfo, error)
func (*NoopMetaClient) WaitForLeader ¶ added in v1.0.0
func (m *NoopMetaClient) WaitForLeader(d time.Duration) error
type Output ¶
type Output interface {
Endpoint() string
}
An output of a pipeline. Still need to improve this interface to expose different types of outputs.
type Query ¶
type Query struct {
// contains filtered or unexported fields
}
func (*Query) Dimensions ¶
Set the dimensions on the query
func (*Query) Fill ¶
func (q *Query) Fill(option influxql.FillOption, value interface{})
type QueryNode ¶ added in v0.13.0
type QueryNode struct {
// contains filtered or unexported fields
}
func (*QueryNode) DBRPs ¶ added in v0.13.0
Return list of databases and retention policies the batcher will query.
func (*QueryNode) GroupByMeasurement ¶ added in v1.0.0
type SampleNode ¶
type SampleNode struct {
// contains filtered or unexported fields
}
type ShiftNode ¶ added in v0.11.0
type ShiftNode struct {
// contains filtered or unexported fields
}
type Socket ¶ added in v0.13.0
func NewSocketConn ¶ added in v1.0.0
type StatsData ¶ added in v0.2.1
type StatsData struct { Name string `json:"name"` Tags map[string]string `json:"tags"` Values map[string]interface{} `json:"values"` }
func GetStatsData ¶
Return all stats data from the expvars.
type StatsNode ¶ added in v0.10.0
type StatsNode struct {
// contains filtered or unexported fields
}
type StreamCollector ¶
type StreamNode ¶
type StreamNode struct {
// contains filtered or unexported fields
}
type Task ¶
type Task struct { ID string Pipeline *pipeline.Pipeline Type TaskType DBRPs []DBRP SnapshotInterval time.Duration }
The complete definition of a task, its id, pipeline and type.
func (*Task) Measurements ¶ added in v0.13.0
Returns all the measurements from a FromNode.
type TaskMaster ¶
type TaskMaster struct { HTTPDService interface { AddRoutes([]httpd.Route) error DelRoutes([]httpd.Route) URL() string } TaskStore interface { SaveSnapshot(id string, snapshot *TaskSnapshot) error HasSnapshot(id string) bool LoadSnapshot(id string) (*TaskSnapshot, error) } DeadmanService pipeline.DeadmanService UDFService UDFService InfluxDBService interface { NewDefaultClient() (client.Client, error) NewNamedClient(name string) (client.Client, error) } SMTPService interface { Global() bool StateChangesOnly() bool SendMail(to []string, subject string, msg string) error } OpsGenieService interface { Global() bool Alert(teams []string, recipients []string, messageType, message, entityID string, t time.Time, details interface{}) error } VictorOpsService interface { Global() bool Alert(routingKey, messageType, message, entityID string, t time.Time, extra interface{}) error } PagerDutyService interface { Global() bool Alert(serviceKey, incidentKey, desc string, level AlertLevel, details interface{}) error } SlackService interface { Global() bool StateChangesOnly() bool Alert(channel, message string, level AlertLevel) error } TelegramService interface { Global() bool StateChangesOnly() bool Alert(chatId, parseMode, message string, disableWebPagePreview, disableNotification bool) error } HipChatService interface { Global() bool StateChangesOnly() bool Alert(room, token, message string, level AlertLevel) error } AlertaService interface { Alert(token, resource, event, environment, severity, group, value, message, origin string, service []string, data interface{}) error } SensuService interface { Alert(name, output string, level AlertLevel) error } TalkService interface { Alert(title, text string) error } TimingService interface { NewTimer(timer.Setter) timer.Timer } LogService LogService // contains filtered or unexported fields }
An execution framework for a set of tasks.
func NewTaskMaster ¶
func NewTaskMaster(id string, l LogService) *TaskMaster
Create a new TaskMaster with the given id and log service.
func (*TaskMaster) BatchCollectors ¶
func (tm *TaskMaster) BatchCollectors(id string) []BatchCollector
func (*TaskMaster) Close ¶
func (tm *TaskMaster) Close() error
func (*TaskMaster) CreateTICKScope ¶ added in v0.10.0
func (tm *TaskMaster) CreateTICKScope() *stateful.Scope
func (*TaskMaster) DelFork ¶
func (tm *TaskMaster) DelFork(id string)
func (*TaskMaster) Drain ¶ added in v0.2.1
func (tm *TaskMaster) Drain()
func (*TaskMaster) ExecutingDot ¶ added in v0.2.3
func (tm *TaskMaster) ExecutingDot(id string, labels bool) string
func (*TaskMaster) ExecutionStats ¶ added in v0.11.0
func (tm *TaskMaster) ExecutionStats(id string) (ExecutionStats, error)
func (*TaskMaster) IsExecuting ¶ added in v0.2.1
func (tm *TaskMaster) IsExecuting(id string) bool
func (*TaskMaster) New ¶
func (tm *TaskMaster) New(id string) *TaskMaster
Returns a new TaskMaster instance with the same services as the current one.
func (*TaskMaster) NewTask ¶ added in v0.10.0
func (tm *TaskMaster) NewTask( id, script string, tt TaskType, dbrps []DBRP, snapshotInterval time.Duration, vars map[string]tick.Var, ) (*Task, error)
Create a new task in the context of a TaskMaster
func (*TaskMaster) NewTemplate ¶ added in v1.0.0
func (tm *TaskMaster) NewTemplate( id, script string, tt TaskType, ) (*Template, error)
Create a new template in the context of a TaskMaster
func (*TaskMaster) Open ¶
func (tm *TaskMaster) Open() (err error)
func (*TaskMaster) SnapshotTask ¶ added in v0.10.0
func (tm *TaskMaster) SnapshotTask(id string) (*TaskSnapshot, error)
func (*TaskMaster) StartTask ¶
func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error)
func (*TaskMaster) StopTask ¶
func (tm *TaskMaster) StopTask(id string) error
func (*TaskMaster) StopTasks ¶ added in v0.10.1
func (tm *TaskMaster) StopTasks()
func (*TaskMaster) Stream ¶
func (tm *TaskMaster) Stream(name string) (StreamCollector, error)
func (*TaskMaster) WritePoints ¶ added in v0.2.1
func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyLevel imodels.ConsistencyLevel, points []imodels.Point) error
type TaskMasterLookup ¶ added in v1.0.0
func NewTaskMasterLookup ¶ added in v1.0.0
func NewTaskMasterLookup() *TaskMasterLookup
func (*TaskMasterLookup) Delete ¶ added in v1.0.0
func (tml *TaskMasterLookup) Delete(tm *TaskMaster)
func (*TaskMasterLookup) Get ¶ added in v1.0.0
func (tml *TaskMasterLookup) Get(id string) *TaskMaster
func (*TaskMasterLookup) Main ¶ added in v1.0.0
func (tml *TaskMasterLookup) Main() *TaskMaster
func (*TaskMasterLookup) Set ¶ added in v1.0.0
func (tml *TaskMasterLookup) Set(tm *TaskMaster)
type TaskSnapshot ¶ added in v0.10.0
type TaskType ¶
type TaskType int
The type of a task
func (TaskType) MarshalText ¶ added in v0.13.0
func (*TaskType) UnmarshalText ¶ added in v0.13.0
type TimeDimension ¶ added in v1.0.0
type UDFNode ¶ added in v0.10.0
type UDFNode struct {
// contains filtered or unexported fields
}
User defined function
type UDFProcess ¶ added in v0.10.0
type UDFProcess struct {
// contains filtered or unexported fields
}
UDFProcess wraps an external process and sends and receives data over STDIN and STDOUT. Lines received over STDERR are logged via normal Kapacitor logging.
func NewUDFProcess ¶ added in v0.10.0
func (*UDFProcess) Abort ¶ added in v0.10.0
func (p *UDFProcess) Abort(err error)
func (*UDFProcess) BatchIn ¶ added in v0.10.0
func (p *UDFProcess) BatchIn() chan<- models.Batch
func (*UDFProcess) BatchOut ¶ added in v0.10.0
func (p *UDFProcess) BatchOut() <-chan models.Batch
func (*UDFProcess) Close ¶ added in v0.13.0
func (p *UDFProcess) Close() error
Stop the UDFProcess cleanly.
Calling Close should only be done once the owner has stopped writing to the *In channel, at which point the remaining data will be processed and the subprocess will be allowed to exit cleanly.
func (*UDFProcess) PointIn ¶ added in v0.10.0
func (p *UDFProcess) PointIn() chan<- models.Point
func (*UDFProcess) PointOut ¶ added in v0.10.0
func (p *UDFProcess) PointOut() <-chan models.Point
func (*UDFProcess) Restore ¶ added in v0.10.0
func (p *UDFProcess) Restore(snapshot []byte) error
func (*UDFProcess) Snapshot ¶ added in v0.10.0
func (p *UDFProcess) Snapshot() ([]byte, error)
type UDFService ¶ added in v0.10.0
type UDFSocket ¶ added in v0.13.0
type UDFSocket struct {
// contains filtered or unexported fields
}
func NewUDFSocket ¶ added in v0.13.0
type WindowNode ¶
type WindowNode struct {
// contains filtered or unexported fields
}
Source Files ¶
- alert.go
- batch.go
- combine.go
- default.go
- delete.go
- derivative.go
- doc.go
- edge.go
- eval.go
- expr.go
- flatten.go
- global_stats.go
- group_by.go
- http_out.go
- influxdb_out.go
- influxql.gen.go
- influxql.go
- join.go
- log.go
- metaclient.go
- node.go
- noop.go
- output.go
- query.go
- replay.go
- result.go
- sample.go
- shift.go
- stats.go
- stream.go
- task.go
- task_master.go
- template.go
- udf.go
- union.go
- where.go
- window.go
Directories ¶
Path | Synopsis |
---|---|
client
|
|
v1
Kapacitor HTTP API client written in Go
|
Kapacitor HTTP API client written in Go |
A clock that provides blocking calls that wait until absolute times have occurred.
|
A clock that provides blocking calls that wait until absolute times have occurred. |
cmd
|
|
This package is a fork of the golang expvar package's expvar.Var types.
|
This package is a fork of the golang expvar package's expvar.Var types. |
Contains integration and end-to-end tests
|
Contains integration and end-to-end tests |
Provides a set of structures for passing data around Kapacitor.
|
Provides a set of structures for passing data around Kapacitor. |
Provides an API for constructing data processing pipelines.
|
Provides an API for constructing data processing pipelines. |
Provides a server type for starting and configuring a Kapacitor server.
|
Provides a server type for starting and configuring a Kapacitor server. |
services
|
|
httpd
Provides an HTTP API exposing many components of Kapacitor.
|
Provides an HTTP API exposing many components of Kapacitor. |
reporting
Sends anonymous reports to InfluxData
|
Sends anonymous reports to InfluxData |
stats
The stats service collects the exported stats and submits them to the Kapacitor stream under the configured database and retention policy.
|
The stats service collects the exported stats and submits them to the Kapacitor stream under the configured database and retention policy. |
storage
The storage package provides a key/value based interface for storing Kapacitor metadata.
|
The storage package provides a key/value based interface for storing Kapacitor metadata. |
TICKscript is a simple invocation chaining DSL.
|
TICKscript is a simple invocation chaining DSL. |
cmd/tickdoc
Tickdoc is a simple utility similar to godoc that generates documentation from comments.
|
Tickdoc is a simple utility similar to godoc that generates documentation from comments. |
Package udf is a generated protocol buffer package.
|
Package udf is a generated protocol buffer package. |