Documentation ¶
Index ¶
- Variables
- func NewJaegerTraceProvider(serviceName, agentEndpoint string, sampleRatio float64) (*tracesdk.TracerProvider, error)
- func RecordCount(ctx context.Context, m *stats.Int64Measure, count int)
- func RecordDec(ctx context.Context, m *stats.Int64Measure)
- func RecordInc(ctx context.Context, m *stats.Int64Measure)
- func SinceInMilliseconds(startTime time.Time) float64
- func Timer(ctx context.Context, m *stats.Float64Measure) func()
- func WithTagValue(ctx context.Context, k tag.Key, v string) context.Context
Constants ¶
This section is empty.
Variables ¶
var (
Version, _ = tag.NewKey("version")
TaskType, _ = tag.NewKey("task") // name of task processor
Job, _ = tag.NewKey("job") // name of job
JobType, _ = tag.NewKey("job_type") // type of job (walk, watch, fill, find, watch-notify, walk-notify, etc.)
Name, _ = tag.NewKey("name") // name of running instance of visor
Table, _ = tag.NewKey("table") // name of table data is persisted for
ConnState, _ = tag.NewKey("conn_state")
API, _ = tag.NewKey("api") // name of method on lotus api
ActorCode, _ = tag.NewKey("actor_code") // human readable code of actor being processed
// distributed tipset worker
QueueName = tag.MustNewKey("queue")
)
var (
    LilyInfo                = stats.Int64("lily_info", "Arbitrary counter to tag lily info to", stats.UnitDimensionless)
    ProcessingDuration      = stats.Float64("processing_duration_ms", "Time taken to process a task", stats.UnitMilliseconds)
    StateExtractionDuration = stats.Float64("state_extraction_duration_ms", "Time taken to extract an actor state", stats.UnitMilliseconds)
    PersistDuration         = stats.Float64("persist_duration_ms", "Duration of a models persist operation", stats.UnitMilliseconds)
    PersistModel            = stats.Int64("persist_model", "Number of models persisted", stats.UnitDimensionless)
    DBConns                 = stats.Int64("db_conns", "Database connections held", stats.UnitDimensionless)
    TipsetHeight            = stats.Int64("tipset_height", "The height of the tipset being processed by a task", stats.UnitDimensionless)
    ProcessingFailure       = stats.Int64("processing_failure", "Number of processing failures", stats.UnitDimensionless)
    PersistFailure          = stats.Int64("persist_failure", "Number of persistence failures", stats.UnitDimensionless)
    WatchHeight             = stats.Int64("watch_height", "The height of the tipset last seen by the watch command", stats.UnitDimensionless)
    TipSetSkip              = stats.Int64("tipset_skip", "Number of tipsets that were not processed. This is an indication that lily cannot keep up with the chain.", stats.UnitDimensionless)
    JobStart                = stats.Int64("job_start", "Number of jobs started", stats.UnitDimensionless)
    JobRunning              = stats.Int64("job_running", "Number of jobs currently running", stats.UnitDimensionless)
    JobComplete             = stats.Int64("job_complete", "Number of jobs completed without error", stats.UnitDimensionless)
    JobError                = stats.Int64("job_error", "Number of jobs stopped due to a fatal error", stats.UnitDimensionless)
    JobTimeout              = stats.Int64("job_timeout", "Number of jobs stopped due to taking longer than expected", stats.UnitDimensionless)
    TipSetCacheSize         = stats.Int64("tipset_cache_size", "Configured size of the tipset cache (aka confidence).", stats.UnitDimensionless)
    TipSetCacheDepth        = stats.Int64("tipset_cache_depth", "Number of tipsets currently in the tipset cache.", stats.UnitDimensionless)
    TipSetCacheEmptyRevert  = stats.Int64("tipset_cache_empty_revert", "Number of revert operations performed on an empty tipset cache. This is an indication that a chain reorg is underway that is deeper than the cache size and includes tipsets that have already been read from the cache.", stats.UnitDimensionless)
    WatcherActiveWorkers    = stats.Int64("watcher_active_workers", "Current number of tipset indexers executing", stats.UnitDimensionless)
    WatcherWaitingWorkers   = stats.Int64("watcher_waiting_workers", "Current number of tipset indexers waiting to execute", stats.UnitDimensionless)

    DataSourceSectorDiffCacheHit               = stats.Int64("data_source_sector_diff_cache_hit", "Number of cache hits for sector diff", stats.UnitDimensionless)
    DataSourceSectorDiffRead                   = stats.Int64("data_source_sector_diff_read", "Number of reads for sector diff", stats.UnitDimensionless)
    DataSourcePreCommitDiffCacheHit            = stats.Int64("data_source_precommit_diff_cache_hit", "Number of cache hits for precommit diff", stats.UnitDimensionless)
    DataSourcePreCommitDiffRead                = stats.Int64("data_source_precommit_diff_read", "Number of reads for precommit diff", stats.UnitDimensionless)
    DataSourceMessageExecutionRead             = stats.Int64("data_source_message_execution_read", "Number of reads for message executions", stats.UnitDimensionless)
    DataSourceMessageExecutionCacheHit         = stats.Int64("data_source_message_execution_cache_hit", "Number of cache hits for message executions", stats.UnitDimensionless)
    DataSourceExecutedAndBlockMessagesRead     = stats.Int64("data_source_executed_block_messages_read", "Number of reads for executed block messages", stats.UnitDimensionless)
    DataSourceExecutedAndBlockMessagesCacheHit = stats.Int64("data_source_executed_block_messages_cache_hig", "Number of cache hits for executed block messages", stats.UnitDimensionless)
    DataSourceActorStateChangesFastDiff        = stats.Int64("data_source_actor_state_changes_fast_diff", "Number of fast diff operations performed for actor state changes", stats.UnitDimensionless)
    DataSourceActorStateChangesSlowDiff        = stats.Int64("data_source_actor_state_changes_slow_diff", "Number of slow diff operations performed for actor state changes", stats.UnitDimensionless)

    TipSetWorkerConcurrency   = stats.Int64("tipset_worker_concurrency", "Concurrency of tipset worker", stats.UnitDimensionless)
    TipSetWorkerQueuePriority = stats.Int64("tipset_worker_queue_priority", "Priority of tipset worker queue", stats.UnitDimensionless)
)
var DefaultViews = []*view.View{
    {Measure: LilyInfo, Aggregation: view.LastValue(), TagKeys: []tag.Key{Version}},
    {Measure: TipSetWorkerConcurrency, Aggregation: view.LastValue(), TagKeys: []tag.Key{}},
    {Measure: TipSetWorkerQueuePriority, Aggregation: view.LastValue(), TagKeys: []tag.Key{QueueName}},
    {Measure: DataSourceActorStateChangesFastDiff, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceActorStateChangesSlowDiff, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceMessageExecutionRead, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceMessageExecutionCacheHit, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceExecutedAndBlockMessagesRead, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceExecutedAndBlockMessagesCacheHit, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceSectorDiffCacheHit, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourceSectorDiffRead, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourcePreCommitDiffCacheHit, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: DataSourcePreCommitDiffRead, Aggregation: view.Count(), TagKeys: []tag.Key{Job, TaskType, Name}},
    {Measure: ProcessingDuration, Aggregation: defaultMillisecondsDistribution, TagKeys: []tag.Key{TaskType}},
    {Measure: StateExtractionDuration, Aggregation: defaultMillisecondsDistribution, TagKeys: []tag.Key{TaskType, ActorCode}},
    {Measure: PersistDuration, Aggregation: defaultMillisecondsDistribution, TagKeys: []tag.Key{TaskType, Table, ActorCode}},
    {Measure: DBConns, Aggregation: view.Count(), TagKeys: []tag.Key{ConnState}},
    {Measure: TipsetHeight, Aggregation: view.LastValue(), TagKeys: []tag.Key{TaskType, Job}},
    {Name: ProcessingFailure.Name() + "_total", Measure: ProcessingFailure, Aggregation: view.Count(), TagKeys: []tag.Key{TaskType, ActorCode}},
    {Name: PersistFailure.Name() + "_total", Measure: PersistFailure, Aggregation: view.Count(), TagKeys: []tag.Key{TaskType, Table, ActorCode}},
    {Measure: WatchHeight, Aggregation: view.LastValue(), TagKeys: []tag.Key{Job}},
    {Name: TipSetSkip.Name() + "_total", Measure: TipSetSkip, Aggregation: view.Sum(), TagKeys: []tag.Key{Job}},
    {Measure: JobRunning, Aggregation: view.Sum(), TagKeys: []tag.Key{Job, JobType}},
    {Name: JobStart.Name() + "_total", Measure: JobStart, Aggregation: view.Count(), TagKeys: []tag.Key{Job, JobType}},
    {Name: JobComplete.Name() + "_total", Measure: JobComplete, Aggregation: view.Count(), TagKeys: []tag.Key{Job, JobType}},
    {Name: JobError.Name() + "_total", Measure: JobError, Aggregation: view.Count(), TagKeys: []tag.Key{Job, JobType}},
    {Name: JobTimeout.Name() + "_total", Measure: JobTimeout, Aggregation: view.Count(), TagKeys: []tag.Key{Job, JobType}},
    {Name: PersistModel.Name() + "_total", Measure: PersistModel, Aggregation: view.Count(), TagKeys: []tag.Key{TaskType, Table}},
    {Measure: TipSetCacheSize, Aggregation: view.LastValue(), TagKeys: []tag.Key{Job}},
    {Measure: TipSetCacheDepth, Aggregation: view.LastValue(), TagKeys: []tag.Key{Job}},
    {Name: TipSetCacheEmptyRevert.Name() + "_total", Measure: TipSetCacheEmptyRevert, Aggregation: view.Sum(), TagKeys: []tag.Key{Job}},
    {Measure: WatcherActiveWorkers, Aggregation: view.LastValue(), TagKeys: []tag.Key{Job}},
    {Measure: WatcherWaitingWorkers, Aggregation: view.LastValue(), TagKeys: []tag.Key{Job}},
}
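The measures and views above are plain OpenCensus types, so they produce no data until the views are registered and measurements are recorded against a tagged context. Below is a minimal wiring sketch, assuming the package is imported as metrics from github.com/filecoin-project/lily/metrics (the import path is not stated on this page):

package main

import (
    "context"
    "log"

    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"

    metrics "github.com/filecoin-project/lily/metrics" // assumed import path
)

func main() {
    // Register the package's default views so recorded measurements are aggregated.
    if err := view.Register(metrics.DefaultViews...); err != nil {
        log.Fatal(err)
    }

    // Tag the context with one of the exported keys, then record a measurement.
    ctx, err := tag.New(context.Background(), tag.Insert(metrics.Version, "v0.0.0"))
    if err != nil {
        log.Fatal(err)
    }
    stats.Record(ctx, metrics.LilyInfo.M(1))
}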
Functions ¶
func NewJaegerTraceProvider ¶ added in v0.8.6
func NewJaegerTraceProvider(serviceName, agentEndpoint string, sampleRatio float64) (*tracesdk.TracerProvider, error)
NewJaegerTraceProvider returns a new and configured TracerProvider backed by Jaeger.
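A hedged sketch of installing the returned provider as the global OpenTelemetry tracer provider; the service name, agent endpoint, and sample ratio below are illustrative values, not defaults documented by this package:

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel"

    metrics "github.com/filecoin-project/lily/metrics" // assumed import path
)

func main() {
    ctx := context.Background()

    // Example values: a Jaeger agent on the default UDP port, sampling 25% of traces.
    tp, err := metrics.NewJaegerTraceProvider("lily", "localhost:6831", 0.25)
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = tp.Shutdown(ctx) }()

    // Install the provider globally so instrumented code obtains tracers from it.
    otel.SetTracerProvider(tp)

    _, span := otel.Tracer("example").Start(ctx, "do-work")
    defer span.End()
}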
func RecordCount ¶
func RecordCount(ctx context.Context, m *stats.Int64Measure, count int)
RecordCount is a convenience function that increments a counter by the given count.
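For example, a persist step that wrote a batch of rows could record the batch size (recordPersisted is a hypothetical helper; assumes the metrics import from the sketch above):

func recordPersisted(ctx context.Context, rowsWritten int) {
    // Counts rowsWritten against the PersistModel measure, using the tags carried by ctx.
    metrics.RecordCount(ctx, metrics.PersistModel, rowsWritten)
}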
func RecordDec ¶ added in v0.8.2
func RecordDec(ctx context.Context, m *stats.Int64Measure)
RecordDec is a convenience function that decrements a counter.
func RecordInc ¶
func RecordInc(ctx context.Context, m *stats.Int64Measure)
RecordInc is a convenience function that increments a counter.
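RecordInc and RecordDec pair naturally for gauge-like measures such as JobRunning, which DefaultViews aggregates with view.Sum. A sketch of that pattern (runJob is a hypothetical wrapper; assumes the metrics import from the first sketch):

func runJob(ctx context.Context, job func(context.Context) error) error {
    // Count the start, then keep the running gauge balanced with a deferred decrement.
    metrics.RecordInc(ctx, metrics.JobStart)
    metrics.RecordInc(ctx, metrics.JobRunning)
    defer metrics.RecordDec(ctx, metrics.JobRunning)
    return job(ctx)
}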
func SinceInMilliseconds ¶
func SinceInMilliseconds(startTime time.Time) float64
SinceInMilliseconds returns the number of milliseconds elapsed since the provided time as a float64.
func Timer ¶
func Timer(ctx context.Context, m *stats.Float64Measure) func()
func WithTagValue ¶
func WithTagValue(ctx context.Context, k tag.Key, v string) context.Context
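These helpers combine naturally: WithTagValue tags a context, and Timer presumably records the elapsed milliseconds to the given measure when the returned function is invoked (its behavior is not documented on this page). A hedged sketch (processTask and the task name are hypothetical; assumes the metrics import from the first sketch):

func processTask(ctx context.Context) {
    // Tag the context with a task name, then time the work into ProcessingDuration.
    // Assumes Timer records the elapsed duration when the returned func runs.
    ctx = metrics.WithTagValue(ctx, metrics.TaskType, "example_task")
    stop := metrics.Timer(ctx, metrics.ProcessingDuration)
    defer stop()

    // ... task work ...
}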
Types ¶
This section is empty.