Documentation ¶
Index ¶
- Variables
- func CloneSet(in map[string]struct{}) map[string]struct{}
- func EnqueueEnhancedTelem[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](ch chan<- T, data T)
- func GetValidatedBootstrapPeers(specPeers []string, configPeers []commontypes.BootstrapperLocator) ([]commontypes.BootstrapperLocator, error)
- func MaybeEnqueueEnhancedTelem(jb job.Job, ch chan<- EnhancedTelemetryMercuryData, ...)
- func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, ...) ocr1types.DataSource
- func NewDataSourceV2(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, ...) median.DataSource
- func NewInMemoryDataSource(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger) median.DataSource
- func ParseBootstrapPeers(peers []string) (bootstrapPeers []commontypes.BootstrapperLocator, err error)
- func ShouldCollectEnhancedTelemetry(job *job.Job) bool
- func ShouldCollectEnhancedTelemetryMercury(jb job.Job) bool
- func ValidateExplicitlySetKeys(tree *toml.Tree, expected map[string]struct{}, notExpected map[string]struct{}, ...) error
- func ValidatePeerWrapperConfig(config config.P2P) error
- type ArbitrumBlockTranslator
- type BlockTranslator
- type Config
- type DiscovererDatabase
- type EnhancedTelemetryData
- type EnhancedTelemetryMercuryData
- type EnhancedTelemetryService
- type ObservationTimestamp
- type P2PPeer
- type PeerWrapperOCRConfig
- type Pstorewrapper
- type RunResultSaver
- type Saver
- type SingletonPeerWrapper
- func (p *SingletonPeerWrapper) Close() error
- func (p *SingletonPeerWrapper) HealthReport() map[string]error
- func (p *SingletonPeerWrapper) IsStarted() bool
- func (p *SingletonPeerWrapper) Name() string
- func (p *SingletonPeerWrapper) P2PConfig() config.P2P
- func (p *SingletonPeerWrapper) Start(context.Context) error
- type Transmitter
Constants ¶
This section is empty.
Variables ¶
var (
	PromBridgeJsonParseValues = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "bridge_json_parse_values",
		Help: "Values returned by json_parse for bridge task",
	}, []string{"job_id", "job_name", "bridge_name", "task_id"})

	PromOcrMedianValues = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "ocr_median_values",
		Help: "Median value returned by ocr job",
	}, []string{"job_id", "job_name"})
)
Functions ¶
func EnqueueEnhancedTelem ¶ added in v2.2.0
func EnqueueEnhancedTelem[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](ch chan<- T, data T)
EnqueueEnhancedTelem sends data to the telemetry channel for processing
func GetValidatedBootstrapPeers ¶
func GetValidatedBootstrapPeers(specPeers []string, configPeers []commontypes.BootstrapperLocator) ([]commontypes.BootstrapperLocator, error)
GetValidatedBootstrapPeers will error unless at least one valid bootstrap peer is found
func MaybeEnqueueEnhancedTelem ¶ added in v2.8.0
func MaybeEnqueueEnhancedTelem(jb job.Job, ch chan<- EnhancedTelemetryMercuryData, data EnhancedTelemetryMercuryData)
MaybeEnqueueEnhancedTelem sends data to the telemetry channel for processing
func NewDataSourceV1 ¶
func NewDataSourceV2 ¶
func NewInMemoryDataSource ¶
func ParseBootstrapPeers ¶
func ParseBootstrapPeers(peers []string) (bootstrapPeers []commontypes.BootstrapperLocator, err error)
func ShouldCollectEnhancedTelemetry ¶ added in v2.2.0
func ShouldCollectEnhancedTelemetry(job *job.Job) bool
ShouldCollectEnhancedTelemetry returns whether EA telemetry should be collected.
func ShouldCollectEnhancedTelemetryMercury ¶ added in v2.2.0
func ShouldCollectEnhancedTelemetryMercury(jb job.Job) bool
ShouldCollectEnhancedTelemetryMercury checks if enhanced telemetry should be collected and sent.
func ValidateExplicitlySetKeys ¶
func ValidateExplicitlySetKeys(tree *toml.Tree, expected map[string]struct{}, notExpected map[string]struct{}, peerType string) error
ValidateExplicitlySetKeys checks if the values in expected are present and the values in notExpected are not present in the toml tree. Works on top level keys only.
Types ¶
type ArbitrumBlockTranslator ¶
type ArbitrumBlockTranslator struct {
// contains filtered or unexported fields
}
ArbitrumBlockTranslator uses Arbitrum's special L1BlockNumber to optimise log lookups. Performance matters here, hence the aggressive use of the cache. We want to minimise fetches because calling eth_getBlockByNumber is relatively expensive.
func NewArbitrumBlockTranslator ¶
func NewArbitrumBlockTranslator(ethClient evmclient.Client, lggr logger.Logger) *ArbitrumBlockTranslator
NewArbitrumBlockTranslator returns a concrete ArbitrumBlockTranslator
func (*ArbitrumBlockTranslator) BinarySearch ¶
func (a *ArbitrumBlockTranslator) BinarySearch(ctx context.Context, targetL1 int64) (l2lowerBound *big.Int, l2upperBound *big.Int, err error)
BinarySearch uses both cache and RPC calls to find the smallest possible range of L2 block numbers that encompasses the given L1 block number
Imagine as a virtual array of L1 block numbers indexed by L2 block numbers. L1 values are likely duplicated, so it looks something like [42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430, ...]. The theoretical max difference between L1 values is typically about 5; the "worst case" is 6545, but it can be arbitrarily high if the sequencer is broken. The returned range of L2s, from leftmost through rightmost, represents all possible L2s that correspond to the L1 value we are looking for. nil can be returned as the rightmost value if the range has no upper bound.
func (*ArbitrumBlockTranslator) NumberToQueryRange ¶
func (a *ArbitrumBlockTranslator) NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int)
NumberToQueryRange implements BlockTranslator interface
type BlockTranslator ¶
type BlockTranslator interface {
NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int)
}
BlockTranslator converts emitted block numbers (from block.number) into a block number range suitable for query in FilterLogs
func NewBlockTranslator ¶
NewBlockTranslator returns the block translator for the given chain
type DiscovererDatabase ¶
type DiscovererDatabase struct {
// contains filtered or unexported fields
}
func NewDiscovererDatabase ¶
func NewDiscovererDatabase(db *sql.DB, peerID p2ppeer.ID) *DiscovererDatabase
func (*DiscovererDatabase) ReadAnnouncements ¶
func (d *DiscovererDatabase) ReadAnnouncements(ctx context.Context, peerIDs []string) (map[string][]byte, error)
ReadAnnouncements returns one serialized announcement (if available) for each of the peerIDs in the form of a map keyed by each announcement's corresponding peer ID.
func (*DiscovererDatabase) StoreAnnouncement ¶
func (d *DiscovererDatabase) StoreAnnouncement(ctx context.Context, peerID string, ann []byte) error
StoreAnnouncement has key-value-store semantics and stores a peerID (key) and an associated serialized announcement (value).
type EnhancedTelemetryData ¶ added in v2.2.0
type EnhancedTelemetryData struct {
	TaskRunResults pipeline.TaskRunResults
	FinalResults   pipeline.FinalResult
	RepTimestamp   ObservationTimestamp
}
type EnhancedTelemetryMercuryData ¶ added in v2.2.0
type EnhancedTelemetryMercuryData struct {
	V1Observation              *relaymercuryv1.Observation
	V2Observation              *relaymercuryv2.Observation
	V3Observation              *relaymercuryv3.Observation
	TaskRunResults             pipeline.TaskRunResults
	RepTimestamp               ocrtypes.ReportTimestamp
	FeedVersion                mercuryutils.FeedVersion
	FetchMaxFinalizedTimestamp bool
	IsLinkFeed                 bool
	IsNativeFeed               bool
}
type EnhancedTelemetryService ¶ added in v2.2.0
type EnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData] struct {
	services.StateMachine
	// contains filtered or unexported fields
}
func NewEnhancedTelemetryService ¶ added in v2.2.0
func NewEnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](job *job.Job, chTelem <-chan T, done chan struct{}, me commontypes.MonitoringEndpoint, lggr logger.Logger) *EnhancedTelemetryService[T]
func (*EnhancedTelemetryService[T]) Close ¶ added in v2.2.0
func (e *EnhancedTelemetryService[T]) Close() error
type ObservationTimestamp ¶ added in v2.1.0
ObservationTimestamp abstracts ocr2types.ReportTimestamp and ocr1types.ReportTimestamp
type PeerWrapperOCRConfig ¶ added in v2.3.0
type PeerWrapperOCRConfig interface {
TraceLogging() bool
}
type Pstorewrapper ¶
type Pstorewrapper struct {
	services.StateMachine
	Peerstore p2ppeerstore.Peerstore
	// contains filtered or unexported fields
}
func NewPeerstoreWrapper ¶
func NewPeerstoreWrapper(db *sqlx.DB, writeInterval time.Duration, peerID p2pkey.PeerID, lggr logger.Logger, cfg pg.QConfig) (*Pstorewrapper, error)
NewPeerstoreWrapper creates a new database-backed peerstore wrapper scoped to the given jobID. Multiple peerstore wrappers should not be instantiated with the same jobID.
func (*Pstorewrapper) Close ¶
func (p *Pstorewrapper) Close() error
func (*Pstorewrapper) Start ¶
func (p *Pstorewrapper) Start() error
func (*Pstorewrapper) WriteToDB ¶
func (p *Pstorewrapper) WriteToDB() error
type RunResultSaver ¶
type RunResultSaver struct {
	services.StateMachine
	// contains filtered or unexported fields
}
func NewResultRunSaver ¶
func (*RunResultSaver) Close ¶
func (r *RunResultSaver) Close() error
func (*RunResultSaver) HealthReport ¶ added in v2.6.0
func (r *RunResultSaver) HealthReport() map[string]error
func (*RunResultSaver) Name ¶ added in v2.6.0
func (r *RunResultSaver) Name() string
func (*RunResultSaver) Save ¶ added in v2.8.0
func (r *RunResultSaver) Save(run *pipeline.Run)
Save sends the run on the internal `runResults` channel for saving. IMPORTANT: if the `runResults` pipeline is full, the run will be dropped.
type SingletonPeerWrapper ¶
type SingletonPeerWrapper struct {
	services.StateMachine
	PeerID p2pkey.PeerID

	// OCR1 peer adapter
	Peer1 *peerAdapterOCR1
	// OCR2 peer adapter
	Peer2 *peerAdapterOCR2
	// contains filtered or unexported fields
}
SingletonPeerWrapper manages all libocr peers for the application
func NewSingletonPeerWrapper ¶
func NewSingletonPeerWrapper(keyStore keystore.Master, p2pCfg config.P2P, ocrCfg PeerWrapperOCRConfig, dbConfig pg.QConfig, db *sqlx.DB, lggr logger.Logger) *SingletonPeerWrapper
NewSingletonPeerWrapper creates a new peer based on the p2p keys in the keystore. It currently only supports one peerID/key. It should be fairly easy to modify it to support multiple peerIDs/keys using e.g. a map.
func (*SingletonPeerWrapper) Close ¶
func (p *SingletonPeerWrapper) Close() error
Close closes the peer and peerstore
func (*SingletonPeerWrapper) HealthReport ¶
func (p *SingletonPeerWrapper) HealthReport() map[string]error
func (*SingletonPeerWrapper) IsStarted ¶
func (p *SingletonPeerWrapper) IsStarted() bool
func (*SingletonPeerWrapper) Name ¶
func (p *SingletonPeerWrapper) Name() string
func (*SingletonPeerWrapper) P2PConfig ¶ added in v2.3.0
func (p *SingletonPeerWrapper) P2PConfig() config.P2P
type Transmitter ¶
type Transmitter interface {
	CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error
	FromAddress() common.Address
}
func NewPipelineTransmitter ¶
func NewPipelineTransmitter(
	lgr logger.Logger,
	fromAddress common.Address,
	gasLimit uint32,
	effectiveTransmitterAddress common.Address,
	strategy types.TxStrategy,
	checker txmgr.TransmitCheckerSpec,
	pr pipeline.Runner,
	spec job.Job,
	chainID string,
) Transmitter
NewPipelineTransmitter creates a new eth transmitter using the job pipeline mechanism
func NewTransmitter ¶
func NewTransmitter(
	txm txManager,
	fromAddresses []common.Address,
	gasLimit uint32,
	effectiveTransmitterAddress common.Address,
	strategy types.TxStrategy,
	checker txmgr.TransmitCheckerSpec,
	chainID *big.Int,
	keystore roundRobinKeystore,
) (Transmitter, error)
NewTransmitter creates a new eth transmitter