ocrcommon

package
v2.12.0
Published: Jun 5, 2024 License: MIT Imports: 49 Imported by: 1

Documentation

Index

Constants

This section is empty.

Variables

var (
	PromBridgeJsonParseValues = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "bridge_json_parse_values",
		Help: "Values returned by json_parse for bridge task",
	},
		[]string{"job_id", "job_name", "bridge_name", "task_id"})

	PromOcrMedianValues = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "ocr_median_values",
		Help: "Median value returned by ocr job",
	},
		[]string{"job_id", "job_name"})
)
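A minimal sketch of how these gauge vectors are typically updated; the job and bridge label values below are hypothetical, and the import path is assumed from the chainlink v2 module layout:

package main

import (
	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

func main() {
	// Record the median value reported for a (hypothetical) OCR job.
	ocrcommon.PromOcrMedianValues.WithLabelValues("42", "eth-usd-median").Set(2034.56)

	// Record the value a bridge task parsed out of a JSON response.
	ocrcommon.PromBridgeJsonParseValues.WithLabelValues("42", "eth-usd-median", "my-bridge", "parse_price").Set(2034.55)
}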

Functions

func CloneSet

func CloneSet(in map[string]struct{}) map[string]struct{}

CloneSet returns a copy of the input map.
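A small runnable sketch of the copy semantics (the key names are arbitrary; the import path is assumed):

package main

import (
	"fmt"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

func main() {
	original := map[string]struct{}{"Endpoint": {}, "PeerID": {}}
	clone := ocrcommon.CloneSet(original)

	// Mutating the clone leaves the original set untouched.
	delete(clone, "PeerID")
	fmt.Println(len(original), len(clone)) // 2 1
}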

func EnqueueEnhancedTelem added in v2.2.0

func EnqueueEnhancedTelem[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](ch chan<- T, data T)

EnqueueEnhancedTelem sends data to the telemetry channel for processing

func GetValidatedBootstrapPeers

func GetValidatedBootstrapPeers(specPeers []string, configPeers []commontypes.BootstrapperLocator) ([]commontypes.BootstrapperLocator, error)

GetValidatedBootstrapPeers will error unless at least one valid bootstrap peer is found
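A hedged usage sketch; the locator string format (peer ID, then @host:port) and the values shown are placeholders, not working peers:

package main

import (
	"log"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

func main() {
	// Bootstrap peers from the job spec; config-level peers are passed as the second argument.
	specPeers := []string{"12D3KooWExamplePeerID@bootstrap.example.com:8000"}

	peers, err := ocrcommon.GetValidatedBootstrapPeers(specPeers, nil)
	if err != nil {
		// No valid bootstrap peer could be resolved from the spec or the config.
		log.Fatal(err)
	}
	log.Printf("using %d bootstrap peer(s)", len(peers))
}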

func MaybeEnqueueEnhancedTelem added in v2.8.0

func MaybeEnqueueEnhancedTelem(jb job.Job, ch chan<- EnhancedTelemetryMercuryData, data EnhancedTelemetryMercuryData)

MaybeEnqueueEnhancedTelem sends data to the telemetry channel for processing if enhanced telemetry is enabled for the given job

func NewDataSourceV1

func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s Saver, chEnhancedTelemetry chan EnhancedTelemetryData) ocr1types.DataSource

func NewDataSourceV2

func NewDataSourceV2(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s Saver, enhancedTelemChan chan EnhancedTelemetryData) median.DataSource

func NewInMemoryDataSource

func NewInMemoryDataSource(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger) median.DataSource

func NewOCRWrapper added in v2.12.0

func NewOCRWrapper(l logger.Logger, trace bool, saveError func(context.Context, string)) *ocrLoggerService

func ParseBootstrapPeers

func ParseBootstrapPeers(peers []string) (bootstrapPeers []commontypes.BootstrapperLocator, err error)

func ShouldCollectEnhancedTelemetry added in v2.2.0

func ShouldCollectEnhancedTelemetry(job *job.Job) bool

ShouldCollectEnhancedTelemetry returns whether EA telemetry should be collected

func ShouldCollectEnhancedTelemetryMercury added in v2.2.0

func ShouldCollectEnhancedTelemetryMercury(jb job.Job) bool

ShouldCollectEnhancedTelemetryMercury checks if enhanced telemetry should be collected and sent

func ValidateExplicitlySetKeys

func ValidateExplicitlySetKeys(tree *toml.Tree, expected map[string]struct{}, notExpected map[string]struct{}, peerType string) error

ValidateExplicitlySetKeys checks that the keys listed in expected are present in the toml tree and that the keys listed in notExpected are absent. Works on top-level keys only.
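A hedged usage sketch, assuming the *toml.Tree comes from github.com/pelletier/go-toml, that the key names shown are purely illustrative, and that peerType is a label used in error reporting:

package main

import (
	"log"

	"github.com/pelletier/go-toml"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

func main() {
	tree, err := toml.Load(`contractAddress = "0x0000000000000000000000000000000000000000"`)
	if err != nil {
		log.Fatal(err)
	}

	expected := map[string]struct{}{"contractAddress": {}}
	notExpected := map[string]struct{}{"p2pBootstrapPeers": {}}

	// Passes only if every expected top-level key is set and no notExpected key is.
	if err := ocrcommon.ValidateExplicitlySetKeys(tree, expected, notExpected, "V1"); err != nil {
		log.Fatal(err)
	}
}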

func ValidatePeerWrapperConfig

func ValidatePeerWrapperConfig(config config.P2P) error

Types

type ArbitrumBlockTranslator

type ArbitrumBlockTranslator struct {
	// contains filtered or unexported fields
}

ArbitrumBlockTranslator uses Arbitrum's special L1BlockNumber to optimise log lookups. Performance matters here, hence the aggressive use of the cache: we want to minimise fetches because calling eth_getBlockByNumber is relatively expensive.

func NewArbitrumBlockTranslator

func NewArbitrumBlockTranslator(ethClient evmclient.Client, lggr logger.Logger) *ArbitrumBlockTranslator

NewArbitrumBlockTranslator returns a concrete ArbitrumBlockTranslator

func (*ArbitrumBlockTranslator) BinarySearch

func (a *ArbitrumBlockTranslator) BinarySearch(ctx context.Context, targetL1 int64) (l2lowerBound *big.Int, l2upperBound *big.Int, err error)

BinarySearch uses both cache and RPC calls to find the smallest possible range of L2 block numbers that encompasses the given L1 block number

Imagine a virtual array of L1 block numbers indexed by L2 block numbers. L1 values are likely duplicated, so it looks something like [42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430, ...]. The theoretical max difference between L1 values is typically about 5; the "worst case" is 6545, but it can be arbitrarily high if the sequencer is broken. The returned range of L2s, from leftmost through rightmost, represents all possible L2s that correspond to the L1 value we are looking for. nil can be returned as the rightmost value if the range has no upper bound.
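A stdlib-only sketch of that leftmost/rightmost search over such an array; the real method works against its cache and RPC calls rather than an in-memory slice:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Toy "virtual array": L1 block numbers indexed by L2 block number.
	l1ByL2 := []int64{42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430}
	targetL1 := int64(155)

	// Leftmost L2 whose L1 value reaches the target.
	lower := sort.Search(len(l1ByL2), func(i int) bool { return l1ByL2[i] >= targetL1 })
	// Rightmost L2 whose L1 value is still the target (one before the first strictly greater value).
	upper := sort.Search(len(l1ByL2), func(i int) bool { return l1ByL2[i] > targetL1 }) - 1

	fmt.Println(lower, upper) // 5 7: every L2 block whose L1BlockNumber is 155
}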

func (*ArbitrumBlockTranslator) NumberToQueryRange

func (a *ArbitrumBlockTranslator) NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int)

NumberToQueryRange implements BlockTranslator interface

type BlockTranslator

type BlockTranslator interface {
	NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int)
}

BlockTranslator converts emitted block numbers (from block.number) into a block number range suitable for query in FilterLogs
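A sketch of how the translated range typically bounds a FilterLogs query; the package name and the already-constructed translator are assumptions of the example:

package logquery

import (
	"context"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

// buildFilterQuery turns an L1 block number taken from a log into a block range query.
func buildFilterQuery(ctx context.Context, translator ocrcommon.BlockTranslator, changedInL1Block uint64, contract common.Address) ethereum.FilterQuery {
	fromBlock, toBlock := translator.NumberToQueryRange(ctx, changedInL1Block)
	return ethereum.FilterQuery{
		FromBlock: fromBlock,
		ToBlock:   toBlock, // may be nil, i.e. no upper bound
		Addresses: []common.Address{contract},
	}
}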

func NewBlockTranslator

func NewBlockTranslator(cfg Config, client evmclient.Client, lggr logger.Logger) BlockTranslator

NewBlockTranslator returns the block translator for the given chain

type Config

type Config interface {
	ChainType() config.ChainType
}

type DataSourceCacheService added in v2.10.0

type DataSourceCacheService interface {
	Start(context.Context) error
	Close() error
	median.DataSource
}

func NewInMemoryDataSourceCache added in v2.10.0

func NewInMemoryDataSourceCache(ds median.DataSource, kvStore job.KVStore, cacheCfg *config.JuelsPerFeeCoinCache) (DataSourceCacheService, error)

type DiscovererDatabase

type DiscovererDatabase struct {
	// contains filtered or unexported fields
}

func NewDiscovererDatabase

func NewDiscovererDatabase(ds sqlutil.DataSource, peerID string) *DiscovererDatabase

func (*DiscovererDatabase) ReadAnnouncements

func (d *DiscovererDatabase) ReadAnnouncements(ctx context.Context, peerIDs []string) (results map[string][]byte, err error)

ReadAnnouncements returns one serialized announcement (if available) for each of the peerIDs in the form of a map keyed by each announcement's corresponding peer ID.

func (*DiscovererDatabase) StoreAnnouncement

func (d *DiscovererDatabase) StoreAnnouncement(ctx context.Context, peerID string, ann []byte) error

StoreAnnouncement has key-value-store semantics and stores a peerID (key) and an associated serialized announcement (value).
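A brief sketch of the store/read round trip; the surrounding package and the already-constructed database are assumptions of the example:

package discovery

import (
	"context"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

// storeAndRead persists one announcement and reads it back, keyed by peer ID.
func storeAndRead(ctx context.Context, db *ocrcommon.DiscovererDatabase, peerID string, ann []byte) (map[string][]byte, error) {
	if err := db.StoreAnnouncement(ctx, peerID, ann); err != nil {
		return nil, err
	}
	// At most one serialized announcement is returned per requested peer ID.
	return db.ReadAnnouncements(ctx, []string{peerID})
}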

type EnhancedTelemetryData added in v2.2.0

type EnhancedTelemetryData struct {
	TaskRunResults pipeline.TaskRunResults
	FinalResults   pipeline.FinalResult
	RepTimestamp   ObservationTimestamp
}

type EnhancedTelemetryMercuryData added in v2.2.0

type EnhancedTelemetryMercuryData struct {
	V1Observation              *v1types.Observation
	V2Observation              *v2types.Observation
	V3Observation              *v3types.Observation
	TaskRunResults             pipeline.TaskRunResults
	RepTimestamp               ocrtypes.ReportTimestamp
	FeedVersion                mercuryutils.FeedVersion
	FetchMaxFinalizedTimestamp bool
	IsLinkFeed                 bool
	IsNativeFeed               bool
}

type EnhancedTelemetryService added in v2.2.0

type EnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData] struct {
	services.StateMachine
	// contains filtered or unexported fields
}

func NewEnhancedTelemetryService added in v2.2.0

func NewEnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](job *job.Job, chTelem <-chan T, done chan struct{}, me commontypes.MonitoringEndpoint, lggr logger.Logger) *EnhancedTelemetryService[T]

func (*EnhancedTelemetryService[T]) Close added in v2.2.0

func (e *EnhancedTelemetryService[T]) Close() error

func (*EnhancedTelemetryService[T]) Start added in v2.2.0

Start starts the EnhancedTelemetryService.
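A hedged wiring sketch for the service; the import paths, the channel buffer size, and the Start(ctx) signature (not shown above) are assumptions based on the surrounding APIs:

package telemetry

import (
	"context"

	"github.com/smartcontractkit/libocr/commontypes"

	"github.com/smartcontractkit/chainlink/v2/core/logger"
	"github.com/smartcontractkit/chainlink/v2/core/services/job"
	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

// wireTelemetry creates the channel, starts the consumer service, and enqueues one observation.
func wireTelemetry(ctx context.Context, jb *job.Job, me commontypes.MonitoringEndpoint, lggr logger.Logger) (*ocrcommon.EnhancedTelemetryService[ocrcommon.EnhancedTelemetryData], error) {
	chTelem := make(chan ocrcommon.EnhancedTelemetryData, 100)
	done := make(chan struct{})

	svc := ocrcommon.NewEnhancedTelemetryService[ocrcommon.EnhancedTelemetryData](jb, chTelem, done, me, lggr)
	if err := svc.Start(ctx); err != nil {
		return nil, err
	}

	// Producers hand observations to the running service via the channel.
	ocrcommon.EnqueueEnhancedTelem[ocrcommon.EnhancedTelemetryData](chTelem, ocrcommon.EnhancedTelemetryData{})
	return svc, nil
}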

type OCR3ContractTransmitterAdapter added in v2.10.0

type OCR3ContractTransmitterAdapter struct {
	// contains filtered or unexported fields
}

func NewOCR3ContractTransmitterAdapter added in v2.10.0

func NewOCR3ContractTransmitterAdapter(ct ocrtypes.ContractTransmitter) *OCR3ContractTransmitterAdapter

func (*OCR3ContractTransmitterAdapter) FromAccount added in v2.10.0

func (*OCR3ContractTransmitterAdapter) Transmit added in v2.10.0

type OCR3OnchainKeyringAdapter added in v2.10.0

type OCR3OnchainKeyringAdapter struct {
	// contains filtered or unexported fields
}

func NewOCR3OnchainKeyringAdapter added in v2.10.0

func NewOCR3OnchainKeyringAdapter(o ocrtypes.OnchainKeyring) *OCR3OnchainKeyringAdapter

func (*OCR3OnchainKeyringAdapter) MaxSignatureLength added in v2.10.0

func (k *OCR3OnchainKeyringAdapter) MaxSignatureLength() int

func (*OCR3OnchainKeyringAdapter) PublicKey added in v2.10.0

func (*OCR3OnchainKeyringAdapter) Sign added in v2.10.0

func (k *OCR3OnchainKeyringAdapter) Sign(digest ocrtypes.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[[]byte]) (signature []byte, err error)

func (*OCR3OnchainKeyringAdapter) Verify added in v2.10.0

type ObservationTimestamp added in v2.1.0

type ObservationTimestamp struct {
	Round        uint8
	Epoch        uint32
	ConfigDigest string
}

ObservationTimestamp abstracts ocr2types.ReportTimestamp and ocr1types.ReportTimestamp
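A hedged sketch of mapping an OCR2 report timestamp onto this shared type; the hex rendering of the config digest and the import paths are assumptions (the OCR1 case is analogous):

package telemetry

import (
	"fmt"

	ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"

	"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
)

// toObservationTimestamp flattens an OCR2 report timestamp into the shared form.
func toObservationTimestamp(rt ocrtypes.ReportTimestamp) ocrcommon.ObservationTimestamp {
	return ocrcommon.ObservationTimestamp{
		Round:        rt.Round,
		Epoch:        rt.Epoch,
		ConfigDigest: fmt.Sprintf("%x", rt.ConfigDigest),
	}
}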

type PeerWrapperOCRConfig added in v2.3.0

type PeerWrapperOCRConfig interface {
	TraceLogging() bool
}

type ResultTimePair added in v2.10.0

type ResultTimePair struct {
	Result serializablebig.Big `json:"result"`
	Time   time.Time           `json:"time"`
}

type RunResultSaver

type RunResultSaver struct {
	services.StateMachine
	// contains filtered or unexported fields
}

func NewResultRunSaver

func NewResultRunSaver(pipelineRunner Runner,
	logger logger.Logger, maxSuccessfulRuns uint64, resultsWriteDepth uint64,
) *RunResultSaver

func (*RunResultSaver) Close

func (r *RunResultSaver) Close() error

func (*RunResultSaver) HealthReport added in v2.6.0

func (r *RunResultSaver) HealthReport() map[string]error

func (*RunResultSaver) Name added in v2.6.0

func (r *RunResultSaver) Name() string

func (*RunResultSaver) Save added in v2.8.0

func (r *RunResultSaver) Save(run *pipeline.Run)

Save sends the run on the internal `runResults` channel for saving. IMPORTANT: if the `runResults` channel is full, the run will be dropped.
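A stdlib-only illustration of the drop-when-full pattern this describes (not the package's own implementation):

package main

import "fmt"

func main() {
	results := make(chan int, 2) // small buffer, like the internal runResults channel

	for i := 0; i < 4; i++ {
		select {
		case results <- i:
			fmt.Println("queued run", i)
		default:
			fmt.Println("dropped run", i) // buffer full: drop instead of blocking the caller
		}
	}
}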

func (*RunResultSaver) Start

func (r *RunResultSaver) Start(context.Context) error

Start starts RunResultSaver.

type Runner added in v2.9.0

type Runner interface {
	InsertFinishedRun(ctx context.Context, ds sqlutil.DataSource, run *pipeline.Run, saveSuccessfulTaskRuns bool) error
}

type Saver added in v2.8.0

type Saver interface {
	Save(run *pipeline.Run)
}

type SingletonPeerWrapper

type SingletonPeerWrapper struct {
	services.StateMachine

	PeerID p2pkey.PeerID

	// OCR1 peer adapter
	Peer1 *peerAdapterOCR1

	// OCR2 peer adapter
	Peer2 *peerAdapterOCR2
	// contains filtered or unexported fields
}

SingletonPeerWrapper manages all libocr peers for the application

func NewSingletonPeerWrapper

func NewSingletonPeerWrapper(keyStore keystore.Master, p2pCfg config.P2P, ocrCfg PeerWrapperOCRConfig, ds sqlutil.DataSource, lggr logger.Logger) *SingletonPeerWrapper

NewSingletonPeerWrapper creates a new peer based on the p2p keys in the keystore. It currently only supports one peerID/key. It should be fairly easy to modify it to support multiple peerIDs/keys using e.g. a map.

func (*SingletonPeerWrapper) Close

func (p *SingletonPeerWrapper) Close() error

Close closes the peer and peerstore

func (*SingletonPeerWrapper) HealthReport

func (p *SingletonPeerWrapper) HealthReport() map[string]error

func (*SingletonPeerWrapper) IsStarted

func (p *SingletonPeerWrapper) IsStarted() bool

func (*SingletonPeerWrapper) Name

func (p *SingletonPeerWrapper) Name() string

func (*SingletonPeerWrapper) P2PConfig added in v2.3.0

func (p *SingletonPeerWrapper) P2PConfig() config.P2P

func (*SingletonPeerWrapper) Start

Start starts SingletonPeerWrapper.

type Transmitter

type Transmitter interface {
	CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error
	FromAddress() common.Address
}

func NewOCR2FeedsTransmitter added in v2.12.0

func NewOCR2FeedsTransmitter(
	txm txManagerOCR2,
	fromAddresses []common.Address,
	ocr2Aggregator common.Address,
	gasLimit uint64,
	effectiveTransmitterAddress common.Address,
	strategy types.TxStrategy,
	checker txmgr.TransmitCheckerSpec,
	chainID *big.Int,
	keystore roundRobinKeystore,
) (Transmitter, error)

NewOCR2FeedsTransmitter creates a new eth transmitter that handles OCR2 Feeds specific logic surrounding forwarders. ocr2FeedsTransmitter validates forwarders before every transmission, enabling smooth onchain config changes without job restarts.

func NewTransmitter

func NewTransmitter(
	txm txManager,
	fromAddresses []common.Address,
	gasLimit uint64,
	effectiveTransmitterAddress common.Address,
	strategy types.TxStrategy,
	checker txmgr.TransmitCheckerSpec,
	chainID *big.Int,
	keystore roundRobinKeystore,
) (Transmitter, error)

NewTransmitter creates a new eth transmitter
