package module

v0.37.21-tts
Published: Nov 8, 2024 License: AGPL-3.0 Imports: 26 Imported by: 64

Documentation


Constants

const (
	ConsumeProgressVerificationBlockHeight = "ConsumeProgressVerificationBlockHeight"
	ConsumeProgressVerificationChunkIndex  = "ConsumeProgressVerificationChunkIndex"

	ConsumeProgressExecutionDataRequesterBlockHeight  = "ConsumeProgressExecutionDataRequesterBlockHeight"
	ConsumeProgressExecutionDataRequesterNotification = "ConsumeProgressExecutionDataRequesterNotification"

	ConsumeProgressExecutionDataIndexerBlockHeight = "ConsumeProgressExecutionDataIndexerBlockHeight"

	ConsumeProgressIngestionEngineBlockHeight = "ConsumeProgressIngestionEngineBlockHeight"
	ConsumeProgressLastFullBlockHeight        = "ConsumeProgressLastFullBlockHeight"
)

Variables

var ErrMultipleStartup = errors.New("component may only be started once")
var (
	// ErrNoBeaconKeyForEpoch indicates that no beacon key is available for the given epoch.
	// This can happen for several reasons:
	//   1. The DKG for the epoch has not completed yet, and the beacon key may be available later.
	//   2. The DKG succeeded globally, but this node failed to generate a local beacon key.
	//      This can happen if the node is unavailable or behind during the DKG.
	//      In this case, no beacon key will ever be available for this epoch.
	//   3. The DKG failed globally, so no nodes generated a local beacon key.
	//
	// Regardless of the reason, beacon key users should fall back to signing with
	// only their staking key - hence these situations are not differentiated.
	ErrNoBeaconKeyForEpoch = errors.New("no beacon key available for epoch")
)
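
Example (illustrative sketch): the fallback prescribed above can be implemented with a plain errors.Is check. The import path is the package's canonical one; beaconKeyForEpoch is a hypothetical lookup standing in for a real key store.

package main

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/module"
)

// beaconKeyForEpoch is a hypothetical lookup that returns module.ErrNoBeaconKeyForEpoch
// when no local beacon key exists for the requested epoch.
func beaconKeyForEpoch(epoch uint64) (string, error) {
	return "", module.ErrNoBeaconKeyForEpoch
}

func main() {
	_, err := beaconKeyForEpoch(42)
	switch {
	case errors.Is(err, module.ErrNoBeaconKeyForEpoch):
		// Regardless of why the key is missing, fall back to staking-key-only signing.
		fmt.Println("no beacon key for epoch: falling back to staking signature")
	case err != nil:
		// Any other error is unexpected and should be escalated.
		fmt.Println("unexpected error:", err)
	}
}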

Functions

func IsUnknownBlockError added in v0.33.30

func IsUnknownBlockError(err error) bool

func IsUnknownResultError added in v0.33.30

func IsUnknownResultError(err error) bool

func NewUnknownBlockError added in v0.33.30

func NewUnknownBlockError(msg string, args ...interface{}) error

func NewUnknownResultError added in v0.33.30

func NewUnknownResultError(msg string, args ...interface{}) error
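
Example (illustrative sketch): the constructor/predicate pairs above follow the usual sentinel-wrapping pattern; lookupResult is a hypothetical helper used only to produce such an error.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/module"
)

// lookupResult is a hypothetical helper that fails because the referenced block is unknown.
func lookupResult(blockID string) error {
	return module.NewUnknownBlockError("no finalized block with ID %s", blockID)
}

func main() {
	err := lookupResult("abc123")
	if module.IsUnknownBlockError(err) {
		// Unknown-block errors are typically benign: retry once the block becomes available.
		fmt.Println("block not yet known, will retry:", err)
		return
	}
	if err != nil {
		fmt.Println("unexpected error:", err)
	}
}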

Types

type AccessMetrics added in v0.26.16

type AccessMetrics interface {
	RestMetrics
	GRPCConnectionPoolMetrics
	TransactionMetrics
	TransactionValidationMetrics
	BackendScriptsMetrics

	// UpdateExecutionReceiptMaxHeight is called whenever we store an execution receipt from a block at a newer height
	UpdateExecutionReceiptMaxHeight(height uint64)

	// UpdateLastFullBlockHeight tracks the height of the last block for which all collections were received
	UpdateLastFullBlockHeight(height uint64)
}

type AlspMetrics added in v0.31.0

type AlspMetrics interface {
	// OnMisbehaviorReported is called when a misbehavior is reported by the application layer to ALSP.
	// An engine detecting a spamming-related misbehavior reports it to the ALSP module.
	// Args:
	// - channel: the channel on which the misbehavior was reported
	// - misbehaviorType: the type of misbehavior reported
	OnMisbehaviorReported(channel string, misbehaviorType string)
}

AlspMetrics encapsulates the metrics collectors for the Application Layer Spam Prevention (ALSP) module, which is part of the networking layer. ALSP is responsible for preventing spam attacks via application-layer messages that appear valid to the networking layer but carry malicious intent at the application layer (i.e., Flow protocols).
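
Example (illustrative sketch): since the interface has a single method, a no-op collector for tests is trivial; the type name and arguments below are illustrative only.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/module"
)

// NoopAlspMetrics is an illustrative no-op implementation of AlspMetrics,
// useful in tests where spam-prevention metrics are irrelevant.
type NoopAlspMetrics struct{}

var _ module.AlspMetrics = (*NoopAlspMetrics)(nil)

// OnMisbehaviorReported satisfies the interface but records nothing.
func (*NoopAlspMetrics) OnMisbehaviorReported(channel string, misbehaviorType string) {}

func main() {
	var m module.AlspMetrics = &NoopAlspMetrics{}
	m.OnMisbehaviorReported("example-channel", "example-misbehavior")
	fmt.Println("misbehavior reported to no-op collector")
}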

type BackendScriptsMetrics added in v0.26.1

type BackendScriptsMetrics interface {
	// ScriptExecuted records the round trip time while executing a script
	ScriptExecuted(dur time.Duration, size int)

	// ScriptExecutionErrorLocal records script execution failures from local execution
	ScriptExecutionErrorLocal()

	// ScriptExecutionErrorOnExecutionNode records script execution failures on Execution Nodes
	ScriptExecutionErrorOnExecutionNode()

	// ScriptExecutionResultMismatch records script execution result mismatches between local and
	// execution nodes
	ScriptExecutionResultMismatch()

	// ScriptExecutionResultMatch records script execution result matches between local and
	// execution nodes
	ScriptExecutionResultMatch()

	// ScriptExecutionErrorMismatch records script execution error mismatches between local and
	// execution nodes
	ScriptExecutionErrorMismatch()

	// ScriptExecutionErrorMatch records script execution error matches between local and
	// execution nodes
	ScriptExecutionErrorMatch()

	// ScriptExecutionNotIndexed records script execution matches where data for the block is not
	// indexed locally yet
	ScriptExecutionNotIndexed()
}

type BitswapMetrics added in v0.27.3

type BitswapMetrics interface {
	Peers(prefix string, n int)
	Wantlist(prefix string, n int)
	BlobsReceived(prefix string, n uint64)
	DataReceived(prefix string, n uint64)
	BlobsSent(prefix string, n uint64)
	DataSent(prefix string, n uint64)
	DupBlobsReceived(prefix string, n uint64)
	DupDataReceived(prefix string, n uint64)
	MessagesReceived(prefix string, n uint64)
}

type BlockExecutionResultStats added in v0.33.30

type BlockExecutionResultStats struct {
	CollectionExecutionResultStats
	NumberOfCollections int
}

func (*BlockExecutionResultStats) Add added in v0.33.30

type BlockRequester

type BlockRequester interface {

	// RequestBlock indicates that the given block should be queued for retrieval.
	RequestBlock(blockID flow.Identifier, height uint64)

	// RequestHeight indicates that the given block height should be queued for retrieval.
	RequestHeight(height uint64)

	// Prune manually prunes requests
	Prune(final *flow.Header)
}

BlockRequester enables components to request particular blocks by ID from the synchronization system.
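
Example (illustrative sketch): the typical call pattern, requesting a block we have only heard about and pruning once a block is finalized. The requester itself is supplied by the node's synchronization engine; main is intentionally empty because wiring a real requester requires a running node.

package main

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// requestMissing asks the synchronization system to fetch a block known only by reference.
func requestMissing(requester module.BlockRequester, blockID flow.Identifier, height uint64) {
	requester.RequestBlock(blockID, height)
}

// onFinalized lets the requester drop requests at or below the finalized header.
func onFinalized(requester module.BlockRequester, final *flow.Header) {
	requester.Prune(final)
}

func main() {
	// This sketch only shows the call pattern; a concrete BlockRequester comes from the node.
}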

type Builder

type Builder interface {

	// BuildOn generates a new payload that is valid with respect to the parent
	// being built upon, with the view being provided by the consensus algorithm.
	// The builder stores the block and validates it against the protocol state
	// before returning it.
	//
	// NOTE: Since the block is stored within Builder, HotStuff MUST propose the
	// block once BuildOn successfully returns.
	BuildOn(parentID flow.Identifier, setter func(*flow.Header) error, sign func(*flow.Header) error) (*flow.Header, error)
}

Builder represents an abstracted block construction module that can be used in more than one consensus algorithm. The resulting block is consistent within itself and can be wrapped with additional consensus information such as QCs.
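
Example (illustrative sketch): the call pattern implied by the BuildOn contract, with placeholder setter and sign callbacks; the view value and error handling are illustrative, and the concrete Builder comes from the node's construction (not shown).

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// propose builds a block on top of parentID. Because the Builder has already stored and
// validated the block, the caller must subsequently hand the returned header to HotStuff.
func propose(builder module.Builder, parentID flow.Identifier) (*flow.Header, error) {
	header, err := builder.BuildOn(
		parentID,
		func(h *flow.Header) error {
			// setter: stamp consensus-specific fields, e.g. the view (value is illustrative).
			h.View = 42
			return nil
		},
		func(h *flow.Header) error {
			// sign: attach the proposer's signature; a real implementation uses the node's signer.
			return nil
		},
	)
	if err != nil {
		return nil, fmt.Errorf("could not build on parent %v: %w", parentID, err)
	}
	return header, nil
}

func main() {
	// A concrete Builder is provided by the node; this sketch only shows the BuildOn contract.
}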

type CacheMetrics

type CacheMetrics interface {
	// CacheEntries reports the total number of cached items
	CacheEntries(resource string, entries uint)
	// CacheHit reports the number of times the queried item is found in the cache
	CacheHit(resource string)
	// CacheNotFound records the number of times the queried item was not found in either cache or database.
	CacheNotFound(resource string)
	// CacheMiss reports the number of times the queried item is not found in the cache, but found in the database.
	CacheMiss(resource string)
}

type ChainSyncMetrics added in v0.27.0

type ChainSyncMetrics interface {
	// record pruned blocks. requested and received times might be zero values
	PrunedBlockById(status *chainsync.Status)

	PrunedBlockByHeight(status *chainsync.Status)

	// totalByHeight and totalById are the number of blocks pruned for blocks requested by height and by id
	// storedByHeight and storedById are the number of blocks still stored by height and id
	PrunedBlocks(totalByHeight, totalById, storedByHeight, storedById int)

	RangeRequested(ran chainsync.Range)

	BatchRequested(batch chainsync.Batch)
}

type ChunkAssigner

type ChunkAssigner interface {
	// Assign generates the assignment
	// error returns:
	//  * NoValidChildBlockError indicates that no valid child block is known
	//    (which contains the block's source of randomness)
	//  * unexpected errors should be considered symptoms of internal bugs
	Assign(result *flow.ExecutionResult, blockID flow.Identifier) (*chmodels.Assignment, error)
}

ChunkAssigner presents an interface for assigning chunks to the verifier nodes

type ChunkVerifier

type ChunkVerifier interface {
	// Verify verifies the given VerifiableChunk by executing it and checking
	// the final state commitment.
	// It returns a Spock Secret as a byte array, verification fault of the
	// chunk, and an error.
	Verify(
		ch *verification.VerifiableChunkData,
	) (
		[]byte,
		error,
	)
}

ChunkVerifier provides functionality to verify chunks

type CleanerMetrics

type CleanerMetrics interface {
	RanGC(took time.Duration)
}

type ClusterRootQCVoter

type ClusterRootQCVoter interface {

	// Vote handles the full procedure of generating a vote, submitting it to the epoch
	// smart contract, and verifying submission. It is safe to run Vote multiple
	// times within a single setup phase.
	// Error returns:
	//   - epochs.ClusterQCNoVoteError if we fail to vote for a benign reason
	//   - generic error in case of critical unexpected failure
	Vote(context.Context, protocol.Epoch) error
}

ClusterRootQCVoter is responsible for submitting a vote to the cluster QC contract to coordinate generation of a valid root quorum certificate for the next epoch.

type CollectionExecutedMetric added in v0.33.15

type CollectionExecutedMetric interface {
	CollectionFinalized(light flow.LightCollection)
	CollectionExecuted(light flow.LightCollection)
	BlockFinalized(block *flow.Block)
	ExecutionReceiptReceived(r *flow.ExecutionReceipt)
	UpdateLastFullBlockHeight(height uint64)
}

type CollectionExecutionResultStats added in v0.33.30

type CollectionExecutionResultStats struct {
	ExecutionResultStats
	NumberOfTransactions int
}

func (*CollectionExecutionResultStats) Add added in v0.33.30

type CollectionMetrics

type CollectionMetrics interface {
	TransactionValidationMetrics
	// TransactionIngested is called when a new transaction is ingested by the
	// node. It increments the total count of ingested transactions and starts
	// a tx->col span for the transaction.
	TransactionIngested(txID flow.Identifier)

	// ClusterBlockProposed is called when a new collection is proposed by us or
	// any other node in the cluster.
	ClusterBlockProposed(block *cluster.Block)

	// ClusterBlockFinalized is called when a collection is finalized.
	ClusterBlockFinalized(block *cluster.Block)
}

type ComplianceMetrics

type ComplianceMetrics interface {
	// FinalizedHeight reports the latest finalized height known to this node.
	FinalizedHeight(height uint64)
	// EpochTransitionHeight reports the height of the most recently finalized epoch transition.
	EpochTransitionHeight(height uint64)
	// SealedHeight reports the latest block that has a finalized seal known to this node.
	SealedHeight(height uint64)
	// BlockFinalized reports information about data contained within finalized blocks.
	BlockFinalized(*flow.Block)
	// BlockSealed reports information about data contained within blocks with a finalized seal.
	// When a new block is finalized, this method is called for every seal in the finalized block,
	// in the order the seals are listed.
	// CAUTION: within a block, seals can be included in any permutation (not necessarily in order
	// of increasing height).
	BlockSealed(*flow.Block)
	// CurrentEpochCounter reports the current epoch counter.
	CurrentEpochCounter(counter uint64)
	// CurrentEpochPhase reports the current epoch phase.
	CurrentEpochPhase(phase flow.EpochPhase)
	// CurrentEpochFinalView reports the final view of the current epoch, including epoch extensions.
	CurrentEpochFinalView(view uint64)
	// CurrentDKGPhaseViews reports the final view of each DKG phase for the current epoch.
	CurrentDKGPhaseViews(phase1FinalView, phase2FinalView, phase3FinalView uint64)
	// EpochFallbackModeTriggered reports that EFM is triggered.
	EpochFallbackModeTriggered()
	// EpochFallbackModeExited reports that EFM is no longer triggered.
	EpochFallbackModeExited()
}

ComplianceMetrics reports metrics about the compliance layer, which processes, finalizes, and seals blocks and tracks data in the Protocol State.

type ConsensusMetrics

type ConsensusMetrics interface {
	// StartCollectionToFinalized reports Metrics C1: Collection Received by CCL → Collection Included in Finalized Block
	StartCollectionToFinalized(collectionID flow.Identifier)

	// FinishCollectionToFinalized reports Metrics C1: Collection Received by CCL → Collection Included in Finalized Block
	FinishCollectionToFinalized(collectionID flow.Identifier)

	// StartBlockToSeal reports Metrics C4: Block Received by CCL → Block Seal in finalized block
	StartBlockToSeal(blockID flow.Identifier)

	// FinishBlockToSeal reports Metrics C4: Block Received by CCL → Block Seal in finalized block
	FinishBlockToSeal(blockID flow.Identifier)

	// EmergencySeal increments the number of seals that were created in emergency mode
	EmergencySeal()

	// OnReceiptProcessingDuration records the number of seconds spent processing a receipt
	OnReceiptProcessingDuration(duration time.Duration)

	// OnApprovalProcessingDuration records the number of seconds spent processing an approval
	OnApprovalProcessingDuration(duration time.Duration)

	// CheckSealingDuration records absolute time for the full sealing check by the consensus match engine
	CheckSealingDuration(duration time.Duration)
}

type CruiseCtlMetrics added in v0.31.0

type CruiseCtlMetrics interface {

	// PIDError measures the current error values for the proportional, integration,
	// and derivative terms of the PID controller.
	PIDError(p, i, d float64)

	// TargetProposalDuration measures the current value of the Block Time Controller output:
	// the target duration from parent to child proposal.
	TargetProposalDuration(duration time.Duration)

	// ControllerOutput measures the output of the cruise control PID controller.
	// Concretely, this is the quantity to subtract from the baseline view duration.
	ControllerOutput(duration time.Duration)

	// ProposalPublicationDelay measures the effective delay the controller imposes on publishing
	// the node's own proposals, with all limits of authority applied.
	// Note: Technically, our metrics capture the publication delay relative to when the publication delay was
	// last requested. Currently, only the EventHandler requests a publication delay, exactly once per proposal.
	ProposalPublicationDelay(duration time.Duration)
}

type DHTMetrics added in v0.26.11

type DHTMetrics interface {
	RoutingTablePeerAdded()
	RoutingTablePeerRemoved()
}

type DKGBroker added in v0.33.1

type DKGBroker interface {
	crypto.DKGProcessor

	// GetIndex returns the index of this node in the DKG committee list.
	GetIndex() int

	// GetPrivateMsgCh returns the channel through which a user can receive
	// incoming private DKGMessages.
	GetPrivateMsgCh() <-chan messages.PrivDKGMessageIn

	// GetBroadcastMsgCh returns the channel through which a user can receive
	// incoming broadcast DKGMessages.
	GetBroadcastMsgCh() <-chan messages.BroadcastDKGMessage

	// Poll instructs the broker to actively fetch broadcast messages (ex. read
	// from DKG smart contract). The messages will be forwarded through the
	// broker's message channel (cf. GetMsgCh). The method does not return until
	// all received messages are processed by the consumer.
	Poll(referenceBlock flow.Identifier) error

	// SubmitResult instructs the broker to publish the results of the DKG run
	// (ex. publish to DKG smart contract).
	SubmitResult(crypto.PublicKey, []crypto.PublicKey) error

	// Shutdown causes the broker to stop listening and forwarding messages.
	Shutdown()
}

DKGBroker extends the crypto.DKGProcessor interface with methods that enable a controller to access the channel of incoming messages, and actively fetch new DKG broadcast messages.

type DKGContractClient added in v0.22.4

type DKGContractClient interface {

	// Broadcast broadcasts a message to all other nodes participating in the
	// DKG. The message is broadcast by submitting a transaction to the DKG
	// smart contract. An error is returned if the transaction has failed.
	// TBD: retry logic
	Broadcast(msg messages.BroadcastDKGMessage) error

	// ReadBroadcast reads the broadcast messages from the smart contract.
	// Messages are returned in the order in which they were broadcast (received
	// and stored in the smart contract). The parameters are:
	//
	// * fromIndex: return messages with index >= fromIndex
	// * referenceBlock: a marker for the state against which the query should
	//   be executed
	//
	// DKG nodes should call ReadBroadcast one final time once they have
	// observed the phase deadline trigger to guarantee they receive all
	// messages for that phase.
	ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ([]messages.BroadcastDKGMessage, error)

	// SubmitResult submits the final public result of the DKG protocol. This
	// represents the group public key and the node's local computation of the
	// public keys for each DKG participant.
	//
	// SubmitResult must be called strictly after the final phase has ended.
	SubmitResult(crypto.PublicKey, []crypto.PublicKey) error
}

DKGContractClient enables interacting with the DKG smart contract. This contract is deployed to the service account as part of a collection of smart contracts that facilitate and manage epoch transitions.
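
Example (illustrative sketch): the cursor bookkeeping implied by the ReadBroadcast contract: keep a running fromIndex and advance it by the number of messages returned. Import paths follow the flow-go layout referenced above; pollBroadcasts is a hypothetical helper.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/module"
)

// pollBroadcasts reads any broadcast messages published since fromIndex and returns the
// updated cursor, so the caller can resume from where it left off on the next poll.
func pollBroadcasts(
	client module.DKGContractClient,
	fromIndex uint,
	referenceBlock flow.Identifier,
) (uint, []messages.BroadcastDKGMessage, error) {
	msgs, err := client.ReadBroadcast(fromIndex, referenceBlock)
	if err != nil {
		return fromIndex, nil, fmt.Errorf("could not read broadcast messages: %w", err)
	}
	// Advance the cursor past everything we just read.
	return fromIndex + uint(len(msgs)), msgs, nil
}

func main() {
	// A concrete client is provided by the node's epoch machinery; this sketch only
	// illustrates the cursor bookkeeping described by ReadBroadcast.
}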

type DKGController added in v0.22.4

type DKGController interface {

	// Run starts the DKG controller and starts phase 1. It is a blocking call
	// that blocks until the controller is shut down or until an error is
	// encountered in one of the protocol phases.
	Run() error

	// EndPhase1 notifies the controller to end phase 1, and start phase 2.
	EndPhase1() error

	// EndPhase2 notifies the controller to end phase 2, and start phase 3.
	EndPhase2() error

	// End terminates the DKG state machine and records the artifacts.
	End() error

	// Shutdown stops the controller regardless of the current state.
	Shutdown()

	// Poll instructs the controller to actively fetch broadcast messages (ex.
	// read from DKG smart contract). The method does not return until all
	// received messages are processed.
	Poll(blockReference flow.Identifier) error

	// GetArtifacts returns our node's private key share, the group public key,
	// and the list of all nodes' public keys (including ours), as computed by
	// the DKG.
	GetArtifacts() (crypto.PrivateKey, crypto.PublicKey, []crypto.PublicKey)

	// GetIndex returns the index of this node in the DKG committee list.
	GetIndex() int

	// SubmitResult instructs the broker to publish the results of the DKG run
	// (ex. publish to DKG smart contract).
	SubmitResult() error
}

DKGController controls the execution of a Joint Feldman DKG instance.

type DKGControllerFactory added in v0.22.4

type DKGControllerFactory interface {

	// Create instantiates a new DKGController.
	Create(dkgInstanceID string, participants flow.IdentitySkeletonList, seed []byte) (DKGController, error)
}

DKGControllerFactory is a factory to create instances of DKGController.

type EVMMetrics added in v0.36.3

type EVMMetrics interface {
	// SetNumberOfDeployedCOAs sets the total number of deployed COAs
	SetNumberOfDeployedCOAs(count uint64)
	// EVMTransactionExecuted reports the gas used when executing an evm transaction
	EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool)
	// EVMBlockExecuted reports the block size, total gas used and total supply when executing an evm block
	EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64)
}

type EngineMetrics

type EngineMetrics interface {
	// MessageSent reports that the engine transmitted the message over the network.
	// Unicasts, broadcasts, and multicasts are all reported once.
	MessageSent(engine string, message string)
	// MessageReceived reports that the engine received the message over the network.
	MessageReceived(engine string, message string)
	// MessageHandled reports that the engine has finished processing the message.
	// Both invalid and valid messages should be reported.
	// A message must be reported as either handled or dropped, not both.
	MessageHandled(engine string, messages string)
	// InboundMessageDropped reports that the engine has dropped inbound message without processing it.
	// Inbound messages must be reported as either handled or dropped, not both.
	InboundMessageDropped(engine string, messages string)
	// OutboundMessageDropped reports that the engine has dropped outbound message without processing it.
	// Outbound messages must be reported as either sent or dropped, not both.
	OutboundMessageDropped(engine string, messages string)
}

EngineMetrics is a generic metrics consumer for node-internal data processing components (aka engines). Implementations must be non-blocking and concurrency safe.
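
Example (illustrative sketch): a minimal implementation that keeps in-memory counters keyed by engine and message type; type and key names are illustrative, and a production collector would typically use atomic or Prometheus counters to avoid lock contention.

package main

import (
	"fmt"
	"sync"

	"github.com/onflow/flow-go/module"
)

// CountingEngineMetrics is an illustrative EngineMetrics implementation that keeps
// simple in-memory counters and is safe for concurrent use.
type CountingEngineMetrics struct {
	mu     sync.Mutex
	counts map[string]uint64
}

var _ module.EngineMetrics = (*CountingEngineMetrics)(nil)

func NewCountingEngineMetrics() *CountingEngineMetrics {
	return &CountingEngineMetrics{counts: make(map[string]uint64)}
}

// bump increments the counter for the given event kind, engine, and message type.
func (c *CountingEngineMetrics) bump(kind, engine, message string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[kind+"/"+engine+"/"+message]++
}

func (c *CountingEngineMetrics) MessageSent(engine, message string) { c.bump("sent", engine, message) }

func (c *CountingEngineMetrics) MessageReceived(engine, message string) { c.bump("received", engine, message) }

func (c *CountingEngineMetrics) MessageHandled(engine, message string) { c.bump("handled", engine, message) }

func (c *CountingEngineMetrics) InboundMessageDropped(engine, message string) { c.bump("inbound_dropped", engine, message) }

func (c *CountingEngineMetrics) OutboundMessageDropped(engine, message string) { c.bump("outbound_dropped", engine, message) }

func main() {
	m := NewCountingEngineMetrics()
	m.MessageReceived("consensus", "BlockProposal")
	m.MessageHandled("consensus", "BlockProposal")
	fmt.Println(m.counts)
}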

type EntriesFunc added in v0.14.6

type EntriesFunc func() uint

type EpochLookup added in v0.20.0

type EpochLookup interface {

	// EpochForView returns the counter of the epoch that the input view belongs to.
	// Note: The EpochLookup component processes EpochExtended notifications which will
	// extend the view range for the latest epoch.
	//
	// Returns model.ErrViewForUnknownEpoch if the input does not fall within the range of a known epoch.
	EpochForView(view uint64) (epochCounter uint64, err error)
}

EpochLookup enables looking up epochs by view. CAUTION: EpochLookup should only be used for querying the previous, current, or next epoch.
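
Example (illustrative sketch): the error handling a caller is expected to perform, assuming the sentinel referenced above is the one exported by the hotstuff model package; epochForView is a hypothetical wrapper.

package main

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/module"
)

// epochForView resolves the epoch counter for a view, treating an unknown epoch as a
// benign, expected condition rather than a fatal error.
func epochForView(lookup module.EpochLookup, view uint64) (counter uint64, known bool, err error) {
	counter, err = lookup.EpochForView(view)
	if errors.Is(err, model.ErrViewForUnknownEpoch) {
		// The view is outside the epoch range known locally (previous, current, or next epoch).
		return 0, false, nil
	}
	if err != nil {
		return 0, false, fmt.Errorf("unexpected failure looking up epoch for view %d: %w", view, err)
	}
	return counter, true, nil
}

func main() {
	// A concrete EpochLookup is wired up by the node; this sketch only shows the
	// documented error contract around model.ErrViewForUnknownEpoch.
}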

type ExecutionDataProviderMetrics added in v0.27.2

type ExecutionDataProviderMetrics interface {
	RootIDComputed(duration time.Duration, numberOfChunks int)
	AddBlobsSucceeded(duration time.Duration, totalSize uint64)
	AddBlobsFailed()
}

type ExecutionDataPrunerMetrics added in v0.27.2

type ExecutionDataPrunerMetrics interface {
	Pruned(height uint64, duration time.Duration)
}

type ExecutionDataRequesterMetrics added in v0.26.2

type ExecutionDataRequesterMetrics interface {
	// ExecutionDataFetchStarted records an in-progress download
	ExecutionDataFetchStarted()

	// ExecutionDataFetchFinished records a completed download
	ExecutionDataFetchFinished(duration time.Duration, success bool, height uint64)

	// NotificationSent reports that ExecutionData received notifications were sent for a block height
	NotificationSent(height uint64)

	// FetchRetried reports that a download retry was processed
	FetchRetried()
}

type ExecutionDataRequesterV2Metrics added in v0.27.2

type ExecutionDataRequesterV2Metrics interface {
	FulfilledHeight(blockHeight uint64)
	ReceiptSkipped()
	RequestSucceeded(blockHeight uint64, duration time.Duration, totalSize uint64, numberOfAttempts int)
	RequestFailed(duration time.Duration, retryable bool)
	RequestCanceled()
	ResponseDropped()
}

type ExecutionMetrics

type ExecutionMetrics interface {
	LedgerMetrics
	RuntimeMetrics
	EVMMetrics
	ProviderMetrics
	WALMetrics

	// StartBlockReceivedToExecuted starts a span to trace the duration of a block
	// from being received for execution to execution being finished
	StartBlockReceivedToExecuted(blockID flow.Identifier)

	// FinishBlockReceivedToExecuted finishes a span to trace the duration of a block
	// from being received for execution to execution being finished
	FinishBlockReceivedToExecuted(blockID flow.Identifier)

	// ExecutionStorageStateCommitment reports the storage size of a state commitment in bytes
	ExecutionStorageStateCommitment(bytes int64)

	// ExecutionLastExecutedBlockHeight reports last executed block height
	ExecutionLastExecutedBlockHeight(height uint64)

	// ExecutionLastFinalizedExecutedBlockHeight reports last finalized and executed block height
	ExecutionLastFinalizedExecutedBlockHeight(height uint64)

	// ExecutionBlockExecuted reports the total time and computation spent on executing a block
	ExecutionBlockExecuted(dur time.Duration, stats BlockExecutionResultStats)

	// ExecutionBlockExecutionEffortVectorComponent reports the unweighted effort of given ComputationKind at block level
	ExecutionBlockExecutionEffortVectorComponent(string, uint)

	// ExecutionBlockCachedPrograms reports the number of cached programs at the end of a block
	ExecutionBlockCachedPrograms(programs int)

	// ExecutionCollectionExecuted reports the total time and computation spent on executing a collection
	ExecutionCollectionExecuted(dur time.Duration, stats CollectionExecutionResultStats)

	// ExecutionTransactionExecuted reports stats on executing a single transaction
	ExecutionTransactionExecuted(dur time.Duration, stats TransactionExecutionResultStats, info TransactionExecutionResultInfo)

	// ExecutionChunkDataPackGenerated reports stats on chunk data pack generation
	ExecutionChunkDataPackGenerated(proofSize, numberOfTransactions int)

	// ExecutionScriptExecuted reports the time and memory spent on executing a script
	ExecutionScriptExecuted(dur time.Duration, compUsed, memoryUsed, memoryEstimate uint64)

	// ExecutionCollectionRequestSent reports when a request for a collection is sent to a collection node
	ExecutionCollectionRequestSent()

	// Unused
	ExecutionCollectionRequestRetried()

	// ExecutionSync reports when the state syncing is triggered or stopped.
	ExecutionSync(syncing bool)

	// Upload metrics
	ExecutionBlockDataUploadStarted()
	ExecutionBlockDataUploadFinished(dur time.Duration)
	ExecutionComputationResultUploaded()
	ExecutionComputationResultUploadRetried()

	UpdateCollectionMaxHeight(height uint64)
}

type ExecutionResultStats added in v0.29.0

type ExecutionResultStats struct {
	ComputationUsed                 uint64
	MemoryUsed                      uint64
	EventCounts                     int
	EventSize                       int
	NumberOfRegistersTouched        int
	NumberOfBytesWrittenToRegisters int
}

func (*ExecutionResultStats) Merge added in v0.29.0

func (stats *ExecutionResultStats) Merge(other ExecutionResultStats)

type ExecutionStateIndexerMetrics added in v0.32.1

type ExecutionStateIndexerMetrics interface {
	// BlockIndexed records metrics from indexing execution data from a single block.
	BlockIndexed(height uint64, duration time.Duration, events, registers, transactionResults int)

	// BlockReindexed records that a previously indexed block was indexed again.
	BlockReindexed()

	// InitializeLatestHeight records the latest height that has been indexed.
	// This should only be used during startup. After startup, use BlockIndexed to record newly
	// indexed heights.
	InitializeLatestHeight(height uint64)
}

type FinalizedHeaderCache added in v0.31.0

type FinalizedHeaderCache interface {
	Get() *flow.Header
}

FinalizedHeaderCache is a cache of the latest finalized block header.
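
Example (illustrative sketch): because the interface is a single getter, an implementation can wrap an atomic pointer that the node's finalization logic updates; the type and the update method below are hypothetical.

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// atomicHeaderCache is an illustrative FinalizedHeaderCache: readers call Get, while the
// finalization logic calls update whenever a new block is finalized.
type atomicHeaderCache struct {
	header atomic.Pointer[flow.Header]
}

var _ module.FinalizedHeaderCache = (*atomicHeaderCache)(nil)

// Get returns the most recently stored finalized header (nil before the first update).
func (c *atomicHeaderCache) Get() *flow.Header {
	return c.header.Load()
}

// update stores the latest finalized header; callers are responsible for monotonicity.
func (c *atomicHeaderCache) update(h *flow.Header) {
	c.header.Store(h)
}

func main() {
	cache := &atomicHeaderCache{}
	cache.update(&flow.Header{Height: 100})
	fmt.Println("latest finalized height:", cache.Get().Height)
}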

type Finalizer

type Finalizer interface {

	// MakeFinal will declare a block and all of its ancestors as finalized, which
	// makes it an immutable part of the blockchain. Returning an error indicates
	// some fatal condition and will cause the finalization logic to terminate.
	MakeFinal(blockID flow.Identifier) error
}

Finalizer is used by the consensus algorithm to inform other components (such as the protocol state) about finalization of blocks.

Since we have two different protocol states (one for the main consensus, the other for the collection cluster consensus), the Finalizer interface allows each to provide its own implementation for updating its state when a block has been finalized.

Updating the protocol state should always succeed when the data is consistent. However, in case the protocol state is corrupted, an error should be returned and the consensus algorithm should halt. The error returned from MakeFinal is therefore the protocol state's way of reporting exceptions.

type GRPCConnectionPoolMetrics added in v0.31.0

type GRPCConnectionPoolMetrics interface {
	// TotalConnectionsInPool updates the number of connections to collection/execution nodes stored in the pool, and the size of the pool
	TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint)

	// ConnectionFromPoolReused tracks the number of times a connection to a collection/execution node is reused from the connection pool
	ConnectionFromPoolReused()

	// ConnectionAddedToPool tracks the number of times a collection/execution node is added to the connection pool
	ConnectionAddedToPool()

	// NewConnectionEstablished tracks the number of times a new grpc connection is established
	NewConnectionEstablished()

	// ConnectionFromPoolInvalidated tracks the number of times a cached grpc connection is invalidated and closed
	ConnectionFromPoolInvalidated()

	// ConnectionFromPoolUpdated tracks the number of times a cached connection is updated
	ConnectionFromPoolUpdated()

	// ConnectionFromPoolEvicted tracks the number of times a cached connection is evicted from the cache
	ConnectionFromPoolEvicted()
}

type GossipSubRpcInspectorMetrics added in v0.33.1

type GossipSubRpcInspectorMetrics interface {
	// OnIWantMessageIDsReceived tracks the number of message ids received by the node from other nodes on an RPC.
	// Note: this function is called on each IWANT message received by the node, not on each message id received.
	OnIWantMessageIDsReceived(msgIdCount int)

	// OnIHaveMessageIDsReceived tracks the number of message ids received by the node from other nodes on an iHave message.
	// This function is called on each iHave message received by the node.
	// Args:
	// - channel: the channel on which the iHave message was received.
	// - msgIdCount: the number of message ids received on the iHave message.
	OnIHaveMessageIDsReceived(channel string, msgIdCount int)

	// OnIncomingRpcReceived tracks the number of RPC messages received by the node.
	// Args:
	// 	iHaveCount: the number of iHAVE messages included in the RPC.
	// 	iWantCount: the number of iWANT messages included in the RPC.
	// 	graftCount: the number of GRAFT messages included in the RPC.
	// 	pruneCount: the number of PRUNE messages included in the RPC.
	// 	msgCount: the number of publish messages included in the RPC.
	OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, msgCount int)
}

GossipSubRpcInspectorMetrics encapsulates the metrics collectors for the GossipSub RPC Inspector module of the networking layer. The RPC inspector is the entry point of the GossipSub protocol: it inspects incoming RPC messages and decides whether to accept, prune, or reject them. GossipSubRpcInspectorMetrics tracks the number of RPC messages the local node receives from other nodes over the GossipSub protocol, the number of control messages included in those RPCs (i.e., IHAVE, IWANT, GRAFT, PRUNE), and the number of actual messages they contain. It differs from LocalGossipSubRouterMetrics in that the former tracks messages received from other nodes, not all of which may be accepted by the local node (e.g., due to RPC pruning or throttling), while the latter tracks the local node's view of the GossipSub protocol, i.e., only the messages that are accepted by the local node (either as a whole RPC or only for the control messages). This distinction is useful for debugging and troubleshooting: for example, the number of messages received from other nodes may be much higher than the number accepted, which may indicate that the local node is throttling incoming messages.

type GossipSubRpcValidationInspectorMetrics added in v0.31.0

type GossipSubRpcValidationInspectorMetrics interface {
	GossipSubRpcInspectorMetrics

	// AsyncProcessingStarted increments the metric tracking the number of inspect message request being processed by workers in the rpc validator worker pool.
	AsyncProcessingStarted()
	// AsyncProcessingFinished tracks the time spent by a rpc validation inspector worker to process an inspect message request asynchronously and decrements the metric tracking
	// the number of inspect message requests  being processed asynchronously by the rpc validation inspector workers.
	AsyncProcessingFinished(duration time.Duration)

	// OnIHaveControlMessageIdsTruncated tracks the number of times message ids on an iHave message were truncated.
	// Note that this function is called only when the message ids are truncated from an iHave message, not when the iHave message itself is truncated.
	// This is different from the OnControlMessagesTruncated function which is called when a slice of control messages truncated from an RPC with all their message ids.
	// Args:
	//
	//	diff: the number of actual messages truncated.
	OnIHaveControlMessageIdsTruncated(diff int)

	// OnIWantControlMessageIdsTruncated tracks the number of times message ids on an iWant message were truncated.
	// Note that this function is called only when the message ids are truncated from an iWant message, not when the iWant message itself is truncated.
	// This is different from the OnControlMessagesTruncated function which is called when a slice of control messages truncated from an RPC with all their message ids.
	// Args:
	// 	diff: the number of actual messages truncated.
	OnIWantControlMessageIdsTruncated(diff int)

	// OnControlMessagesTruncated tracks the number of times a slice of control messages is truncated from an RPC with all their included message ids.
	// Args:
	//
	//	messageType: the type of the control message that was truncated
	//	diff: the number of control messages truncated.
	OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int)

	// OnIWantMessagesInspected tracks the number of duplicate and cache miss message ids received by the node on iWant messages at the end of the async inspection iWants
	// across one RPC, regardless of the result of the inspection.
	//
	//	duplicateCount: the total number of duplicate message ids received by the node on the iWant messages at the end of the async inspection of the RPC.
	//	cacheMissCount: the total number of cache miss message ids received by the node on the iWant message at the end of the async inspection of the RPC.
	OnIWantMessagesInspected(duplicateCount int, cacheMissCount int)

	// OnIWantDuplicateMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total number of duplicate message ids
	// received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report.
	OnIWantDuplicateMessageIdsExceedThreshold()

	// OnIWantCacheMissMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total
	// number of cache miss message ids received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report.
	OnIWantCacheMissMessageIdsExceedThreshold()

	// OnIHaveMessagesInspected is called at the end of the async inspection of iHave messages of a single RPC, regardless of the result of the inspection.
	// It tracks the number of duplicate topic ids and duplicate message ids received by the node on the iHave messages of that single RPC at the end of the async inspection iHaves.
	// Args:
	//
	//	duplicateTopicIds: the total number of duplicate topic ids received by the node on the iHave messages at the end of the async inspection of the RPC.
	//	duplicateMessageIds: the number of duplicate message ids received by the node on the iHave messages at the end of the async inspection of the RPC.
	//	invalidTopicIds: the number of invalid topic ids received by the node on the iHave messages at the end of the async inspection of the RPC.
	OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds, invalidTopicIds int)

	// OnIHaveDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate topic ids
	// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report.
	OnIHaveDuplicateTopicIdsExceedThreshold()

	// OnIHaveInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of invalid topic ids
	// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report.
	OnIHaveInvalidTopicIdsExceedThreshold()

	// OnIHaveDuplicateMessageIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate message ids
	// received by the node on an iHave message exceeding the threshold, which results in a misbehaviour report.
	OnIHaveDuplicateMessageIdsExceedThreshold()

	// OnInvalidTopicIdDetectedForControlMessage tracks the number of times that the async inspection of a control message type on a single RPC failed due to an invalid topic id.
	// Args:
	// - messageType: the type of the control message on which the invalid topic id was detected.
	OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType)

	// OnActiveClusterIDsNotSetErr tracks the number of times that the async inspection of a control message type on a single RPC failed due to active cluster ids not set inspection failure.
	// This is not causing a misbehaviour report.
	OnActiveClusterIDsNotSetErr()

	// OnUnstakedPeerInspectionFailed tracks the number of times that the async inspection of a control message type on a single RPC failed due to unstaked peer inspection failure.
	// This is not causing a misbehaviour report.
	OnUnstakedPeerInspectionFailed()

	// OnInvalidControlMessageNotificationSent tracks the number of times that the async inspection of a control message failed and resulted in an invalid control message notification being disseminated.
	OnInvalidControlMessageNotificationSent()

	// OnRpcRejectedFromUnknownSender tracks the number of RPCs rejected because they were received from unstaked nodes.
	OnRpcRejectedFromUnknownSender()

	// OnPublishMessagesInspectionErrorExceedsThreshold tracks the number of times that async inspection of publish messages failed due to the number of errors exceeding the threshold.
	OnPublishMessagesInspectionErrorExceedsThreshold()

	// OnPruneDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of duplicate topic ids
	// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
	OnPruneDuplicateTopicIdsExceedThreshold()

	// OnPruneInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of invalid topic ids
	// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
	OnPruneInvalidTopicIdsExceedThreshold()

	// OnPruneMessageInspected is called at the end of the async inspection of prune messages of the RPC, regardless of the result of the inspection.
	// Args:
	// 	duplicateTopicIds: the number of duplicate topic ids received by the node on the prune messages of the RPC at the end of the async inspection prunes.
	// 	invalidTopicIds: the number of invalid topic ids received by the node on the prune messages at the end of the async inspection of a single RPC.
	OnPruneMessageInspected(duplicateTopicIds, invalidTopicIds int)

	// OnGraftDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of the graft messages of a single RPC failed due to the number of duplicate topic ids
	// received by the node on graft messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
	OnGraftDuplicateTopicIdsExceedThreshold()

	// OnGraftInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of the graft messages of a single RPC failed due to the number of invalid topic ids
	// received by the node on graft messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
	OnGraftInvalidTopicIdsExceedThreshold()

	// OnGraftMessageInspected is called at the end of the async inspection of graft messages of a single RPC, regardless of the result of the inspection.
	// Args:
	// 	duplicateTopicIds: the number of duplicate topic ids received by the node on the graft messages at the end of the async inspection of a single RPC.
	// 	invalidTopicIds: the number of invalid topic ids received by the node on the graft messages at the end of the async inspection of a single RPC.
	OnGraftMessageInspected(duplicateTopicIds, invalidTopicIds int)

	// OnPublishMessageInspected is called at the end of the async inspection of publish messages of a single RPC, regardless of the result of the inspection.
	// It tracks the total number of errors detected during the async inspection of the rpc together with their individual breakdown.
	// Args:
	// - errCount: the number of errors that occurred during the async inspection of publish messages.
	// - invalidTopicIdsCount: the number of times that an invalid topic id was detected during the async inspection of publish messages.
	// - invalidSubscriptionsCount: the number of times that an invalid subscription was detected during the async inspection of publish messages.
	// - invalidSendersCount: the number of times that an invalid sender was detected during the async inspection of publish messages.
	OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int)
}

GossipSubRpcValidationInspectorMetrics encapsulates the metrics collectors for the gossipsub rpc validation control message inspectors.

type GossipSubScoringMetrics added in v0.30.0

type GossipSubScoringMetrics interface {
	// OnOverallPeerScoreUpdated tracks the overall score of peers in the local mesh.
	OnOverallPeerScoreUpdated(float64)
	// OnAppSpecificScoreUpdated tracks the application specific score of peers in the local mesh.
	OnAppSpecificScoreUpdated(float64)
	// OnIPColocationFactorUpdated tracks the IP colocation factor of peers in the local mesh.
	OnIPColocationFactorUpdated(float64)
	// OnBehaviourPenaltyUpdated tracks the behaviour penalty of peers in the local mesh.
	OnBehaviourPenaltyUpdated(float64)
	// OnTimeInMeshUpdated tracks the time in mesh factor of peers in the local mesh for a given topic.
	OnTimeInMeshUpdated(channels.Topic, time.Duration)
	// OnFirstMessageDeliveredUpdated tracks the first message delivered factor of peers in the local mesh for a given topic.
	OnFirstMessageDeliveredUpdated(channels.Topic, float64)
	// OnMeshMessageDeliveredUpdated tracks the mesh message delivered factor of peers in the local mesh for a given topic.
	OnMeshMessageDeliveredUpdated(channels.Topic, float64)
	// OnInvalidMessageDeliveredUpdated tracks the invalid message delivered factor of peers in the local mesh for a given topic.
	OnInvalidMessageDeliveredUpdated(channels.Topic, float64)
	// SetWarningStateCount tracks the warning score state of peers in the local mesh. It updates the total number of
	// peers in the local mesh that are in the warning state based on their score.
	SetWarningStateCount(uint)
}

GossipSubScoringMetrics encapsulates the metrics collectors for the peer scoring module of GossipSub protocol. It tracks the scores of the peers in the local mesh and the different factors that contribute to the score of a peer. It also tracks the scores of the topics in the local mesh and the different factors that contribute to the score of a topic.

type GossipSubScoringRegistryMetrics added in v0.33.30

type GossipSubScoringRegistryMetrics interface {
	// DuplicateMessagePenalties tracks the duplicate message penalty for a node.
	DuplicateMessagePenalties(penalty float64)
	// DuplicateMessagesCounts tracks the duplicate message count for a node.
	DuplicateMessagesCounts(count float64)
}

GossipSubScoringRegistryMetrics encapsulates the metrics collectors for the GossipSub scoring registry. It offers insights into penalties and other factors the scoring registry uses to compute the application-specific score, focusing on internal aspects of that score and thereby distinguishing itself from GossipSubScoringMetrics.

type HeroCacheMetrics added in v0.25.2

type HeroCacheMetrics interface {
	// BucketAvailableSlots keeps track of the number of available slots in the buckets of the cache.
	BucketAvailableSlots(uint64, uint64)

	// OnKeyPutAttempt is called whenever a new (key, value) pair is attempted to be put in cache.
	// It does not reflect whether the put was successful or not.
	// A (key, value) pair put attempt may fail if the cache is full, or the key already exists.
	OnKeyPutAttempt(size uint32)

	// OnKeyPutSuccess is called whenever a new (key, entity) pair is successfully added to the cache.
	OnKeyPutSuccess(size uint32)

	// OnKeyPutDrop is called whenever a new (key, entity) pair is dropped from the cache due to full cache.
	OnKeyPutDrop()

	// OnKeyPutDeduplicated tracks the total number of unsuccessful writes caused by adding a duplicate key to the cache.
	// A duplicate key is dropped by the cache when it is written to the cache.
	// Note: in context of HeroCache, the key corresponds to the identifier of its entity. Hence, a duplicate key corresponds to
	// a duplicate entity.
	OnKeyPutDeduplicated()

	// OnKeyRemoved is called whenever a (key, entity) pair is removed from the cache.
	OnKeyRemoved(size uint32)

	// OnKeyGetSuccess tracks total number of successful read queries.
	// A read query is successful if the entity corresponding to its key is available in the cache.
	// Note: in context of HeroCache, the key corresponds to the identifier of its entity.
	OnKeyGetSuccess()

	// OnKeyGetFailure tracks total number of unsuccessful read queries.
	// A read query is unsuccessful if the entity corresponding to its key is not available in the cache.
	// Note: in context of HeroCache, the key corresponds to the identifier of its entity.
	OnKeyGetFailure()

	// OnEntityEjectionDueToFullCapacity is called whenever adding a new (key, entity) to the cache results in ejection of another (key', entity') pair.
	// This normally happens -- and is expected -- when the cache is full.
	// Note: in context of HeroCache, the key corresponds to the identifier of its entity.
	OnEntityEjectionDueToFullCapacity()

	// OnEntityEjectionDueToEmergency is called whenever a bucket is found full and all of its keys are valid, i.e.,
	// each key belongs to an existing (key, entity) pair.
	// Hence, adding a new key to that bucket will replace the oldest valid key inside that bucket.
	// Note: in context of HeroCache, the key corresponds to the identifier of its entity.
	OnEntityEjectionDueToEmergency()
}

type HotStuff

type HotStuff interface {
	ReadyDoneAware
	Startable

	// SubmitProposal submits a new block proposal to the HotStuff event loop.
	// This method blocks until the proposal is accepted to the event queue.
	//
	// Block proposals must be submitted in order and only if they extend a
	// block already known to HotStuff core.
	SubmitProposal(proposal *model.Proposal)
}

HotStuff defines the interface for the core HotStuff algorithm. It includes a method to start the event loop, and utilities to submit block proposals received from other replicas.

TODO:

HotStuff interface could extend HotStuffFollower. Thereby, we can
utilize the optimized catchup mode from the follower also for the
consensus participant.

type HotStuffFollower

type HotStuffFollower interface {
	ReadyDoneAware
	Startable

	// AddCertifiedBlock appends the given certified block to the tree of pending
	// blocks and updates the latest finalized block (if finalization progressed).
	// Unless the parent is below the pruning threshold (latest finalized view), we
	// require that the parent has previously been added.
	//
	// Notes:
	//  - Under normal operations, this method is non-blocking. The follower internally
	//    queues incoming blocks and processes them in its own worker routine. However,
	//    when the inbound queue is full, we block until there is space in the queue. This
	//    behaviour is intentional, because we cannot drop blocks (otherwise, we would
	//    cause disconnected blocks). Instead we simply block the compliance layer to
	//    avoid any pathological edge cases.
	//  - Blocks whose views are below the latest finalized view are dropped.
	//  - Inputs are idempotent (repetitions are no-ops).
	AddCertifiedBlock(certifiedBlock *model.CertifiedBlock)
}

HotStuffFollower is run by non-consensus nodes to observe the blockchain and make local determinations about block finalization. While the process of reaching consensus (incl. guaranteeing its safety and liveness) is very intricate, the criteria to confirm that consensus has been reached are relatively straightforward. Each non-consensus node can simply observe the blockchain and determine locally which blocks have been finalized without requiring additional information from the consensus nodes.

In contrast to an active HotStuff participant, the HotStuffFollower does not validate block payloads. This greatly reduces the amount of CPU and memory that it consumes. Essentially, the consensus participants exhaustively verify the entire block including the payload and only vote for the block if it is valid. The consensus committee aggregates votes from a supermajority of participants to a Quorum Certificate [QC]. Thereby, it is guaranteed that only valid blocks get certified (receive a QC). By only consuming certified blocks, the HotStuffFollower can be sure of their correctness and omit the heavy payload verification. There is no disbenefit for nodes to wait for a QC (included in child blocks), because all nodes other than consensus generally require the Source Of Randomness included in QCs to process the block in the first place.

The central purpose of the HotStuffFollower is to inform other components within the node about finalization of blocks.

Notes:

  • HotStuffFollower internally prunes blocks below the last finalized view.
  • HotStuffFollower does not handle disconnected blocks. For each input block, we require that the parent was previously added (unless the parent's view is _below_ the latest finalized view).

type HotstuffMetrics

type HotstuffMetrics interface {
	// HotStuffBusyDuration reports Metrics C6 HotStuff Busy Duration
	HotStuffBusyDuration(duration time.Duration, event string)

	// HotStuffIdleDuration reports Metrics C6 HotStuff Idle Duration
	HotStuffIdleDuration(duration time.Duration)

	// HotStuffWaitDuration reports Metrics C6 HotStuff Wait Duration - the time from receiving and
	// enqueueing a message to beginning to process that message.
	HotStuffWaitDuration(duration time.Duration, event string)

	// SetCurView reports Metrics C8: Current View maintained by Pacemaker.
	SetCurView(view uint64)

	// SetQCView reports Metrics C9: View of the newest QC known to Pacemaker.
	SetQCView(view uint64)

	// SetTCView reports last TC known to Pacemaker.
	SetTCView(view uint64)

	// CountSkipped counts the number of skips we did.
	CountSkipped()

	// CountTimeout tracks the number of views that this replica left due to observing a TC.
	CountTimeout()

	// SetTimeout sets the current timeout duration
	SetTimeout(duration time.Duration)

	// BlockProcessingDuration measures the time which the compliance engine
	// spends to process one block proposal.
	BlockProcessingDuration(duration time.Duration)

	// VoteProcessingDuration measures the time which the hotstuff.VoteAggregator
	// spends to process one vote.
	VoteProcessingDuration(duration time.Duration)

	// TimeoutObjectProcessingDuration measures the time which the hotstuff.TimeoutAggregator
	// spends to process one timeout object.
	TimeoutObjectProcessingDuration(duration time.Duration)

	// CommitteeProcessingDuration measures the time which the HotStuff's core logic
	// spends in the hotstuff.Replicas component, i.e. the time determining consensus
	// committee relations.
	CommitteeProcessingDuration(duration time.Duration)

	// SignerProcessingDuration measures the time which the HotStuff's core logic
	// spends in the hotstuff.Signer component, i.e. on crypto-related operations.
	SignerProcessingDuration(duration time.Duration)

	// ValidatorProcessingDuration measures the time which the HotStuff's core logic
	// spends in the hotstuff.Validator component, i.e. on verifying
	// consensus messages.
	ValidatorProcessingDuration(duration time.Duration)

	// PayloadProductionDuration measures the time which the HotStuff's core logic
	// spends in the module.Builder component, i.e. on generating block payloads.
	PayloadProductionDuration(duration time.Duration)

	// TimeoutCollectorsRange collects information from the node's `TimeoutAggregator` component.
	// Specifically, it measures the number of views for which we are currently collecting timeouts
	// (i.e. the number of `TimeoutCollector` instances we are maintaining) and their lowest/highest view.
	TimeoutCollectorsRange(lowestRetainedView uint64, newestViewCreatedCollector uint64, activeCollectors int)
}

type IdentifierProvider added in v0.28.0

type IdentifierProvider interface {
	Identifiers() flow.IdentifierList
}

IdentifierProvider provides an interface to get a list of Identifiers representing a specific set of nodes in the network.
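
Example (illustrative sketch): a fixed-list provider, e.g. for tests; the type name is illustrative.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// FixedIdentifierProvider is an illustrative IdentifierProvider that always returns the
// same preconfigured set of node identifiers.
type FixedIdentifierProvider struct {
	ids flow.IdentifierList
}

var _ module.IdentifierProvider = (*FixedIdentifierProvider)(nil)

// Identifiers returns a copy so that callers cannot mutate the provider's backing list.
func (p *FixedIdentifierProvider) Identifiers() flow.IdentifierList {
	out := make(flow.IdentifierList, len(p.ids))
	copy(out, p.ids)
	return out
}

func main() {
	p := &FixedIdentifierProvider{ids: flow.IdentifierList{{0x01}, {0x02}}}
	fmt.Println("known nodes:", len(p.Identifiers()))
}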

type IdentityProvider added in v0.28.0

type IdentityProvider interface {
	// Identities returns the full identities of _all_ nodes currently known to the
	// protocol that pass the provided filter. Caution, this includes ejected nodes.
	// Please check the `Ejected` flag in the identities (or provide a filter for
	// removing ejected nodes).
	Identities(flow.IdentityFilter[flow.Identity]) flow.IdentityList

	// ByNodeID returns the full identity for the node with the given Identifier,
	// where Identifier is the way the protocol refers to the node. The function
	// has the same semantics as a map lookup, where the boolean return value is
	// true if and only if Identity has been found, i.e. `Identity` is not nil.
	// Caution: function returns include ejected nodes. Please check the `Ejected`
	// flag in the identity.
	ByNodeID(flow.Identifier) (*flow.Identity, bool)

	// ByPeerID returns the full identity for the node with the given peer ID,
	// where the peer ID is the way libP2P refers to the node. The function
	// has the same semantics as a map lookup, where the boolean return value is
	// true if and only if Identity has been found, i.e. `Identity` is not nil.
	// Caution: function returns include ejected nodes. Please check the `Ejected`
	// flag in the identity.
	ByPeerID(peer.ID) (*flow.Identity, bool)
}

IdentityProvider provides an interface to get a list of Identities representing the set of participants in the Flow protocol. CAUTION: return values will include ejected nodes, so callers must check the `Identity.Ejected` flag.

type Job added in v0.15.0

type Job interface {
	// each job has a unique ID for deduplication
	ID() JobID
}

type JobConsumer added in v0.15.0

type JobConsumer interface {
	NewJobListener

	// Start starts processing jobs from a job queue.
	Start() error

	// Stop gracefully stops the consumer from reading new jobs from the job queue. It does not stop
	// the existing workers from finishing their jobs.
	// It blocks until the existing workers finish processing their jobs.
	Stop()

	// LastProcessedIndex returns the last processed job index
	LastProcessedIndex() uint64

	// NotifyJobIsDone lets the consumer know a job has been finished, so that the consumer will take
	// the next job from the job queue if there are workers available. It returns the last processed job index.
	NotifyJobIsDone(JobID) uint64

	// Size returns the number of processing jobs in the consumer.
	Size() uint
}

JobConsumer consumes jobs from a job queue. It remembers which jobs it has processed and is able to resume processing from the next one.

type JobID added in v0.15.0

type JobID string

JobID is a unique ID of the job.

type JobQueue added in v0.15.0

type JobQueue interface {
	// Add a job to the job queue
	Add(job Job) error
}

type Jobs added in v0.15.0

type Jobs interface {
	// AtIndex returns the job at the given index.
	// Error returns:
	//   * storage.ErrNotFound if a job at the provided index is not available
	AtIndex(index uint64) (Job, error)

	// Head returns the index of the last job
	Head() (uint64, error)
}

Jobs is the reader for an ordered job queue. Jobs can be fetched by index, which starts from 0.
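
Example (illustrative sketch): an in-memory type implementing both the writer (JobQueue) and reader (Jobs) sides, returning storage.ErrNotFound as the AtIndex contract requires. The behaviour of Head on an empty queue is an assumption of this sketch, and a real setup would read the queue through a JobConsumer.

package main

import (
	"fmt"
	"sync"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/storage"
)

// memJob is a trivial Job whose identifier doubles as its payload.
type memJob struct{ id module.JobID }

func (j memJob) ID() module.JobID { return j.id }

// memJobs is an illustrative in-memory job queue; indices start from 0, as documented.
type memJobs struct {
	mu   sync.Mutex
	jobs []module.Job
}

var (
	_ module.JobQueue = (*memJobs)(nil)
	_ module.Jobs     = (*memJobs)(nil)
)

// Add appends a job at the next index.
func (q *memJobs) Add(job module.Job) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.jobs = append(q.jobs, job)
	return nil
}

// AtIndex returns the job at the given index, or storage.ErrNotFound if it is not available.
func (q *memJobs) AtIndex(index uint64) (module.Job, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if index >= uint64(len(q.jobs)) {
		return nil, storage.ErrNotFound
	}
	return q.jobs[index], nil
}

// Head returns the index of the last job; returning storage.ErrNotFound when the queue is
// empty is an assumption of this sketch.
func (q *memJobs) Head() (uint64, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.jobs) == 0 {
		return 0, storage.ErrNotFound
	}
	return uint64(len(q.jobs) - 1), nil
}

func main() {
	q := &memJobs{}
	_ = q.Add(memJob{id: module.JobID("block-1")})
	head, _ := q.Head()
	job, _ := q.AtIndex(head)
	fmt.Println("head index:", head, "job:", job.ID())
}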

type LedgerMetrics

type LedgerMetrics interface {
	// ForestApproxMemorySize records approximate memory usage of forest (all in-memory trees)
	ForestApproxMemorySize(bytes uint64)

	// ForestNumberOfTrees records the current number of trees in the forest (in memory)
	ForestNumberOfTrees(number uint64)

	// LatestTrieRegCount records the number of unique registers allocated in the latest created trie
	LatestTrieRegCount(number uint64)

	// LatestTrieRegCountDiff records the difference in the number of unique registers allocated between the latest created trie and its parent trie
	LatestTrieRegCountDiff(number int64)

	// LatestTrieRegSize records the size of unique registers allocated in the latest created trie
	LatestTrieRegSize(size uint64)

	// LatestTrieRegSizeDiff records the difference in the size of unique registers allocated between the latest created trie and its parent trie
	LatestTrieRegSizeDiff(size int64)

	// LatestTrieMaxDepthTouched records the maximum depth touched of the latest created trie
	LatestTrieMaxDepthTouched(maxDepth uint16)

	// UpdateCount increases a counter of performed updates
	UpdateCount()

	// ProofSize records a proof size
	ProofSize(bytes uint32)

	// UpdateValuesNumber accumulates the number of updated values
	UpdateValuesNumber(number uint64)

	// UpdateValuesSize accumulates the total size (in bytes) of updated values
	UpdateValuesSize(byte uint64)

	// UpdateDuration records absolute time for the update of a trie
	UpdateDuration(duration time.Duration)

	// UpdateDurationPerItem records the update time for a single value (total duration / number of updated values)
	UpdateDurationPerItem(duration time.Duration)

	// ReadValuesNumber accumulates the number of read values
	ReadValuesNumber(number uint64)

	// ReadValuesSize accumulates the total size (in bytes) of read values
	ReadValuesSize(byte uint64)

	// ReadDuration records absolute time for the read from a trie
	ReadDuration(duration time.Duration)

	// ReadDurationPerItem records the read time for a single value (total duration / number of read values)
	ReadDurationPerItem(duration time.Duration)
}

LedgerMetrics provides an interface to record Ledger Storage metrics. Ledger storage is non-linear (fork-aware), so certain metrics are averaged and computed before being emitted, for better visibility.

type LibP2PConnectionMetrics added in v0.29.1

type LibP2PConnectionMetrics interface {
	// OutboundConnections updates the metric tracking the number of outbound connections of this node
	OutboundConnections(connectionCount uint)

	// InboundConnections updates the metric tracking the number of inbound connections of this node
	InboundConnections(connectionCount uint)
}

LibP2PConnectionMetrics encapsulates the metrics collectors for the connection manager of the libp2p node.

type Local

type Local interface {

	// NodeID returns the node ID of the local node.
	NodeID() flow.Identifier

	// Address returns the (listen) address of the local node.
	Address() string

	// Sign provides a signature oracle that given a message and hasher, it
	// generates and returns a signature over the message using the node's private key
	// as well as the input hasher
	Sign([]byte, hash.Hasher) (crypto.Signature, error)

	// NotMeFilter returns a filter that excludes the local node's identity when searching identities.
	NotMeFilter() flow.IdentityFilter[flow.Identity]

	// SignFunc provides a signature oracle that given a message, a hasher, and a signing function, it
	// generates and returns a signature over the message using the node's private key
	// as well as the input hasher by invoking the given signing function. The overall idea of this function
	// is to not expose the private key to the caller.
	SignFunc([]byte, hash.Hasher, func(crypto.PrivateKey, []byte, hash.Hasher) (crypto.Signature,
		error)) (crypto.Signature, error)
}

Local encapsulates the stable local node information.
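
A minimal sketch combining Local with IdentityProvider: NotMeFilter is handy for enumerating all other known nodes (note the result may still contain ejected identities, per the IdentityProvider caution above).

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// otherNodes returns the identities of all known nodes except the local one.
func otherNodes(me module.Local, provider module.IdentityProvider) flow.IdentityList {
	return provider.Identities(me.NotMeFilter())
}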

type LocalGossipSubRouterMetrics added in v0.33.1

type LocalGossipSubRouterMetrics interface {
	// OnLocalMeshSizeUpdated tracks the size of the local mesh for a topic.
	OnLocalMeshSizeUpdated(topic string, size int)

	// OnPeerAddedToProtocol is called when the local node receives a stream from a peer on a gossipsub-related protocol.
	// Args:
	// 	protocol: the protocol name that the peer is connected to.
	OnPeerAddedToProtocol(protocol string)

	// OnPeerRemovedFromProtocol is called when the local node considers a remote peer blacklisted or unavailable.
	OnPeerRemovedFromProtocol()

	// OnLocalPeerJoinedTopic is called when the local node subscribes to a gossipsub topic.
	OnLocalPeerJoinedTopic()

	// OnLocalPeerLeftTopic is called when the local node unsubscribes from a gossipsub topic.
	OnLocalPeerLeftTopic()

	// OnPeerGraftTopic is called when the local node receives a GRAFT message from a remote peer on a topic.
	// Note: at this point the received GRAFT is considered to have passed RPC inspection and is accepted by the local node.
	OnPeerGraftTopic(topic string)

	// OnPeerPruneTopic is called when the local node receives a PRUNE message from a remote peer on a topic.
	// Note: at this point the received PRUNE is considered to have passed RPC inspection and is accepted by the local node.
	OnPeerPruneTopic(topic string)

	// OnMessageEnteredValidation is called when a received pubsub message enters the validation pipeline. It is the
	// internal validation pipeline of the GossipSub protocol. The message may be rejected or accepted by the validation
	// pipeline.
	OnMessageEnteredValidation(size int)

	// OnMessageRejected is called when a received pubsub message is rejected by the validation pipeline.
	// Args:
	//
	//	reason: the reason for rejection.
	// 	size: the size of the message in bytes.
	OnMessageRejected(size int, reason string)

	// OnMessageDuplicate is called when a received pubsub message is a duplicate of a previously received message, and
	// is dropped.
	// Args:
	// 	size: the size of the message in bytes.
	OnMessageDuplicate(size int)

	// OnPeerThrottled is called when a peer is throttled by the local node, i.e., the local node is not accepting any
	// pubsub message from the peer but may still accept control messages.
	OnPeerThrottled()

	// OnRpcReceived is called when an RPC message is received by the local node. The received RPC is considered
	// to have passed RPC inspection and is accepted by the local node.
	// Args:
	// 	msgCount: the number of messages included in the RPC.
	// 	iHaveCount: the number of iHAVE messages included in the RPC.
	// 	iWantCount: the number of iWANT messages included in the RPC.
	// 	graftCount: the number of GRAFT messages included in the RPC.
	// 	pruneCount: the number of PRUNE messages included in the RPC.
	OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int)

	// OnRpcSent is called when an RPC message is sent by the local node.
	// Note: the sent RPC is considered to have passed RPC inspection and is accepted by the local node.
	// Args:
	// 	msgCount: the number of messages included in the RPC.
	// 	iHaveCount: the number of iHAVE messages included in the RPC.
	// 	iWantCount: the number of iWANT messages included in the RPC.
	// 	graftCount: the number of GRAFT messages included in the RPC.
	// 	pruneCount: the number of PRUNE messages included in the RPC.
	OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int)

	// OnOutboundRpcDropped is called when an outbound RPC message is dropped by the local node, typically because the local node's
	// outbound message queue is full, or the RPC is too big and the local node cannot fragment it.
	OnOutboundRpcDropped()

	// OnUndeliveredMessage is called when a message is not delivered to at least one subscriber of the topic, for example when
	// the subscriber is too slow to process the message.
	OnUndeliveredMessage()

	// OnMessageDeliveredToAllSubscribers is called when a message is delivered to all subscribers of the topic.
	OnMessageDeliveredToAllSubscribers(size int)
}

LocalGossipSubRouterMetrics encapsulates the metrics collectors for the GossipSub router of the local node. It gives a lens into the local node's view of the GossipSub protocol. LocalGossipSubRouterMetrics differs from GossipSubRpcInspectorMetrics in that the former tracks the local node's view of the GossipSub protocol, while the latter tracks the messages received by the local node from other nodes over the GossipSub protocol, not all of which may be accepted by the local node, e.g., due to RPC pruning or throttling. Having this distinction is useful for debugging and troubleshooting the GossipSub protocol: for example, the number of messages received by the local node from other nodes may be much higher than the number of messages accepted by the local node, which may indicate that the local node is throttling incoming messages.

type MachineAccountMetrics added in v0.33.30

type MachineAccountMetrics interface {
	// AccountBalance reports the current balance of the machine account.
	AccountBalance(bal float64)
	// RecommendedMinBalance reports the recommended minimum balance. If the actual balance
	// falls below this level, it must be refilled.
	// NOTE: Operators should alert on `AccountBalance < RecommendedMinBalance`
	RecommendedMinBalance(bal float64)
	// IsMisconfigured reports whether a critical misconfiguration has been detected.
	// NOTE: Operators should alert on non-zero values reported here.
	IsMisconfigured(misconfigured bool)
}

type MempoolMetrics

type MempoolMetrics interface {
	MempoolEntries(resource string, entries uint)
	Register(resource string, entriesFunc EntriesFunc) error
}

type NetworkCoreMetrics added in v0.29.1

type NetworkCoreMetrics interface {
	NetworkInboundQueueMetrics
	AlspMetrics
	NetworkSecurityMetrics

	// OutboundMessageSent collects metrics related to a message sent by the node.
	OutboundMessageSent(sizeBytes int, topic string, protocol string, messageType string)
	// InboundMessageReceived collects metrics related to a message received by the node.
	InboundMessageReceived(sizeBytes int, topic string, protocol string, messageType string)
	// DuplicateInboundMessagesDropped increments the metric tracking the number of duplicate messages dropped by the node.
	DuplicateInboundMessagesDropped(topic string, protocol string, messageType string)
	// UnicastMessageSendingStarted increments the metric tracking the number of unicast messages sent by the node.
	UnicastMessageSendingStarted(topic string)
	// UnicastMessageSendingCompleted decrements the metric tracking the number of unicast messages sent by the node.
	UnicastMessageSendingCompleted(topic string)
	// MessageProcessingStarted increments the metric tracking the number of messages being processed by the node.
	MessageProcessingStarted(topic string)
	// MessageProcessingFinished tracks the time spent by the node to process a message and decrements the metric tracking
	// the number of messages being processed by the node.
	MessageProcessingFinished(topic string, duration time.Duration)
}

NetworkCoreMetrics encapsulates the metrics collectors for the core networking layer functionality.

type NetworkInboundQueueMetrics added in v0.29.1

type NetworkInboundQueueMetrics interface {

	// MessageAdded increments the metric tracking the number of messages in the queue with the given priority
	MessageAdded(priority int)

	// MessageRemoved decrements the metric tracking the number of messages in the queue with the given priority
	MessageRemoved(priority int)

	// QueueDuration tracks the time spent by a message with the given priority in the queue
	QueueDuration(duration time.Duration, priority int)
}

NetworkInboundQueueMetrics encapsulates the metrics collectors for the inbound queue of the networking layer.

type NetworkMetrics

type NetworkMetrics interface {
	LibP2PMetrics
	NetworkCoreMetrics
}

NetworkMetrics is the blanket abstraction that encapsulates the metrics collectors for the networking layer.

type NetworkSecurityMetrics added in v0.28.0

type NetworkSecurityMetrics interface {
	// OnUnauthorizedMessage tracks the number of unauthorized messages seen on the network.
	OnUnauthorizedMessage(role, msgType, topic, offense string)

	// OnRateLimitedPeer tracks the number of rate limited unicast messages seen on the network.
	OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string)

	// OnViolationReportSkipped tracks the number of slashing violation reports from the violations consumer that were not
	// reported as misbehavior because the identity of the sender was not known.
	OnViolationReportSkipped()
}

NetworkSecurityMetrics covers metrics related to network protection.

type NewJobListener added in v0.15.0

type NewJobListener interface {
	// Check lets the producer notify the consumer that a new job has been added, so that the consumer
	// can check whether a worker is available to process that job.
	Check()
}

type NoopComponent added in v0.30.0

type NoopComponent struct {
	*NoopReadyDoneAware
}

NoopComponent is a no-op struct that implements the component.Component interface.

func (*NoopComponent) Start added in v0.30.0

type NoopReadyDoneAware added in v0.23.9

type NoopReadyDoneAware struct{}

NoopReadyDoneAware is a ReadyDoneAware implementation whose ready/done channels close immediately

func (*NoopReadyDoneAware) Done added in v0.23.9

func (n *NoopReadyDoneAware) Done() <-chan struct{}

func (*NoopReadyDoneAware) Ready added in v0.23.9

func (n *NoopReadyDoneAware) Ready() <-chan struct{}
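
The "channels close immediately" behaviour can be illustrated with a short sketch; this is not necessarily the package's actual implementation, only the contract it satisfies.

// closedChan returns a channel that is already closed, which is what the
// Ready and Done channels of NoopReadyDoneAware look like to their callers.
func closedChan() <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}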

type NoopRequester added in v0.33.12

type NoopRequester struct{}

func (NoopRequester) EntityByID added in v0.33.12

func (n NoopRequester) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity])

func (NoopRequester) Force added in v0.33.12

func (n NoopRequester) Force()

func (NoopRequester) Query added in v0.33.12

func (n NoopRequester) Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity])

func (NoopRequester) WithHandle added in v0.33.12

func (n NoopRequester) WithHandle(func(flow.Identifier, flow.Entity)) Requester

type PendingBlockBuffer

type PendingBlockBuffer interface {
	Add(block flow.Slashable[*flow.Block]) bool

	ByID(blockID flow.Identifier) (flow.Slashable[*flow.Block], bool)

	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Block], bool)

	DropForParent(parentID flow.Identifier)

	// PruneByView prunes any pending blocks with views less or equal to the given view.
	PruneByView(view uint64)

	Size() uint
}

PendingBlockBuffer defines an interface for a cache of pending blocks that cannot yet be processed because they do not connect to the rest of the chain state. They are indexed by parent ID to enable processing all of a parent's children once the parent is received. Safe for concurrent use.
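
A minimal usage sketch of the parent-indexed access pattern described above: once a parent block has been processed, its buffered children become processable and can be drained from the cache. The process callback is a placeholder for whatever downstream processing the caller performs.

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// processChildren handles all buffered children of a processed parent block and
// then drops them from the buffer.
func processChildren(buffer module.PendingBlockBuffer, parentID flow.Identifier, process func(flow.Slashable[*flow.Block])) {
	children, found := buffer.ByParentID(parentID)
	if !found {
		return // no pending children buffered for this parent
	}
	for _, child := range children {
		process(child)
	}
	buffer.DropForParent(parentID)
}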

type PendingClusterBlockBuffer

type PendingClusterBlockBuffer interface {
	Add(block flow.Slashable[*cluster.Block]) bool

	ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Block], bool)

	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Block], bool)

	DropForParent(parentID flow.Identifier)

	// PruneByView prunes any pending cluster blocks with views less or equal to the given view.
	PruneByView(view uint64)

	Size() uint
}

PendingClusterBlockBuffer is the same thing as PendingBlockBuffer, but for collection node cluster consensus. Safe for concurrent use.

type PingMetrics

type PingMetrics interface {
	// NodeReachable tracks the round trip time in milliseconds taken to ping a node
	// The nodeInfo provides additional information about the node such as the name of the node operator
	NodeReachable(node *flow.Identity, nodeInfo string, rtt time.Duration)

	// NodeInfo tracks the software version, sealed height and hotstuff view of a node
	NodeInfo(node *flow.Identity, nodeInfo string, version string, sealedHeight uint64, hotstuffCurView uint64)
}

type ProcessingNotifier added in v0.15.0

type ProcessingNotifier interface {
	Notify(entityID flow.Identifier)
}

ProcessingNotifier allows the engine underneath a worker to report that an entity has been processed, without knowing about the job queue. It is a callback through which the worker can convert the entity ID into a job ID and notify the consumer about the finished job.

In the current version, the entities used with this interface are chunk and block IDs.

type ProviderMetrics

type ProviderMetrics interface {
	// ChunkDataPackRequestProcessed is executed every time a chunk data pack request is picked up for processing at execution node.
	// It increases the request processed counter by one.
	ChunkDataPackRequestProcessed()
}

type ProxiedReadyDoneAware added in v0.28.0

type ProxiedReadyDoneAware struct {
	// contains filtered or unexported fields
}

ProxiedReadyDoneAware is a ReadyDoneAware implementation that proxies the ReadyDoneAware interface from another implementation. This allows for use cases where the Ready/Done methods are needed before the proxied object is initialized.

func NewProxiedReadyDoneAware added in v0.28.0

func NewProxiedReadyDoneAware() *ProxiedReadyDoneAware

NewProxiedReadyDoneAware returns a new ProxiedReadyDoneAware instance

func (*ProxiedReadyDoneAware) Done added in v0.28.0

func (n *ProxiedReadyDoneAware) Done() <-chan struct{}

func (*ProxiedReadyDoneAware) Init added in v0.28.0

Init adds the proxied ReadyDoneAware implementation and sets up the ready/done channels to close when the respective channel on the proxied object closes. Init can only be called once.

IMPORTANT: the proxied ReadyDoneAware implementation must be idempotent since the Ready and Done methods will be called immediately when calling Init.

func (*ProxiedReadyDoneAware) Ready added in v0.28.0

func (n *ProxiedReadyDoneAware) Ready() <-chan struct{}
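
A minimal usage sketch. The Init signature is not shown above, so the call below assumes Init accepts the proxied ReadyDoneAware; the build function is a hypothetical constructor.

import "github.com/onflow/flow-go/module"

// waitViaProxy lets callers wait on Ready before the underlying component exists.
func waitViaProxy(build func() module.ReadyDoneAware) {
	proxy := module.NewProxiedReadyDoneAware()
	go func() {
		// the proxied implementation must be idempotent, since Ready and Done
		// are invoked immediately inside Init (see the note above).
		proxy.Init(build()) // assumption: Init takes a ReadyDoneAware and is called exactly once
	}()
	<-proxy.Ready()
}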

type QCContractClient

type QCContractClient interface {

	// SubmitVote submits the given vote to the cluster QC aggregator smart
	// contract. This function returns only once the transaction has been
	// processed by the network. An error is returned if the transaction has
	// failed and should be re-submitted.
	// Error returns:
	//   - network.TransientError for any errors from the underlying client, if the retry period has been exceeded
	//   - errTransactionExpired if the transaction has expired
	//   - errTransactionReverted if the transaction execution reverted
	//   - generic error in case of unexpected critical failure
	SubmitVote(ctx context.Context, vote *model.Vote) error

	// Voted returns true if we have successfully submitted a vote to the
	// cluster QC aggregator smart contract for the current epoch.
	// Error returns:
	//   - network.TransientError for any errors from the underlying Flow client
	//   - generic error in case of unexpected critical failures
	Voted(ctx context.Context) (bool, error)
}

QCContractClient enables interacting with the cluster QC aggregator smart contract. This contract is deployed to the service account as part of a collection of smart contracts that facilitate and manage epoch transitions.

type RandomBeaconKeyStore added in v0.23.9

type RandomBeaconKeyStore interface {
	// ByView returns the node's locally computed beacon private key for the epoch containing the given view.
	// It returns:
	//   - (key, nil) if the node has beacon keys in the epoch of the view
	//   - (nil, model.ErrViewForUnknownEpoch) if no epoch found for given view
	//   - (nil, module.ErrNoBeaconKeyForEpoch) if beacon key for epoch is unavailable
	//   - (nil, error) if there is any exception
	ByView(view uint64) (crypto.PrivateKey, error)
}

RandomBeaconKeyStore provides access to the node's locally computed random beacon key for a given epoch. We determine which epoch to use based on the view; each view belongs to exactly one epoch. Beacon keys are only returned once:

  • the DKG has completed successfully locally AND
  • the DKG has completed successfully globally (EpochCommit event sealed) with a consistent result

Therefore keys returned by this module are guaranteed to be safe for use.
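
A minimal sketch of the documented error handling: callers fall back to staking-only signing when no beacon key is available for the epoch of the given view. The crypto import path is an assumption of this sketch.

import (
	"errors"

	"github.com/onflow/crypto" // assumed import path for the crypto package used by flow-go
	"github.com/onflow/flow-go/module"
)

// beaconKeyOrFallback returns the beacon key for the view if one is available,
// or (nil, false, nil) to signal that the caller should sign with its staking key only.
func beaconKeyOrFallback(store module.RandomBeaconKeyStore, view uint64) (crypto.PrivateKey, bool, error) {
	key, err := store.ByView(view)
	switch {
	case err == nil:
		return key, true, nil
	case errors.Is(err, module.ErrNoBeaconKeyForEpoch):
		return nil, false, nil // staking-key-only fallback, as the error documentation advises
	default:
		// includes model.ErrViewForUnknownEpoch and unexpected exceptions;
		// their handling is left to the caller in this sketch
		return nil, false, err
	}
}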

type RateLimitedBlockstoreMetrics added in v0.28.0

type RateLimitedBlockstoreMetrics interface {
	BytesRead(int)
}

type ReadyDoneAware

type ReadyDoneAware interface {
	// Ready commences startup of the module, and returns a ready channel that is closed once
	// startup has completed. Note that the ready channel may never close if errors are
	// encountered during startup.
	// If shutdown has already commenced before this method is called for the first time,
	// startup will not be performed and the returned channel will also never close.
	// This should be an idempotent method.
	Ready() <-chan struct{}

	// Done commences shutdown of the module, and returns a done channel that is closed once
	// shutdown has completed. Note that the done channel should be closed even if errors are
	// encountered during shutdown.
	// This should be an idempotent method.
	Done() <-chan struct{}
}

WARNING: The semantics of this interface will be changing in the near future, with startup / shutdown capabilities being delegated to the Startable interface instead. For more details, see https://github.com/onflow/flow-go/pull/1167

ReadyDoneAware provides an easy interface to wait for module startup and shutdown. Modules that implement this interface only support a single start-stop cycle, and will not restart if Ready() is called again after shutdown has already commenced.
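
A minimal sketch of waiting for startup with a timeout, since the ready channel may never close if errors are encountered during startup.

import (
	"fmt"
	"time"

	"github.com/onflow/flow-go/module"
)

// waitReady blocks until the module reports ready or the timeout elapses.
func waitReady(m module.ReadyDoneAware, timeout time.Duration) error {
	select {
	case <-m.Ready():
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("module not ready within %s", timeout)
	}
}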

type ReceiptValidator added in v0.14.0

type ReceiptValidator interface {

	// Validate verifies that the ExecutionReceipt satisfies the following conditions:
	//   - is from Execution node with positive weight
	//   - has valid signature
	//   - chunks are in correct format
	//   - execution result has a valid parent and satisfies the subgraph check
	//
	// In order to validate a receipt, both the executed block and the parent result
	// referenced in `receipt.ExecutionResult` must be known. We return nil if all checks
	// pass successfully.
	//
	// Expected errors during normal operations:
	//   - engine.InvalidInputError if receipt violates protocol condition
	//   - module.UnknownResultError if the receipt's parent result is unknown
	//   - module.UnknownBlockError if the executed block is unknown
	//
	// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
	Validate(receipt *flow.ExecutionReceipt) error

	// ValidatePayload verifies the ExecutionReceipts and ExecutionResults
	// in the payload for compliance with the protocol:
	// Receipts:
	//   - are from Execution node with positive weight
	//   - have valid signature
	//   - chunks are in correct format
	//   - no duplicates in fork
	//
	// Results:
	//   - have valid parents and satisfy the subgraph check
	//   - extend the execution tree, where the tree root is the latest
	//     finalized block and only results from this fork are included
	//   - no duplicates in fork
	//
	// Expected errors during normal operations:
	//   - engine.InvalidInputError if some receipts in the candidate block violate protocol condition
	//   - module.UnknownBlockError if the candidate block's _parent_ is unknown
	//
	// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
	// Note that module.UnknownResultError is not possible; we have either an invalid candidate block
	// (yields engine.InvalidInputError) or a missing parent block (yields module.UnknownBlockError).
	ValidatePayload(candidate *flow.Block) error
}

ReceiptValidator is an interface which is used for validating receipts with respect to current protocol state.
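
A minimal sketch of acting on the documented error semantics. The engine.IsInvalidInputError helper is assumed to exist alongside the error types documented in this package, and the caching/rejection steps are placeholders.

import (
	"github.com/onflow/flow-go/engine"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// handleReceipt classifies the outcome of receipt validation.
func handleReceipt(validator module.ReceiptValidator, receipt *flow.ExecutionReceipt) error {
	err := validator.Validate(receipt)
	switch {
	case err == nil:
		return nil // receipt passed all checks and can be processed further
	case module.IsUnknownBlockError(err), module.IsUnknownResultError(err):
		return nil // executed block or parent result not yet known: cache and retry later
	case engine.IsInvalidInputError(err):
		return nil // protocol violation: reject the receipt
	default:
		return err // all other errors are symptoms of critical internal failures
	}
}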

type Requester

type Requester interface {
	// EntityByID will request an entity through the request engine backing
	// the interface. The additional selector will be applied to the subset
	// of valid providers for the entity and allows finer-grained control
	// over which providers to request a given entity from. Use `filter.Any`
	// if no additional restrictions are required. Data integrity of the response
	// will be checked upon arrival. This function should be used for requesting
	// entities by their IDs.
	EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity])

	// Query will request data through the request engine backing the interface.
	// The additional selector will be applied to the subset
	// of valid providers for the data and allows finer-grained control
	// over which providers to request data from. It doesn't perform an integrity check and
	// can be used to get entities without knowing their ID.
	Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity])

	// Force will force the dispatcher to send all possible batches immediately.
	// It can be used in cases where responsiveness is of utmost importance, at
	// the cost of additional network messages.
	Force()
}
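
A minimal usage sketch, using the `filter.Any` selector mentioned above when no additional provider restrictions are needed (depending on the filter package version, an explicit type instantiation may be required).

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	"github.com/onflow/flow-go/module"
)

// requestEntity asks the requester for an entity from any valid provider and
// forces immediate dispatch when responsiveness matters more than batching.
func requestEntity(requester module.Requester, entityID flow.Identifier, urgent bool) {
	requester.EntityByID(entityID, filter.Any)
	if urgent {
		requester.Force()
	}
}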

type ResolverMetrics added in v0.21.0

type ResolverMetrics interface {
	// DNSLookupDuration tracks the time spent to resolve a DNS address.
	DNSLookupDuration(duration time.Duration)

	// OnDNSCacheMiss tracks the total number of dns requests resolved through looking up the network.
	OnDNSCacheMiss()

	// OnDNSCacheHit tracks the total number of dns requests resolved through the cache without
	// looking up the network.
	OnDNSCacheHit()

	// OnDNSCacheInvalidated is called whenever dns cache is invalidated for an entry
	OnDNSCacheInvalidated()

	// OnDNSLookupRequestDropped tracks the number of dns lookup requests that are dropped due to a full queue
	OnDNSLookupRequestDropped()
}

ResolverMetrics encapsulates the metrics collectors for the DNS resolver module of the networking layer.

type RestMetrics added in v0.31.0

type RestMetrics interface {
	// Example recorder taken from:
	// https://github.com/slok/go-http-metrics/blob/master/metrics/prometheus/prometheus.go
	httpmetrics.Recorder
	AddTotalRequests(ctx context.Context, method string, routeName string)
}

type RuntimeMetrics

type RuntimeMetrics interface {
	// RuntimeTransactionParsed reports the time spent parsing a single transaction
	RuntimeTransactionParsed(dur time.Duration)

	// RuntimeTransactionChecked reports the time spent checking a single transaction
	RuntimeTransactionChecked(dur time.Duration)

	// RuntimeTransactionInterpreted reports the time spent interpreting a single transaction
	RuntimeTransactionInterpreted(dur time.Duration)

	// RuntimeSetNumberOfAccounts sets the total number of accounts on the network
	RuntimeSetNumberOfAccounts(count uint64)

	// RuntimeTransactionProgramsCacheMiss reports a programs cache miss
	// during transaction execution
	RuntimeTransactionProgramsCacheMiss()

	// RuntimeTransactionProgramsCacheHit reports a programs cache hit
	// during transaction execution
	RuntimeTransactionProgramsCacheHit()
}

type SDKClientWrapper added in v0.20.0

type SDKClientWrapper interface {
	GetAccount(context.Context, sdk.Address) (*sdk.Account, error)
	GetAccountAtLatestBlock(context.Context, sdk.Address) (*sdk.Account, error)
	SendTransaction(context.Context, sdk.Transaction) error
	GetLatestBlock(context.Context, bool) (*sdk.Block, error)
	GetTransactionResult(context.Context, sdk.Identifier) (*sdk.TransactionResult, error)
	ExecuteScriptAtLatestBlock(context.Context, []byte, []cadence.Value) (cadence.Value, error)
	ExecuteScriptAtBlockID(context.Context, sdk.Identifier, []byte, []cadence.Value) (cadence.Value, error)
}

SDKClientWrapper is a temporary solution to mocking the `sdk.Client` interface from `flow-go-sdk`

type SealValidator added in v0.14.0

type SealValidator interface {
	Validate(candidate *flow.Block) (*flow.Seal, error)
}

SealValidator checks seals with respect to the current protocol state. It accepts a `candidate` block with seals that need to be verified for protocol state validity, and returns the following values:

  • the last seal in the `candidate` block, in case of success
  • engine.InvalidInputError, if the `candidate` block violates the protocol state
  • an exception in case of any other error; usually this is not expected

PREREQUISITE: The SealValidator can only process blocks which are attached to the main chain (without any missing ancestors). This is the case because:

  • the seal validator walks the chain backwards and requires the relevant ancestors to be known and validated
  • storage.Seals only holds seals for blocks that are attached to the main chain.

type SealingConfigsGetter added in v0.26.13

type SealingConfigsGetter interface {
	// updatable fields
	RequireApprovalsForSealConstructionDynamicValue() uint

	// not-updatable fields
	ChunkAlphaConst() uint
	RequireApprovalsForSealVerificationConst() uint
	EmergencySealingActiveConst() bool
	ApprovalRequestsThresholdConst() uint64
}

SealingConfigsGetter is an interface for the actual updatable configs module, but it only exposes the getter methods that return the config values, without exposing the setter methods. SealingConfigs contains the following configs:

  • RequireApprovalsForSealingConstruction (updatable)
  • RequireApprovalsForSealingVerification (not-updatable)
  • ChunkAlpha (not-updatable)
  • EmergencySealingActive (not-updatable)
  • ApprovalRequestsThreshold (not-updatable)

type SealingConfigsSetter added in v0.26.13

type SealingConfigsSetter interface {
	SealingConfigsGetter
	// SetRequiredApprovalsForSealingConstruction takes a new config value and updates the config
	// if the new value is valid.
	// Returns ValidationError if the new value results in an invalid sealing config.
	SetRequiredApprovalsForSealingConstruction(newVal uint) error
}

SealingConfigsSetter is an interface that allows the caller to update updatable configs

type Startable added in v0.22.4

type Startable interface {
	// Start starts the component. Any irrecoverable errors encountered while the component is running
	// should be thrown with the given SignalerContext.
	// This method should only be called once, and subsequent calls should panic with ErrMultipleStartup.
	Start(irrecoverable.SignalerContext)
}

Startable provides an interface to start a component. Once started, the component can be stopped by cancelling the given context.
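
A minimal sketch of enforcing the single-start contract with the ErrMultipleStartup sentinel defined in this package; the component type and its work loop are illustrative.

import (
	"sync/atomic"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/irrecoverable"
)

// myComponent is an illustrative component that may only be started once.
type myComponent struct {
	started atomic.Bool
}

// Start launches the component's work and panics with ErrMultipleStartup on
// repeated calls, as the Startable documentation requires.
func (c *myComponent) Start(ctx irrecoverable.SignalerContext) {
	if !c.started.CompareAndSwap(false, true) {
		panic(module.ErrMultipleStartup)
	}
	go func() {
		// do the component's work here; irrecoverable errors would be thrown
		// via ctx.Throw(err). Cancelling the parent context stops the component.
		<-ctx.Done()
	}()
}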

type SyncCore

type SyncCore interface {

	// HandleBlock handles receiving a new block. It returns true if the block
	// should be passed along to the rest of the system for processing, or false
	// if it should be discarded.
	HandleBlock(header *flow.Header) bool

	// HandleHeight handles receiving a new highest finalized height from another node.
	HandleHeight(final *flow.Header, height uint64)

	// ScanPending scans all pending block statuses for blocks that should be
	// requested. It apportions requestable items into range and batch requests
	// according to configured maximums, giving precedence to range requests.
	ScanPending(final *flow.Header) ([]chainsync.Range, []chainsync.Batch)

	// WithinTolerance returns whether or not the given height is within the configured
	// height tolerance, with respect to the given local finalized header.
	WithinTolerance(final *flow.Header, height uint64) bool

	// RangeRequested updates sync state after a range is requested.
	RangeRequested(ran chainsync.Range)

	// BatchRequested updates sync state after a batch is requested.
	BatchRequested(batch chainsync.Batch)
}

SyncCore represents state management for chain state synchronization.
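
A minimal sketch of a sync step driven by SyncCore, honouring the documented precedence of range requests over batch requests. The send callbacks are placeholders for the actual network dispatch, and the chainsync import path is assumed.

import (
	"github.com/onflow/flow-go/model/chainsync"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// requestMissing scans pending blocks and records every request it dispatches,
// so that the sync state stays consistent with what was actually requested.
func requestMissing(core module.SyncCore, final *flow.Header, sendRange func(chainsync.Range), sendBatch func(chainsync.Batch)) {
	ranges, batches := core.ScanPending(final)
	for _, r := range ranges {
		sendRange(r)
		core.RangeRequested(r)
	}
	for _, b := range batches {
		sendBatch(b)
		core.BatchRequested(b)
	}
}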

type Tracer

type Tracer interface {
	ReadyDoneAware

	// BlockRootSpan returns the block's empty root span.  The returned span
	// has already ended, and should only be used for creating child spans.
	BlockRootSpan(blockID flow.Identifier) otelTrace.Span

	// StartBlockSpan starts a span for a block, built as a child of rootSpan.
	// It also returns the context including this span which can be used for
	// nested calls.
	StartBlockSpan(
		ctx context.Context,
		blockID flow.Identifier,
		spanName trace.SpanName,
		opts ...otelTrace.SpanStartOption,
	) (
		otelTrace.Span,
		context.Context,
	)

	// StartCollectionSpan starts a span for a collection, built as a child of
	// rootSpan.  It also returns the context including this span which can be
	// used for nested calls.
	StartCollectionSpan(
		ctx context.Context,
		collectionID flow.Identifier,
		spanName trace.SpanName,
		opts ...otelTrace.SpanStartOption,
	) (
		otelTrace.Span,
		context.Context,
	)

	StartSpanFromContext(
		ctx context.Context,
		operationName trace.SpanName,
		opts ...otelTrace.SpanStartOption,
	) (
		otelTrace.Span,
		context.Context,
	)

	StartSpanFromParent(
		parentSpan otelTrace.Span,
		operationName trace.SpanName,
		opts ...otelTrace.SpanStartOption,
	) otelTrace.Span

	ShouldSample(entityID flow.Identifier) bool

	// StartSampledSpanFromParent starts a real span from the parent span
	// if the entity should be sampled.  Otherwise, it returns a no-op span.
	StartSampledSpanFromParent(
		parentSpan otelTrace.Span,
		entityID flow.Identifier,
		operationName trace.SpanName,
		opts ...otelTrace.SpanStartOption,
	) otelTrace.Span

	// WithSpanFromContext encapsulates executing a function within a span, i.e., it starts a span with the specified SpanName from the context,
	// executes the function f, and finishes the span once the function returns.
	WithSpanFromContext(
		ctx context.Context,
		operationName trace.SpanName,
		f func(),
		opts ...otelTrace.SpanStartOption,
	)
}

Tracer is the interface for tracers in Flow. It uses OpenTelemetry span definitions.
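
A minimal usage sketch of WithSpanFromContext. The span name below is a hypothetical placeholder, assuming trace.SpanName is a string-based type as its use in the signatures suggests.

import (
	"context"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/trace"
)

// executeTraced runs work inside a span that starts from the given context and
// finishes when the function returns.
func executeTraced(ctx context.Context, tracer module.Tracer, work func()) {
	tracer.WithSpanFromContext(ctx, trace.SpanName("example-operation"), work)
}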

type TransactionExecutionResultInfo added in v0.36.1

type TransactionExecutionResultInfo struct {
	TransactionID flow.Identifier
	BlockID       flow.Identifier
	BlockHeight   uint64
}

type TransactionExecutionResultStats added in v0.33.30

type TransactionExecutionResultStats struct {
	ExecutionResultStats
	NumberOfTxnConflictRetries int
	Failed                     bool
	SystemTransaction          bool
	ComputationIntensities     meter.MeteredComputationIntensities
}

type TransactionMetrics

type TransactionMetrics interface {
	// TransactionResultFetched records the round trip time while getting a transaction result
	TransactionResultFetched(dur time.Duration, size int)

	// TransactionReceived starts tracking of transaction execution/finalization/sealing
	TransactionReceived(txID flow.Identifier, when time.Time)

	// TransactionFinalized reports the time spent between the transaction being received and finalized. Reporting only
	// works if the transaction was earlier added as received.
	TransactionFinalized(txID flow.Identifier, when time.Time)

	// TransactionSealed reports the time spent between the transaction being received and sealed. Reporting only
	// works if the transaction was earlier added as received.
	TransactionSealed(txID flow.Identifier, when time.Time)

	// TransactionExecuted reports the time spent between the transaction being received and executed. Reporting only
	// works if the transaction was earlier added as received.
	TransactionExecuted(txID flow.Identifier, when time.Time)

	// TransactionExpired tracks the number of expired transactions
	TransactionExpired(txID flow.Identifier)

	// TransactionSubmissionFailed should be called whenever we try to submit a transaction and it fails
	TransactionSubmissionFailed()
}

type TransactionValidationMetrics added in v0.37.22

type TransactionValidationMetrics interface {
	// TransactionValidated tracks the number of successfully validated transactions
	TransactionValidated()
	// TransactionValidationFailed tracks the number of transactions that failed validation, together with the failure reason
	TransactionValidationFailed(reason string)
	// TransactionValidationSkipped tracks the number of skipped transaction validations
	TransactionValidationSkipped()
}

type UnicastManagerMetrics added in v0.30.0

type UnicastManagerMetrics interface {
	// OnStreamCreated tracks the overall time it takes to create a stream successfully and the number of retry attempts.
	OnStreamCreated(duration time.Duration, attempts int)
	// OnStreamCreationFailure tracks the amount of time taken and number of retry attempts used when the unicast manager fails to create a stream.
	OnStreamCreationFailure(duration time.Duration, attempts int)
	// OnPeerDialed tracks the time it takes to dial a peer during stream creation and the number of retry attempts before a peer
	// is dialed successfully.
	OnPeerDialed(duration time.Duration, attempts int)
	// OnPeerDialFailure tracks the amount of time taken and number of retry attempts used when the unicast manager cannot dial a peer
	// to establish the initial connection between the two.
	OnPeerDialFailure(duration time.Duration, attempts int)
	// OnStreamEstablished tracks the time it takes to create a stream successfully on the available open connection during stream
	// creation and the number of retry attempts.
	OnStreamEstablished(duration time.Duration, attempts int)
	// OnEstablishStreamFailure tracks the amount of time taken and number of retry attempts used when the unicast manager cannot establish
	// a stream on the open connection between two peers.
	OnEstablishStreamFailure(duration time.Duration, attempts int)

	// OnDialRetryBudgetUpdated tracks the history of the dial retry budget updates.
	OnDialRetryBudgetUpdated(budget uint64)

	// OnStreamCreationRetryBudgetUpdated tracks the history of the stream creation retry budget updates.
	OnStreamCreationRetryBudgetUpdated(budget uint64)

	// OnDialRetryBudgetResetToDefault tracks the number of times the dial retry budget is reset to default.
	OnDialRetryBudgetResetToDefault()

	// OnStreamCreationRetryBudgetResetToDefault tracks the number of times the stream creation retry budget is reset to default.
	OnStreamCreationRetryBudgetResetToDefault()
}

UnicastManagerMetrics encapsulates the metrics for the unicast manager.

type UnknownBlockError added in v0.33.30

type UnknownBlockError struct {
	// contains filtered or unexported fields
}

UnknownBlockError indicates that a referenced block is missing

func (UnknownBlockError) Error added in v0.33.30

func (e UnknownBlockError) Error() string

func (UnknownBlockError) Unwrap added in v0.33.30

func (e UnknownBlockError) Unwrap() error

type UnknownResultError added in v0.33.30

type UnknownResultError struct {
	// contains filtered or unexported fields
}

UnknownResultError indicates that a referenced result is missing

func (UnknownResultError) Error added in v0.33.30

func (e UnknownResultError) Error() string

func (UnknownResultError) Unwrap added in v0.33.30

func (e UnknownResultError) Unwrap() error

type VerificationMetrics

type VerificationMetrics interface {
	// OnBlockConsumerJobDone is invoked by the block consumer whenever it is notified that a job is done by a worker. It
	// sets the last processed block job index.
	OnBlockConsumerJobDone(uint64)
	// OnChunkConsumerJobDone is invoked by the chunk consumer whenever it is notified that a job is done by a worker. It
	// sets the last processed chunk job index.
	OnChunkConsumerJobDone(uint64)
	// OnExecutionResultReceivedAtAssignerEngine is called whenever a new execution result arrives
	// at the Assigner engine. It increments the total number of received execution results.
	OnExecutionResultReceivedAtAssignerEngine()

	// OnVerifiableChunkReceivedAtVerifierEngine increments a counter that keeps track of the number of verifiable chunks received at
	// the verifier engine from the fetcher engine.
	OnVerifiableChunkReceivedAtVerifierEngine()

	// OnFinalizedBlockArrivedAtAssigner sets a gauge that keeps track of the height of the latest finalized block arriving
	// at the assigner engine. Note that it assumes blocks are coming to the assigner engine in strictly increasing order of their height.
	OnFinalizedBlockArrivedAtAssigner(height uint64)

	// OnChunksAssignmentDoneAtAssigner increments a counter that keeps track of the total number of chunks assigned to
	// the verification node.
	OnChunksAssignmentDoneAtAssigner(chunks int)

	// OnAssignedChunkProcessedAtAssigner increments a counter that keeps track of the total number of assigned chunks pushed by
	// the assigner engine to the fetcher engine.
	OnAssignedChunkProcessedAtAssigner()

	// OnAssignedChunkReceivedAtFetcher increments a counter that keeps track of the number of assigned chunks arriving at the fetcher engine.
	OnAssignedChunkReceivedAtFetcher()

	// OnChunkDataPackRequestSentByFetcher increments a counter that keeps track of the number of chunk data pack requests that the fetcher engine
	// sends to the requester engine.
	OnChunkDataPackRequestSentByFetcher()

	// OnChunkDataPackRequestReceivedByRequester increments a counter that keeps track of the number of chunk data pack requests
	// arriving at the requester engine from the fetcher engine.
	OnChunkDataPackRequestReceivedByRequester()

	// OnChunkDataPackRequestDispatchedInNetworkByRequester increments a counter that keeps track of the number of chunk data pack requests that the
	// requester engine dispatches in the network (to the execution nodes).
	OnChunkDataPackRequestDispatchedInNetworkByRequester()

	// OnChunkDataPackResponseReceivedFromNetworkByRequester increments a counter that keeps track of the number of chunk data pack responses that the
	// requester engine receives from execution nodes (through the network).
	OnChunkDataPackResponseReceivedFromNetworkByRequester()

	// SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester is invoked when a cycle of requesting chunk data packs is done by the requester engine.
	// It updates the maximum number of attempts made by the requester engine for requesting the chunk data packs of the next unsealed height.
	// The maximum is taken over the history of all chunk data packs requested during that cycle that belong to the next unsealed height.
	SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester(attempts uint64)

	// OnChunkDataPackSentToFetcher increments a counter that keeps track of the number of chunk data packs sent to the fetcher engine from
	// the requester engine.
	OnChunkDataPackSentToFetcher()

	// OnChunkDataPackArrivedAtFetcher increments a counter that keeps track of the number of chunk data packs arriving at the fetcher engine from
	// the requester engine.
	OnChunkDataPackArrivedAtFetcher()

	// OnVerifiableChunkSentToVerifier increments a counter that keeps track of the number of verifiable chunks the fetcher engine sends to the verifier engine.
	OnVerifiableChunkSentToVerifier()

	// OnResultApprovalDispatchedInNetworkByVerifier increments a counter that keeps track of the number of result approvals dispatched in the network
	// by the verifier engine.
	OnResultApprovalDispatchedInNetworkByVerifier()
}

type WALMetrics added in v0.15.4

type WALMetrics interface {
	// ExecutionCheckpointSize reports the size of a checkpoint in bytes
	ExecutionCheckpointSize(bytes uint64)
}

Directories

Path Synopsis
builder
dkg
Package dkg implements a controller that manages the lifecycle of a Joint Feldman DKG node, as well as a broker that enables the controller to communicate with other nodes
executiondatasync
finalizer
