storage

package
v0.36.7

Published: Jul 26, 2024 License: AGPL-3.0 Imports: 7 Imported by: 52

Documentation

Index

Constants

This section is empty.

Variables

var (
	// ErrNotFound is returned when a retrieved key does not exist in the database.
	// Note: there is a second not-found error, badger.ErrKeyNotFound. The difference is that
	// badger.ErrKeyNotFound is returned by the raw badger API, whereas modules in the
	// storage/badger and storage/badger/operation packages translate it and
	// return storage.ErrNotFound for missing keys.
	ErrNotFound = errors.New("key not found")

	// ErrAlreadyExists is returned when an insert attempts to set the value
	// for a key that already exists. Inserts may only occur once per key,
	// updates may overwrite an existing key without returning an error.
	ErrAlreadyExists = errors.New("key already exists")

	// ErrDataMismatch is returned when a repeatable insert operation attempts
	// to insert a different value for the same key.
	ErrDataMismatch = errors.New("data for key is different")

	// ErrHeightNotIndexed is returned when data that is indexed sequentially is queried by a given block height
	// and that data is unavailable.
	ErrHeightNotIndexed = errors.New("data for block height not available")

	// ErrNotBootstrapped is returned when the database has not been bootstrapped.
	ErrNotBootstrapped = errors.New("pebble database not bootstrapped")
)
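
For illustration, a minimal sketch of how callers typically distinguish these sentinel errors with errors.Is; the import paths and the headerOrNil helper are assumptions for the example, not part of this package:

// Sketch only: distinguishing the sentinel errors above when reading a header.
package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// headerOrNil treats storage.ErrNotFound as a benign "unknown block" case and
// wraps every other error as an unexpected exception.
func headerOrNil(headers storage.Headers, blockID flow.Identifier) (*flow.Header, error) {
	header, err := headers.ByBlockID(blockID)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, nil // block unknown: expected case, not an exception
	}
	if err != nil {
		return nil, fmt.Errorf("could not retrieve header %v: %w", blockID, err)
	}
	return header, nil
}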

Functions

This section is empty.

Types

type All added in v0.12.0

type All struct {
	Headers                   Headers
	Guarantees                Guarantees
	Seals                     Seals
	Index                     Index
	Payloads                  Payloads
	Blocks                    Blocks
	QuorumCertificates        QuorumCertificates
	Setups                    EpochSetups
	EpochCommits              EpochCommits
	Results                   ExecutionResults
	Receipts                  ExecutionReceipts
	ChunkDataPacks            ChunkDataPacks
	Commits                   Commits
	Transactions              Transactions
	LightTransactionResults   LightTransactionResults
	TransactionResults        TransactionResults
	Collections               Collections
	Events                    Events
	EpochProtocolStateEntries EpochProtocolStateEntries
	ProtocolKVStore           ProtocolKVStore
	VersionBeacons            VersionBeacons
	RegisterIndex             RegisterIndex
}

All includes all the storage modules

type BatchStorage added in v0.15.0

type BatchStorage interface {
	GetWriter() *badger.WriteBatch

	// OnSucceed adds a callback to execute after the batch has
	// been successfully flushed.
	// useful for implementing the cache where we will only cache
	// after the batch has been successfully flushed
	OnSucceed(callback func())

	// Flush will flush the write batch and update the cache.
	Flush() error
}

BatchStorage serves as an abstraction over batch storage, adding the ability to register extra callbacks which fire after the batch is successfully flushed.
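
A brief sketch of the intended OnSucceed pattern (hypothetical helper; imports as in the sketch above): the in-memory cache is only populated from the callback, i.e. after Flush has succeeded.

// storeWithCache writes a key/value pair through the batch and caches it only
// once the batch has been successfully flushed.
func storeWithCache(batch storage.BatchStorage, key, val []byte, cache map[string][]byte) error {
	writer := batch.GetWriter()
	if err := writer.Set(key, val); err != nil {
		return fmt.Errorf("could not add entry to write batch: %w", err)
	}
	// the callback fires only after a successful Flush
	batch.OnSucceed(func() {
		cache[string(key)] = val
	})
	return batch.Flush()
}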

type Blocks

type Blocks interface {

	// Store will atomically store a block with all its dependencies.
	Store(block *flow.Block) error

	// StoreTx allows us to store a new block, including its payload & header, as part of a DB transaction, while
	// still going through the caching layer.
	StoreTx(block *flow.Block) func(*transaction.Tx) error

	// ByID returns the block with the given hash. It is available for
	// finalized and ambiguous blocks.
	ByID(blockID flow.Identifier) (*flow.Block, error)

	// ByHeight returns the block at the given height. It is only available
	// for finalized blocks.
	ByHeight(height uint64) (*flow.Block, error)

	// ByCollectionID returns the block for the given collection ID.
	ByCollectionID(collID flow.Identifier) (*flow.Block, error)

	// IndexBlockForCollections indexes the block each collection was
	// included in.
	IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error
}

Blocks represents persistent storage for blocks.
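
As a sketch (hypothetical helper, imports as above): since ByHeight only serves finalized blocks, storage.ErrNotFound commonly just means the height has not been finalized yet.

// finalizedBlock returns (nil, false, nil) when no finalized block exists at the height.
func finalizedBlock(blocks storage.Blocks, height uint64) (*flow.Block, bool, error) {
	block, err := blocks.ByHeight(height)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, false, nil // height not finalized (yet)
	}
	if err != nil {
		return nil, false, fmt.Errorf("could not retrieve block at height %d: %w", height, err)
	}
	return block, true, nil
}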

type ChunkDataPacks

type ChunkDataPacks interface {

	// Store stores multiple ChunkDataPacks cs keyed by their ChunkIDs in a batch.
	// No errors are expected during normal operation, but it may return a generic error.
	Store(cs []*flow.ChunkDataPack) error

	// Remove removes multiple ChunkDataPacks cs keyed by their ChunkIDs in a batch.
	// No errors are expected during normal operation, but it may return a generic error.
	Remove(cs []flow.Identifier) error

	// ByChunkID returns the chunk data pack for the given chunk ID.
	ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error)

	// BatchRemove removes ChunkDataPack c keyed by its ChunkID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemove(chunkID flow.Identifier, batch BatchStorage) error
}

ChunkDataPacks represents persistent storage for chunk data packs.

type ChunksQueue added in v0.15.0

type ChunksQueue interface {
	StoreChunkLocator(locator *chunks.Locator) (bool, error)

	LatestIndex() (uint64, error)

	AtIndex(index uint64) (*chunks.Locator, error)
}

type ClusterBlocks

type ClusterBlocks interface {

	// Store stores the cluster block.
	Store(block *cluster.Block) error

	// ByID returns the block with the given ID.
	ByID(blockID flow.Identifier) (*cluster.Block, error)

	// ByHeight returns the block with the given height. Only available for
	// finalized blocks.
	ByHeight(height uint64) (*cluster.Block, error)
}

type ClusterPayloads

type ClusterPayloads interface {

	// Store stores and indexes the given cluster payload.
	Store(blockID flow.Identifier, payload *cluster.Payload) error

	// ByBlockID returns the cluster payload for the given block ID.
	ByBlockID(blockID flow.Identifier) (*cluster.Payload, error)
}

ClusterPayloads handles storing and retrieving payloads for collection node cluster consensus.

type Collections

type Collections interface {

	// StoreLight inserts the collection. It does not insert, nor check
	// existence of, the constituent transactions.
	StoreLight(collection *flow.LightCollection) error

	// Store inserts the collection keyed by ID and all constituent
	// transactions.
	Store(collection *flow.Collection) error

	// Remove removes the collection and all constituent transactions.
	Remove(collID flow.Identifier) error

	// LightByID returns collection with the given ID. Only retrieves
	// transaction hashes.
	LightByID(collID flow.Identifier) (*flow.LightCollection, error)

	// ByID returns the collection with the given ID, including all
	// transactions within the collection.
	ByID(collID flow.Identifier) (*flow.Collection, error)

	// StoreLightAndIndexByTransaction inserts the light collection (only
	// transaction IDs) and adds a transaction id index for each of the
	// transactions within the collection (transaction_id->collection_id).
	//
	// NOTE: Currently it is possible in rare circumstances for two collections
	// to be guaranteed which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/3556).
	// The second of these will revert upon reaching the execution node, so
	// this doesn't impact the execution state, but it can result in the Access
	// node processing two collections which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/5337).
	// To handle this, we skip indexing the affected transaction when inserting
	// the transaction_id->collection_id index when an index for the transaction
	// already exists.
	StoreLightAndIndexByTransaction(collection *flow.LightCollection) error

	// LightByTransactionID returns the collection for the given transaction ID. Only retrieves
	// transaction hashes.
	LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)
}

Collections represents persistent storage for collections.
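
A short sketch of resolving the collection that contains a transaction via the transaction_id->collection_id index maintained by StoreLightAndIndexByTransaction (hypothetical helper, imports as above):

// collectionForTransaction resolves the light collection containing the given transaction.
func collectionForTransaction(collections storage.Collections, txID flow.Identifier) (*flow.LightCollection, error) {
	coll, err := collections.LightByTransactionID(txID)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, fmt.Errorf("transaction %v is not indexed by any known collection: %w", txID, err)
	}
	return coll, err
}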

type Commits

type Commits interface {

	// Store will store a commit in the persistent storage.
	Store(blockID flow.Identifier, commit flow.StateCommitment) error

	// BatchStore stores Commit keyed by blockID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch BatchStorage) error

	// ByBlockID will retrieve a commit by its ID from persistent storage.
	ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error)

	// BatchRemoveByBlockID removes Commit keyed by blockID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error
}

Commits represents persistent storage for state commitments.

type ComputationResultUploadStatus added in v0.28.0

type ComputationResultUploadStatus interface {
	// Upsert upserts the ComputationResult upload status into persistent storage for the given BlockID.
	Upsert(blockID flow.Identifier, wasUploadCompleted bool) error

	// GetIDsByUploadStatus returns BlockIDs whose upload status matches with targetUploadStatus
	GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error)

	// ByID returns the upload status of ComputationResult with given BlockID.
	ByID(blockID flow.Identifier) (bool, error)

	// Remove removes an instance of ComputationResult with given BlockID.
	Remove(blockID flow.Identifier) error
}

ComputationResultUploadStatus defines storage operations for the upload status of a given ComputationResult instance: false means the upload has not completed, true means the upload has completed.

type ConsumerProgress added in v0.15.0

type ConsumerProgress interface {
	// read the current processed index
	ProcessedIndex() (uint64, error)
	// insert the default processed index to the storage layer, can only be done once.
	// initialize for the second time will return storage.ErrAlreadyExists
	InitProcessedIndex(defaultIndex uint64) error
	// update the processed index in the storage layer.
	// it will fail if InitProcessedIndex was never called.
	SetProcessedIndex(processed uint64) error
}

ConsumerProgress reads and writes the last processed index of the job in the job queue
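
A sketch of the typical initialize-or-resume pattern for a job consumer (hypothetical helper, imports as above); a repeated InitProcessedIndex returning storage.ErrAlreadyExists is treated as benign here:

// initOrResume initializes the processed index on first start and otherwise
// resumes from the previously persisted value.
func initOrResume(progress storage.ConsumerProgress, defaultIndex uint64) (uint64, error) {
	err := progress.InitProcessedIndex(defaultIndex)
	if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
		return 0, fmt.Errorf("could not initialize processed index: %w", err)
	}
	// either freshly initialized or previously persisted
	return progress.ProcessedIndex()
}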

type DKGState added in v0.23.9

type DKGState interface {

	// SetDKGStarted sets the flag indicating the DKG has started for the given epoch.
	// Error returns: storage.ErrAlreadyExists
	SetDKGStarted(epochCounter uint64) error

	// GetDKGStarted checks whether the DKG has been started for the given epoch.
	// No errors expected during normal operation.
	GetDKGStarted(epochCounter uint64) (bool, error)

	// SetDKGEndState stores that the DKG has ended, and its end state.
	// Error returns: storage.ErrAlreadyExists
	SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error

	// GetDKGEndState retrieves the end state for the given DKG.
	// Error returns: storage.ErrNotFound
	GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error)

	// InsertMyBeaconPrivateKey stores the random beacon private key for an epoch.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys
	// to guarantee only keys safe for signing are returned
	// Error returns: storage.ErrAlreadyExists
	InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error

	// RetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys
	// to guarantee only keys safe for signing are returned
	// Error returns: storage.ErrNotFound
	RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)
}

DKGState is the storage interface for storing all artifacts and state related to the DKG process, including the latest state of a running or completed DKG, and computed beacon keys.

type EpochCommits

type EpochCommits interface {

	// StoreTx allows us to store a new epoch commit in a DB transaction while updating the cache.
	StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error

	// ByID will return the EpochCommit event by its ID.
	// Error returns:
	// * storage.ErrNotFound if no EpochCommit with the ID exists
	ByID(flow.Identifier) (*flow.EpochCommit, error)
}

type EpochProtocolStateEntries added in v0.33.30

type EpochProtocolStateEntries interface {
	// StoreTx returns an anonymous function (intended to be executed as part of a badger transaction),
	// which persists the given epoch sub-state as part of a DB tx. Per convention, the identities in
	// the Protocol State must be in canonical order for the current and next epoch (if present),
	// otherwise an exception is returned.
	// Expected errors of the returned anonymous function:
	//   - storage.ErrAlreadyExists if an epoch sub-state with the given id is already stored
	StoreTx(epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.EpochProtocolStateEntry) func(*transaction.Tx) error

	// Index returns an anonymous function that is intended to be executed as part of a database transaction.
	// In a nutshell, we want to maintain a map from `blockID` to `epochProtocolStateID`, where `blockID` references the
	// block that _proposes_ the epoch sub-state.
	// Upon call, the anonymous function persists the specific map entry in the node's database.
	// Protocol convention:
	//   - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example,
	//     the protocol state changes if we seal some execution results emitting service events.
	//   - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value,
	//     the hash of the resulting protocol state at the end of processing B is to be used.
	//   - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block,
	//     _after_ validating the QC.
	//
	// Expected errors during normal operations:
	//   - storage.ErrAlreadyExists if an epoch sub-state for the given blockID has already been indexed
	Index(blockID flow.Identifier, epochProtocolStateID flow.Identifier) func(*transaction.Tx) error

	// ByID returns the flow.RichEpochProtocolStateEntry by its ID.
	// Expected errors during normal operations:
	//   - storage.ErrNotFound if no epoch state entry with the given Identifier is known.
	ByID(id flow.Identifier) (*flow.RichEpochProtocolStateEntry, error)

	// ByBlockID retrieves the flow.RichEpochProtocolStateEntry that the block with the given ID proposes.
	// CAUTION: this protocol state requires confirmation by a QC and will only become active at the child block,
	// _after_ validating the QC. Protocol convention:
	//   - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example,
	//     the protocol state changes if we seal some execution results emitting service events.
	//   - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value,
	//     the hash of the resulting protocol state at the end of processing B is to be used.
	//   - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block,
	//     _after_ validating the QC.
	//
	// Expected errors during normal operations:
	//   - storage.ErrNotFound if no epoch state entry has been indexed for the given block.
	ByBlockID(blockID flow.Identifier) (*flow.RichEpochProtocolStateEntry, error)
}

EpochProtocolStateEntries represents persistent, fork-aware storage for the Epoch-related sub-state of the overall Protocol State (KV Store).

type EpochSetups

type EpochSetups interface {

	// StoreTx allows us to store a new epoch setup in a DB transaction while going through the cache.
	StoreTx(*flow.EpochSetup) func(*transaction.Tx) error

	// ByID will return the EpochSetup event by its ID.
	// Error returns:
	// * storage.ErrNotFound if no EpochSetup with the ID exists
	ByID(flow.Identifier) (*flow.EpochSetup, error)
}

type Events

type Events interface {
	// Store will store events for the given block ID
	Store(blockID flow.Identifier, blockEvents []flow.EventsList) error

	// BatchStore will store events for the given block ID in a given batch
	BatchStore(blockID flow.Identifier, events []flow.EventsList, batch BatchStorage) error

	// ByBlockID returns the events for the given block ID
	ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionID returns the events for the given block ID and transaction ID
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionIndex returns the events for the transaction at given index in a given block
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error)

	// ByBlockIDEventType returns the events for the given block ID and event type
	ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error)

	// BatchRemoveByBlockID removes events keyed by a blockID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error
}

Events represents persistent storage for events.

type ExecutionReceipts

type ExecutionReceipts interface {

	// Store stores an execution receipt.
	Store(receipt *flow.ExecutionReceipt) error

	// BatchStore stores an execution receipt inside given batch
	BatchStore(receipt *flow.ExecutionReceipt, batch BatchStorage) error

	// ByID retrieves an execution receipt by its ID.
	ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)

	// ByBlockID retrieves all known execution receipts for the given block
	// (from any Execution Node).
	ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
}

ExecutionReceipts holds and indexes Execution Receipts. The storage-layer abstraction is from the viewpoint of the network: there are multiple execution nodes which produce several receipts for each block. By default, there is no distinguished execution node (they are all equal).
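
Because several Execution Nodes may each produce a receipt for the same block, callers of ByBlockID often group the returned receipts by executor; a hedged sketch (hypothetical helper, imports as above):

// receiptsByExecutor groups all known receipts for a block by the executor that issued them.
func receiptsByExecutor(receipts storage.ExecutionReceipts, blockID flow.Identifier) (map[flow.Identifier][]*flow.ExecutionReceipt, error) {
	list, err := receipts.ByBlockID(blockID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve receipts for block %v: %w", blockID, err)
	}
	byExecutor := make(map[flow.Identifier][]*flow.ExecutionReceipt)
	for _, receipt := range list {
		byExecutor[receipt.ExecutorID] = append(byExecutor[receipt.ExecutorID], receipt)
	}
	return byExecutor, nil
}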

type ExecutionResults

type ExecutionResults interface {

	// Store stores an execution result.
	Store(result *flow.ExecutionResult) error

	// BatchStore stores an execution result in a given batch
	BatchStore(result *flow.ExecutionResult, batch BatchStorage) error

	// ByID retrieves an execution result by its ID. Returns `ErrNotFound` if `resultID` is unknown.
	ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)

	// ByIDTx returns a functor which retrieves the execution result by its ID, as part of a future database transaction.
	// When executing the functor, it returns `ErrNotFound` if no execution result with the respective ID is known.
	ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)

	// Index indexes an execution result by block ID.
	Index(blockID flow.Identifier, resultID flow.Identifier) error

	// ForceIndex indexes an execution result by block ID overwriting existing database entry
	ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error

	// BatchIndex indexes an execution result by block ID in a given batch
	BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch BatchStorage) error

	// ByBlockID retrieves an execution result by block ID.
	ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)

	// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch.
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch BatchStorage) error
}

type Guarantees

type Guarantees interface {

	// Store inserts the collection guarantee.
	Store(guarantee *flow.CollectionGuarantee) error

	// ByCollectionID retrieves the collection guarantee by collection ID.
	ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error)
}

Guarantees represents persistent storage for collection guarantees.

type Headers

type Headers interface {

	// Store will store a header.
	Store(header *flow.Header) error

	// ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks.
	// Error returns:
	//  - ErrNotFound if no block header with the given ID exists
	ByBlockID(blockID flow.Identifier) (*flow.Header, error)

	// ByHeight returns the block with the given number. It is only available for finalized blocks.
	ByHeight(height uint64) (*flow.Header, error)

	// Exists returns true if a header with the given ID has been stored.
	// No errors are expected during normal operation.
	Exists(blockID flow.Identifier) (bool, error)

	// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized
	// version of `ByHeight` that skips retrieving the block. Expected errors during normal operations:
	//  - storage.ErrNotFound if no finalized block is known at given height
	BlockIDByHeight(height uint64) (flow.Identifier, error)

	// ByParentID finds all children for the given parent block. The returned headers
	// might be unfinalized; if there is more than one, at least one of them has to
	// be unfinalized.
	ByParentID(parentID flow.Identifier) ([]*flow.Header, error)
}

Headers represents persistent storage for block headers.
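
A sketch of using BlockIDByHeight as the lighter-weight alternative to ByHeight when only the identifier of the finalized block is needed (hypothetical helper, imports as above):

// finalizedID returns the ID of the block finalized at the given height, or
// flow.ZeroID with a wrapped error if no block has been finalized there.
func finalizedID(headers storage.Headers, height uint64) (flow.Identifier, error) {
	blockID, err := headers.BlockIDByHeight(height)
	if errors.Is(err, storage.ErrNotFound) {
		return flow.ZeroID, fmt.Errorf("no finalized block at height %d: %w", height, err)
	}
	return blockID, err
}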

type HeightIndex added in v0.32.0

type HeightIndex interface {
	// LatestHeight returns the latest indexed height.
	LatestHeight() (uint64, error)
	// FirstHeight at which we started to index. Returns the first indexed height found in the store.
	FirstHeight() (uint64, error)
	// SetLatestHeight updates the latest height.
	// The provided height should either be one higher than the current height or the same to ensure idempotency.
	// If the height is not within those bounds it will panic!
	// An error might get returned if there are problems with persisting the height.
	SetLatestHeight(height uint64) error
}

HeightIndex defines methods for indexing height. This interface should normally be composed with some other resource we want to index by height.

type Index

type Index interface {

	// Store stores the index for a block payload.
	Store(blockID flow.Identifier, index *flow.Index) error

	// ByBlockID retrieves the index for a block payload.
	// Error returns:
	//  - ErrNotFound if no block header with the given ID exists
	ByBlockID(blockID flow.Identifier) (*flow.Index, error)
}

type Ledger

type Ledger interface {
	EmptyStateCommitment() flow.StateCommitment

	// Trusted methods (without proof)
	// Get registers at specific StateCommitment by a list of register ids
	GetRegisters(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, err error)
	// Batched atomic updates of a subset of registers at specific state
	UpdateRegisters(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, err error)

	// Untrusted methods (providing proofs)
	// Get registers at specific StateCommitment by a list of register ids with proofs
	GetRegistersWithProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, proofs []flow.StorageProof, err error)
	// Batched atomic updates of a subset of registers at specific state with proofs
	UpdateRegistersWithProof(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, proofs []flow.StorageProof, err error)
}

Ledger takes care of storing registers (key-value pairs) and provides proofs of correctness. We aim to store a state on the order of 10^10 registers with up to 1M historic state versions.

type LedgerVerifier

type LedgerVerifier interface {
	// verify if a provided proof for getRegisters is accurate
	VerifyRegistersProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment, values []flow.RegisterValue, proof []flow.StorageProof) (verified bool, err error)
}

LedgerVerifier should be designed as a standalone package to verify storage proofs.

type LightTransactionResults added in v0.32.0

type LightTransactionResults interface {

	// BatchStore inserts a batch of transaction results into a given batch
	BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch BatchStorage) error

	// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error)

	// ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error)

	// ByBlockID gets all transaction results for a block, ordered by transaction index
	ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error)
}

LightTransactionResults represents persistent storage for light transaction results.

type MyExecutionReceipts added in v0.15.0

type MyExecutionReceipts interface {
	// StoreMyReceipt stores the receipt and marks it as mine (trusted). My
	// receipts are indexed by the block whose result they compute. Currently,
	// we only support indexing a _single_ receipt per block. Attempting to
	// store conflicting receipts for the same block will error.
	StoreMyReceipt(receipt *flow.ExecutionReceipt) error

	// BatchStoreMyReceipt stores blockID-to-my-receipt index entry keyed by blockID in a provided batch.
	// No errors are expected during normal operation
	// If entity fails marshalling, the error is wrapped in a generic error and returned.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch BatchStorage) error

	// MyReceipt retrieves my receipt for the given block.
	MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error)

	// BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch BatchStorage) error
}

MyExecutionReceipts reuses the storage.ExecutionReceipts API, but does not expose it directly. Instead, it includes "My" in the method names in order to highlight the notion of "MY execution receipt", from the viewpoint of an individual Execution Node.

type Payloads

type Payloads interface {

	// Store will store a payload and index its contents.
	Store(blockID flow.Identifier, payload *flow.Payload) error

	// ByBlockID returns the payload with the given hash. It is available for
	// finalized and ambiguous blocks.
	ByBlockID(blockID flow.Identifier) (*flow.Payload, error)
}

Payloads represents persistent storage for payloads.

type ProtocolKVStore added in v0.33.30

type ProtocolKVStore interface {
	// StoreTx returns an anonymous function (intended to be executed as part of a badger transaction),
	// which persists the given KV-store snapshot as part of a DB tx.
	// Expected errors of the returned anonymous function:
	//   - storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored.
	StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error

	// IndexTx returns an anonymous function intended to be executed as part of a database transaction.
	// In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the
	// block that _proposes_ the updated key-value store.
	// Upon call, the anonymous function persists the specific map entry in the node's database.
	// Protocol convention:
	//   - Consider block B, whose ingestion might potentially lead to an updated KV store. For example,
	//     the KV store changes if we seal some execution results emitting specific service events.
	//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store.
	//   - CAUTION: The updated state requires confirmation by a QC and will only become active at the
	//     child block, _after_ validating the QC.
	//
	// Expected errors during normal operations:
	//   - storage.ErrAlreadyExists if a KV store for the given blockID has already been indexed.
	IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error

	// ByID retrieves the KV store snapshot with the given ID.
	// Expected errors during normal operations:
	//   - storage.ErrNotFound if no snapshot with the given Identifier is known.
	ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error)

	// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes.
	// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block,
	// _after_ validating the QC. Protocol convention:
	//   - Consider block B, whose ingestion might potentially lead to an updated KV store state.
	//     For example, the state changes if we seal some execution results emitting specific service events.
	//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value,
	//     the hash of the resulting state at the end of processing B is to be used.
	//   - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block,
	//     _after_ validating the QC.
	//
	// Expected errors during normal operations:
	//   - storage.ErrNotFound if no snapshot has been indexed for the given block.
	ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error)
}

ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. At this level, the API deals with versioned data blobs, each representing a snapshot of the Protocol State. The *current* implementation allows retrieving snapshots from the database (e.g. to answer external API calls) even for legacy protocol states whose versions are no longer supported. However, this _may_ change in the future, such that only versioned snapshots supported by the current software can be retrieved. TODO: maybe rename to `ProtocolStateSnapshots` (?) because at this low level we are not exposing the KV-store, it is just an encoded data blob.

type QuorumCertificates added in v0.30.0

type QuorumCertificates interface {
	// StoreTx stores a Quorum Certificate as part of a database transaction. The QC is indexed by QC.BlockID.
	// * storage.ErrAlreadyExists if any QC for blockID is already stored
	StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error
	// ByBlockID returns QC that certifies block referred by blockID.
	// * storage.ErrNotFound if no QC for blockID exists.
	ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error)
}

QuorumCertificates represents storage for Quorum Certificates. Quorum Certificates are distributed using blocks, where a block incorporates a QC for its parent. When stored, QCs are indexed by the ID of the block they certify (not the block they are included within). In the example below, `QC_1` is indexed by `Block_1.ID()`:

	Block_1 <- Block_2(QC_1)

type RegisterIndex added in v0.32.0

type RegisterIndex interface {
	// Get register by the register ID at a given block height.
	//
	// If the register at the given height was not indexed, returns the highest
	// height the register was indexed at.
	// Expected errors:
	// - storage.ErrHeightNotIndexed if the given height was not indexed yet or lower than the first indexed height.
	// - storage.ErrNotFound if the given height is indexed, but the register does not exist.
	Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error)

	// LatestHeight returns the latest indexed height.
	LatestHeight() uint64

	// FirstHeight at which we started to index. Returns the first indexed height found in the store.
	FirstHeight() uint64

	// Store batch of register entries at the provided block height.
	//
	// The provided height must either be one higher than the current height or the same to ensure idempotency,
	// otherwise an error is returned. If the height is not within those bounds, there is either a bug
	// or state corruption.
	//
	// No errors are expected during normal operation.
	Store(entries flow.RegisterEntries, height uint64) error
}

RegisterIndex defines methods for the register index.
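
A sketch distinguishing the two expected error cases of Get (hypothetical helper, imports as above):

// registerAt reads a register value at a height, reporting whether the height
// is outside the indexed range or the register simply does not exist there.
func registerAt(index storage.RegisterIndex, id flow.RegisterID, height uint64) (flow.RegisterValue, error) {
	value, err := index.Get(id, height)
	switch {
	case errors.Is(err, storage.ErrHeightNotIndexed):
		return nil, fmt.Errorf("height %d outside indexed range [%d, %d]: %w",
			height, index.FirstHeight(), index.LatestHeight(), err)
	case errors.Is(err, storage.ErrNotFound):
		return nil, fmt.Errorf("register %v does not exist at height %d: %w", id, height, err)
	default:
		return value, err
	}
}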

type ResultApprovals added in v0.14.0

type ResultApprovals interface {

	// Store stores a ResultApproval
	Store(result *flow.ResultApproval) error

	// Index indexes a ResultApproval by result ID and chunk index
	Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error

	// ByID retrieves a ResultApproval by its ID
	ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)

	// ByChunk retrieves a ResultApproval by result ID and chunk index
	ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error)
}

type SafeBeaconKeys added in v0.23.9

type SafeBeaconKeys interface {

	// RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given
	// epoch, only if my key has been confirmed valid and safe for use.
	//
	// Returns:
	//   - (key, true, nil) if the key is present and confirmed valid
	//   - (nil, false, nil) if the key has been marked invalid or unavailable
	//     -> no beacon key will ever be available for the epoch in this case
	//   - (nil, false, storage.ErrNotFound) if the DKG has not ended
	//   - (nil, false, error) for any unexpected exception
	RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error)
}

SafeBeaconKeys is a safe way to access beacon keys.
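
A sketch interpreting the three-way return of RetrieveMyBeaconPrivateKey (hypothetical helper; imports as above, with crypto.PrivateKey taken from the same crypto package referenced by the interface):

// beaconKeyIfSafe returns the beacon key only when it is confirmed safe for signing.
// A nil key with nil error means no safe beacon key will ever exist for this epoch.
func beaconKeyIfSafe(keys storage.SafeBeaconKeys, epochCounter uint64) (crypto.PrivateKey, error) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epochCounter)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, fmt.Errorf("DKG for epoch %d has not ended yet: %w", epochCounter, err)
	}
	if err != nil {
		return nil, fmt.Errorf("unexpected failure retrieving beacon key: %w", err)
	}
	if !safe {
		return nil, nil // key marked invalid or unavailable for this epoch
	}
	return key, nil
}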

type Seals

type Seals interface {

	// Store inserts the seal.
	Store(seal *flow.Seal) error

	// ByID retrieves the seal by the collection
	// fingerprint.
	ByID(sealID flow.Identifier) (*flow.Seal, error)

	// HighestInFork retrieves the highest seal that was included in the
	// fork up to (and including) the given blockID.
	// This method should return
	//   - a seal for any block known to the node.
	//   - storage.ErrNotFound if blockID is unknown.
	HighestInFork(blockID flow.Identifier) (*flow.Seal, error)

	// FinalizedSealForBlock retrieves the finalized seal for the given block ID.
	// Returns storage.ErrNotFound if blockID is unknown or no _finalized_ seal
	// is known for the block.
	FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error)
}

Seals represents persistent storage for seals.
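
A sketch combining HighestInFork with Headers.ByBlockID to obtain the latest sealed header on the fork ending at a given block (hypothetical helper, imports as above):

// latestSealedHeader returns the header of the latest sealed block on the fork
// with head blockID.
func latestSealedHeader(seals storage.Seals, headers storage.Headers, blockID flow.Identifier) (*flow.Header, error) {
	seal, err := seals.HighestInFork(blockID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve highest seal in fork of %v: %w", blockID, err)
	}
	return headers.ByBlockID(seal.BlockID)
}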

type ServiceEvents added in v0.14.0

type ServiceEvents interface {
	// BatchStore stores service events keyed by a blockID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchStore(blockID flow.Identifier, events []flow.Event, batch BatchStorage) error

	// ByBlockID returns the events for the given block ID
	ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

	// BatchRemoveByBlockID removes service events keyed by a blockID in provided batch
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error
}

type StoredChunkDataPack added in v0.33.36

type StoredChunkDataPack struct {
	ChunkID           flow.Identifier
	StartState        flow.StateCommitment
	Proof             flow.StorageProof
	CollectionID      flow.Identifier
	SystemChunk       bool
	ExecutionDataRoot flow.BlockExecutionDataRoot
}

StoredChunkDataPack is the in-storage representation of a chunk data pack. Its prime difference is that, instead of an actual collection, it keeps only the collection ID, relying on the collection being maintained in separate storage.

func ToStoredChunkDataPack added in v0.33.36

func ToStoredChunkDataPack(c *flow.ChunkDataPack) *StoredChunkDataPack
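
A hedged sketch of how a chunk data pack could be split before persisting, storing the constituent collection separately while ToStoredChunkDataPack keeps only its ID (hypothetical helper, imports as above):

// persistChunkDataPack stores the embedded collection (if any, i.e. for
// non-system chunks) and converts the chunk data pack to its storage form.
func persistChunkDataPack(collections storage.Collections, c *flow.ChunkDataPack) (*storage.StoredChunkDataPack, error) {
	if c.Collection != nil {
		if err := collections.Store(c.Collection); err != nil {
			return nil, fmt.Errorf("could not store collection for chunk %v: %w", c.ChunkID, err)
		}
	}
	return storage.ToStoredChunkDataPack(c), nil
}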

type Transaction added in v0.15.1

type Transaction interface {
	Set(key, val []byte) error
}

type TransactionResults

type TransactionResults interface {

	// BatchStore inserts a batch of transaction results into a given batch
	BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch BatchStorage) error

	// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error)

	// ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error)

	// ByBlockID gets all transaction results for a block, ordered by transaction index
	ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error)
}

TransactionResults represents persistent storage for transaction results.

type Transactions

type Transactions interface {

	// Store inserts the transaction, keyed by fingerprint. Duplicate transaction insertion is ignored
	Store(tx *flow.TransactionBody) error

	// ByID returns the transaction for the given fingerprint.
	ByID(txID flow.Identifier) (*flow.TransactionBody, error)
}

Transactions represents persistent storage for transactions.

type VersionBeacons added in v0.31.0

type VersionBeacons interface {

	// Highest finds the highest flow.SealedVersionBeacon no higher than
	// belowOrEqualTo.
	// Returns nil if no version beacon has been sealed below or equal to the
	// input height.
	Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error)
}

VersionBeacons represents persistent storage for Version Beacons.
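
A sketch noting that Highest can return nil without an error when nothing is sealed at or below the given height, so the nil case must be checked before use (hypothetical helper, imports as above):

// versionBeaconAt returns the highest sealed version beacon at or below the
// given height, or nil if none has been sealed yet.
func versionBeaconAt(beacons storage.VersionBeacons, height uint64) (*flow.SealedVersionBeacon, error) {
	beacon, err := beacons.Highest(height)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve version beacon at height %d: %w", height, err)
	}
	if beacon == nil {
		return nil, nil // no version beacon sealed at or below this height
	}
	return beacon, nil
}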

Directories

Path Synopsis
Package mocks is a generated GoMock package.
Package mocks is a generated GoMock package.
