Documentation ¶
Index ¶
- Variables
- func IsResultAlreadyExistsErr(err error) bool
- func NewResultAlreadyExistsErrorf(msg string, blockID, resultID flow.Identifier, args ...interface{}) error
- type All
- type BatchStorage
- type Blocks
- type ChunkDataPacks
- type ChunksQueue
- type Cleaner
- type ClusterBlocks
- type ClusterPayloads
- type Collections
- type Commits
- type ConsumerProgress
- type DKGState
- type EpochCommits
- type EpochSetups
- type EpochStatuses
- type Events
- type ExecutionReceipts
- type ExecutionResults
- type Guarantees
- type Headers
- type Index
- type Ledger
- type LedgerVerifier
- type MyExecutionReceipts
- type Payloads
- type ResultAlreadyExistsErr
- type ResultApprovals
- type SafeBeaconKeys
- type Seals
- type ServiceEvents
- type Transaction
- type TransactionResults
- type Transactions
Constants ¶
This section is empty.
Variables ¶
var (
	// Note: there is another not-found error: badger.ErrKeyNotFound. The difference between
	// badger.ErrKeyNotFound and storage.ErrNotFound is that:
	// badger.ErrKeyNotFound is the error returned by the badger API, while
	// modules in the storage/badger and storage/badger/operation packages both
	// return storage.ErrNotFound for not-found errors.
	ErrNotFound      = errors.New("key not found")
	ErrAlreadyExists = errors.New("key already exists")
	ErrDataMismatch  = errors.New("data for key is different")
)
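Callers generally compare errors against these sentinels with errors.Is, since concrete stores may wrap them. A minimal sketch, assuming a hypothetical lookupHeader helper and a Headers store obtained from a concrete (e.g. badger-backed) implementation:

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// lookupHeader shows how a caller can distinguish "not stored yet" from a
// genuine failure using the storage.ErrNotFound sentinel.
func lookupHeader(headers storage.Headers, blockID flow.Identifier) (*flow.Header, bool, error) {
	header, err := headers.ByBlockID(blockID)
	if errors.Is(err, storage.ErrNotFound) {
		// the header is simply not stored yet -- often an expected condition
		return nil, false, nil
	}
	if err != nil {
		return nil, false, fmt.Errorf("could not retrieve header: %w", err)
	}
	return header, true, nil
}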
Functions ¶
func IsResultAlreadyExistsErr ¶ added in v0.24.5
func IsResultAlreadyExistsErr(err error) bool
func NewResultAlreadyExistsErrorf ¶ added in v0.24.5
func NewResultAlreadyExistsErrorf(msg string, blockID, resultID flow.Identifier, args ...interface{}) error
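A minimal sketch of the intended check, assuming (hypothetically) that indexing a conflicting result for a block surfaces a ResultAlreadyExistsErr; the helper name and control flow are illustrative only:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// indexResult indexes a result by block ID and lets the caller decide what to
// do when a conflicting result is already indexed for that block.
func indexResult(results storage.ExecutionResults, blockID, resultID flow.Identifier) error {
	err := results.Index(blockID, resultID)
	if err == nil {
		return nil
	}
	if storage.IsResultAlreadyExistsErr(err) {
		// a different result is already indexed for this block; surface it
		// as-is so the caller can decide whether to overwrite via ForceIndex
		return err
	}
	return fmt.Errorf("could not index result: %w", err)
}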
Types ¶
type All ¶ added in v0.12.0
type All struct {
	Headers            Headers
	Guarantees         Guarantees
	Seals              Seals
	Index              Index
	Payloads           Payloads
	Blocks             Blocks
	Setups             EpochSetups
	EpochCommits       EpochCommits
	Statuses           EpochStatuses
	Results            ExecutionResults
	Receipts           ExecutionReceipts
	ChunkDataPacks     ChunkDataPacks
	Commits            Commits
	Transactions       Transactions
	TransactionResults TransactionResults
	Collections        Collections
	Events             Events
}
All includes all the storage modules
type BatchStorage ¶ added in v0.15.0
type BatchStorage interface {
	GetWriter() *badger.WriteBatch

	// OnSucceed adds a callback to execute after the batch has
	// been successfully flushed.
	// useful for implementing the cache where we will only cache
	// after the batch has been successfully flushed
	OnSucceed(callback func())

	// Flush will flush the write batch and update the cache.
	Flush() error
}
BatchStorage serves as an abstraction over a batched database write: writes are queued via GetWriter, callbacks registered with OnSucceed run only after a successful Flush.
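A minimal sketch of the batching pattern, assuming the ExecutionReceipts, Commits and BatchStorage values come from a concrete (e.g. badger-backed) implementation; the helper is hypothetical:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeReceiptAndCommit queues several writes into one BatchStorage, registers
// a post-flush callback via OnSucceed, and persists everything with one Flush.
func storeReceiptAndCommit(
	receipts storage.ExecutionReceipts,
	commits storage.Commits,
	batch storage.BatchStorage,
	receipt *flow.ExecutionReceipt,
	commit flow.StateCommitment,
) error {
	blockID := receipt.ExecutionResult.BlockID

	if err := receipts.BatchStore(receipt, batch); err != nil {
		return fmt.Errorf("could not batch-store receipt: %w", err)
	}
	if err := commits.BatchStore(blockID, commit, batch); err != nil {
		return fmt.Errorf("could not batch-store commit: %w", err)
	}

	batch.OnSucceed(func() {
		// only runs after the batch was flushed successfully,
		// so in-memory caches can safely be updated here
	})

	return batch.Flush()
}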
type Blocks ¶
type Blocks interface {

	// Store will atomically store a block with all its dependencies.
	Store(block *flow.Block) error

	// StoreTx allows us to store a new block, including its payload & header, as part of a DB transaction, while
	// still going through the caching layer.
	StoreTx(block *flow.Block) func(*transaction.Tx) error

	// ByID returns the block with the given hash. It is available for
	// finalized and ambiguous blocks.
	ByID(blockID flow.Identifier) (*flow.Block, error)

	// ByHeight returns the block at the given height. It is only available
	// for finalized blocks.
	ByHeight(height uint64) (*flow.Block, error)

	// ByCollectionID returns the block for the given collection ID.
	ByCollectionID(collID flow.Identifier) (*flow.Block, error)

	// IndexBlockForCollections indexes the block each collection was
	// included in.
	IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error

	// UpdateLastFullBlockHeight updates the FullBlockHeight index.
	// The FullBlockHeight index indicates the block for which all collections have been received.
	UpdateLastFullBlockHeight(height uint64) error

	// GetLastFullBlockHeight retrieves the FullBlockHeight.
	GetLastFullBlockHeight() (height uint64, err error)
}
Blocks represents persistent storage for blocks.
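A minimal sketch of advancing the FullBlockHeight index, assuming a hypothetical caller that has just received the last missing collection for the block at the given height:

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/storage"
)

// advanceFullBlockHeight moves the FullBlockHeight index forward once all
// collections for the finalized block at `height` have been received.
func advanceFullBlockHeight(blocks storage.Blocks, height uint64) error {
	// the block at this height must already be finalized and stored
	if _, err := blocks.ByHeight(height); errors.Is(err, storage.ErrNotFound) {
		return fmt.Errorf("block at height %d not finalized yet: %w", height, err)
	} else if err != nil {
		return fmt.Errorf("could not look up block: %w", err)
	}

	last, err := blocks.GetLastFullBlockHeight()
	if err != nil {
		return fmt.Errorf("could not read last full block height: %w", err)
	}
	if height <= last {
		return nil // index already covers this height
	}
	return blocks.UpdateLastFullBlockHeight(height)
}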
type ChunkDataPacks ¶
type ChunkDataPacks interface {

	// Store inserts the chunk data pack, keyed by chunk ID.
	Store(c *flow.ChunkDataPack) error

	// BatchStore inserts the chunk data pack, keyed by chunk ID, into a given batch.
	BatchStore(c *flow.ChunkDataPack, batch BatchStorage) error

	// Remove removes the chunk data for the given chunk ID, if it exists.
	Remove(chunkID flow.Identifier) error

	// ByChunkID returns the chunk data for the given chunk ID.
	ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error)
}
ChunkDataPacks represents persistent storage for chunk data packs.
type ChunksQueue ¶ added in v0.15.0
type ClusterBlocks ¶
type ClusterBlocks interface {

	// Store stores the cluster block.
	Store(block *cluster.Block) error

	// ByID returns the block with the given ID.
	ByID(blockID flow.Identifier) (*cluster.Block, error)

	// ByHeight returns the block with the given height. Only available for
	// finalized blocks.
	ByHeight(height uint64) (*cluster.Block, error)
}
type ClusterPayloads ¶
type ClusterPayloads interface {

	// Store stores and indexes the given cluster payload.
	Store(blockID flow.Identifier, payload *cluster.Payload) error

	// ByBlockID returns the cluster payload for the given block ID.
	ByBlockID(blockID flow.Identifier) (*cluster.Payload, error)
}
ClusterPayloads handles storing and retrieving payloads for collection node cluster consensus.
type Collections ¶
type Collections interface {

	// StoreLight inserts the collection. It does not insert, nor check
	// existence of, the constituent transactions.
	StoreLight(collection *flow.LightCollection) error

	// Store inserts the collection keyed by ID and all constituent
	// transactions.
	Store(collection *flow.Collection) error

	// Remove removes the collection and all constituent transactions.
	Remove(collID flow.Identifier) error

	// LightByID returns collection with the given ID. Only retrieves
	// transaction hashes.
	LightByID(collID flow.Identifier) (*flow.LightCollection, error)

	// ByID returns the collection with the given ID, including all
	// transactions within the collection.
	ByID(collID flow.Identifier) (*flow.Collection, error)

	// StoreLightAndIndexByTransaction inserts the light collection (only
	// transaction IDs) and adds a transaction id index for each of the
	// transactions within the collection (transaction_id->collection_id).
	//
	// NOTE: Currently it is possible in rare circumstances for two collections
	// to be guaranteed which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/3556).
	// The second of these will revert upon reaching the execution node, so
	// this doesn't impact the execution state, but it can result in the Access
	// node processing two collections which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/5337).
	// To handle this, we skip indexing the affected transaction when inserting
	// the transaction_id->collection_id index when an index for the transaction
	// already exists.
	StoreLightAndIndexByTransaction(collection *flow.LightCollection) error

	// LightByTransactionID returns the collection for the given transaction ID. Only retrieves
	// transaction hashes.
	LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)
}
Collections represents persistent storage for collections.
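A minimal sketch of the light-collection indexing flow, assuming a hypothetical helper; which collection the lookup returns when a transaction appears in two guaranteed collections follows the NOTE above:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// indexGuaranteedCollection stores the light collection together with the
// transaction_id -> collection_id index, then resolves one of its transactions
// back to a (light) collection.
func indexGuaranteedCollection(collections storage.Collections, light *flow.LightCollection, txID flow.Identifier) (*flow.LightCollection, error) {
	if err := collections.StoreLightAndIndexByTransaction(light); err != nil {
		return nil, fmt.Errorf("could not store and index collection: %w", err)
	}
	// if the transaction was already indexed by an earlier collection, the
	// lookup may return that earlier collection instead (see NOTE above)
	return collections.LightByTransactionID(txID)
}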
type Commits ¶
type Commits interface {

	// Store will store a commit in the persistent storage.
	Store(blockID flow.Identifier, commit flow.StateCommitment) error

	// BatchStore will store a commit in a given batch.
	BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch BatchStorage) error

	// ByBlockID will retrieve a commit by its ID from persistent storage.
	ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error)

	RemoveByBlockID(blockID flow.Identifier) error
}
Commits represents persistent storage for state commitments.
type ConsumerProgress ¶ added in v0.15.0
type ConsumerProgress interface {

	// read the current processed index
	ProcessedIndex() (uint64, error)

	// insert the default processed index to the storage layer, can only be done once.
	// initialize for the second time will return storage.ErrAlreadyExists
	InitProcessedIndex(defaultIndex uint64) error

	// update the processed index in the storage layer.
	// it will fail if InitProcessedIndex was never called.
	SetProcessedIndex(processed uint64) error
}
ConsumerProgress reads and writes the last processed index of the job in the job queue
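A minimal sketch of the init-once pattern, assuming the implementation returns (or wraps) storage.ErrAlreadyExists on repeated initialization as documented; the helper is hypothetical:

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/storage"
)

// initProgress initializes the processed index on first start, tolerates the
// "already initialized" case on restart, and returns the index to resume from.
func initProgress(progress storage.ConsumerProgress, defaultIndex uint64) (uint64, error) {
	err := progress.InitProcessedIndex(defaultIndex)
	if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
		return 0, fmt.Errorf("could not init processed index: %w", err)
	}
	processed, err := progress.ProcessedIndex()
	if err != nil {
		return 0, fmt.Errorf("could not read processed index: %w", err)
	}
	return processed, nil
}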
type DKGState ¶ added in v0.23.9
type DKGState interface {

	// SetDKGStarted sets the flag indicating the DKG has started for the given epoch.
	SetDKGStarted(epochCounter uint64) error

	// GetDKGStarted checks whether the DKG has been started for the given epoch.
	GetDKGStarted(epochCounter uint64) (bool, error)

	// SetDKGEndState stores that the DKG has ended, and its end state.
	SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error

	// GetDKGEndState retrieves the end state for the given DKG.
	GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error)

	// InsertMyBeaconPrivateKey stores the random beacon private key for an epoch.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys
	// to guarantee only keys safe for signing are returned.
	InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error

	// RetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys
	// to guarantee only keys safe for signing are returned.
	RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)
}
DKGState is the storage interface for storing all artifacts and state related to the DKG process, including the latest state of a running or completed DKG, and computed beacon keys.
type EpochCommits ¶
type EpochCommits interface {

	// StoreTx allows us to store a new epoch commit in a DB transaction while updating the cache.
	StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error

	// ByID will return the EpochCommit event by its ID.
	ByID(flow.Identifier) (*flow.EpochCommit, error)
}
type EpochSetups ¶
type EpochSetups interface {

	// StoreTx allows us to store a new epoch setup in a DB transaction while going through the cache.
	StoreTx(*flow.EpochSetup) func(*transaction.Tx) error

	// ByID will return the EpochSetup event by its ID.
	ByID(flow.Identifier) (*flow.EpochSetup, error)
}
type EpochStatuses ¶
type EpochStatuses interface {

	// StoreTx stores a new epoch state in a DB transaction while going through the cache.
	StoreTx(blockID flow.Identifier, state *flow.EpochStatus) func(*transaction.Tx) error

	// ByBlockID will return the epoch status for the given block.
	ByBlockID(flow.Identifier) (*flow.EpochStatus, error)
}
type Events ¶
type Events interface {

	// BatchStore will store events for the given block ID in a given batch.
	BatchStore(blockID flow.Identifier, events []flow.EventsList, batch BatchStorage) error

	// ByBlockID returns the events for the given block ID.
	ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionID returns the events for the given block ID and transaction ID.
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionIndex returns the events for the transaction at the given index in a given block.
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error)

	// ByBlockIDEventType returns the events for the given block ID and event type.
	ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error)
}
Events represents persistent storage for events.
type ExecutionReceipts ¶
type ExecutionReceipts interface {

	// Store stores an execution receipt.
	Store(receipt *flow.ExecutionReceipt) error

	// BatchStore stores an execution receipt inside a given batch.
	BatchStore(receipt *flow.ExecutionReceipt, batch BatchStorage) error

	// ByID retrieves an execution receipt by its ID.
	ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)

	// ByBlockID retrieves all known execution receipts for the given block
	// (from any Execution Node).
	ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
}
ExecutionReceipts holds and indexes Execution Receipts. The storage-layer abstraction is from the viewpoint of the network: there are multiple execution nodes which produce several receipts for each block. By default, there is no distinguished execution node (they are all equal).
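A minimal sketch that groups all known receipts for a block by executor, assuming a hypothetical helper; it illustrates that several Execution Nodes may each commit to the same block:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// receiptsByExecutor retrieves all known receipts for a block and groups them
// by the executor that produced them.
func receiptsByExecutor(receipts storage.ExecutionReceipts, blockID flow.Identifier) (map[flow.Identifier]*flow.ExecutionReceipt, error) {
	list, err := receipts.ByBlockID(blockID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve receipts: %w", err)
	}
	byExecutor := make(map[flow.Identifier]*flow.ExecutionReceipt, len(list))
	for _, receipt := range list {
		byExecutor[receipt.ExecutorID] = receipt
	}
	return byExecutor, nil
}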
type ExecutionResults ¶
type ExecutionResults interface {

	// Store stores an execution result.
	Store(result *flow.ExecutionResult) error

	// BatchStore stores an execution result in a given batch.
	BatchStore(result *flow.ExecutionResult, batch BatchStorage) error

	// ByID retrieves an execution result by its ID.
	ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)

	// ByIDTx retrieves an execution result by its ID in the context of the given transaction.
	ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)

	// Index indexes an execution result by block ID.
	Index(blockID flow.Identifier, resultID flow.Identifier) error

	// ForceIndex indexes an execution result by block ID, overwriting an existing database entry.
	ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error

	// BatchIndex indexes an execution result by block ID in a given batch.
	BatchIndex(blockID flow.Identifier, resultID flow.Identifier, ForceIndex bool, batch BatchStorage) error

	// ByBlockID retrieves an execution result by block ID.
	ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
}
type Guarantees ¶
type Guarantees interface {

	// Store inserts the collection guarantee.
	Store(guarantee *flow.CollectionGuarantee) error

	// ByCollectionID retrieves the collection guarantee by collection ID.
	ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error)
}
Guarantees represents persistent storage for collection guarantees.
type Headers ¶
type Headers interface {

	// Store will store a header.
	Store(header *flow.Header) error

	// ByBlockID returns the header with the given ID. It is available for
	// finalized and ambiguous blocks.
	ByBlockID(blockID flow.Identifier) (*flow.Header, error)

	// ByHeight returns the block with the given number. It is only available
	// for finalized blocks.
	ByHeight(height uint64) (*flow.Header, error)

	// ByParentID finds all children for the given parent block. The returned headers
	// might be unfinalized; if there is more than one, at least one of them has to
	// be unfinalized.
	ByParentID(parentID flow.Identifier) ([]*flow.Header, error)

	// IndexByChunkID indexes the block ID by chunk ID.
	IndexByChunkID(headerID, chunkID flow.Identifier) error

	// BatchIndexByChunkID indexes the block ID by chunk ID in a given batch.
	BatchIndexByChunkID(headerID, chunkID flow.Identifier, batch BatchStorage) error

	// IDByChunkID finds the ID of the block corresponding to the given chunk ID.
	IDByChunkID(chunkID flow.Identifier) (flow.Identifier, error)

	RollbackExecutedBlock(header *flow.Header) error
}
Headers represents persistent storage for block headers.
type Index ¶
type Index interface {

	// Store stores the index for a block payload.
	Store(blockID flow.Identifier, index *flow.Index) error

	// ByBlockID retrieves the index for a block payload.
	ByBlockID(blockID flow.Identifier) (*flow.Index, error)
}
type Ledger ¶
type Ledger interface {
	module.ReadyDoneAware

	EmptyStateCommitment() flow.StateCommitment

	// Trusted methods (without proof)

	// GetRegisters gets registers at a specific StateCommitment by a list of register IDs.
	GetRegisters(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, err error)

	// UpdateRegisters performs a batched atomic update of a subset of registers at a specific state.
	UpdateRegisters(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, err error)

	// Untrusted methods (providing proofs)

	// GetRegistersWithProof gets registers at a specific StateCommitment by a list of register IDs, with proofs.
	GetRegistersWithProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, proofs []flow.StorageProof, err error)

	// UpdateRegistersWithProof performs a batched atomic update of a subset of registers at a specific state, with proofs.
	UpdateRegistersWithProof(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, proofs []flow.StorageProof, err error)

	DiskSize() (int64, error)
}
Ledger takes care of storing registers (key-value pairs) and providing proofs of correctness. We aim to store a state on the order of 10^10 registers with up to 1M historic state versions.
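A minimal sketch of the trusted (proof-less) register round trip, assuming hypothetical inputs and a Ledger implementation obtained elsewhere:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// updateAndRead applies a batched register update at one state commitment and
// reads the values back at the resulting new commitment.
func updateAndRead(
	ledger storage.Ledger,
	ids []flow.RegisterID,
	values []flow.RegisterValue,
	state flow.StateCommitment,
) ([]flow.RegisterValue, flow.StateCommitment, error) {
	newState, err := ledger.UpdateRegisters(ids, values, state)
	if err != nil {
		return nil, flow.StateCommitment{}, fmt.Errorf("could not update registers: %w", err)
	}
	read, err := ledger.GetRegisters(ids, newState)
	if err != nil {
		return nil, flow.StateCommitment{}, fmt.Errorf("could not read registers: %w", err)
	}
	return read, newState, nil
}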
type LedgerVerifier ¶
type LedgerVerifier interface {
	// verify if a provided proof for getRegisters is accurate
	VerifyRegistersProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment, values []flow.RegisterValue, proof []flow.StorageProof) (verified bool, err error)
}
LedgerVerifier should be designed as a standalone package to verify storage proofs.
type MyExecutionReceipts ¶ added in v0.15.0
type MyExecutionReceipts interface {

	// StoreMyReceipt stores the receipt and marks it as mine (trusted). My
	// receipts are indexed by the block whose result they compute. Currently,
	// we only support indexing a _single_ receipt per block. Attempting to
	// store conflicting receipts for the same block will error.
	StoreMyReceipt(receipt *flow.ExecutionReceipt) error

	// BatchStoreMyReceipt stores the receipt and marks it as mine (trusted) in a given batch.
	BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch BatchStorage) error

	// MyReceipt retrieves my receipt for the given block.
	MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error)
}
MyExecutionReceipts reuses the storage.ExecutionReceipts API, but doesn't expose them. Instead, it includes the "My" in the method name in order to highlight the notion of "MY execution receipt", from the viewpoint of an individual Execution Node.
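A minimal sketch of the single-receipt-per-block contract, assuming a hypothetical helper:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeOwnReceipt stores my receipt and reads it back via the block whose
// result it computes.
func storeOwnReceipt(my storage.MyExecutionReceipts, receipt *flow.ExecutionReceipt) (*flow.ExecutionReceipt, error) {
	if err := my.StoreMyReceipt(receipt); err != nil {
		// storing a conflicting receipt for the same block is expected to error
		return nil, fmt.Errorf("could not store my receipt: %w", err)
	}
	return my.MyReceipt(receipt.ExecutionResult.BlockID)
}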
type Payloads ¶
type Payloads interface {

	// Store will store a payload and index its contents.
	Store(blockID flow.Identifier, payload *flow.Payload) error

	// ByBlockID returns the payload with the given hash. It is available for
	// finalized and ambiguous blocks.
	ByBlockID(blockID flow.Identifier) (*flow.Payload, error)
}
Payloads represents persistent storage for payloads.
type ResultAlreadyExistsErr ¶ added in v0.24.5
type ResultAlreadyExistsErr struct {
	BlockID  flow.Identifier
	ResultID flow.Identifier
	// contains filtered or unexported fields
}
func (ResultAlreadyExistsErr) Error ¶ added in v0.24.5
func (e ResultAlreadyExistsErr) Error() string
func (ResultAlreadyExistsErr) Unwrap ¶ added in v0.24.5
func (e ResultAlreadyExistsErr) Unwrap() error
type ResultApprovals ¶ added in v0.14.0
type ResultApprovals interface {

	// Store stores a ResultApproval.
	Store(result *flow.ResultApproval) error

	// Index indexes a ResultApproval by result ID and chunk index.
	Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error

	// ByID retrieves a ResultApproval by its ID.
	ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)

	// ByChunk retrieves a ResultApproval by result ID and chunk index.
	ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error)
}
type SafeBeaconKeys ¶ added in v0.23.9
type SafeBeaconKeys interface {

	// RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given
	// epoch, only if my key has been confirmed valid and safe for use.
	//
	// Returns:
	//   * (key, true, nil) if the key is present and confirmed valid
	//   * (nil, false, nil) if no key was generated or the key has been marked invalid (by SetDKGEnded)
	//   * (nil, false, error) for any other condition, or exception
	RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error)
}
SafeBeaconKeys is a safe way to access beacon keys.
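A minimal sketch of handling the documented tri-state return, assuming a hypothetical helper and that a missing safe key is an acceptable outcome for the caller:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/crypto"
	"github.com/onflow/flow-go/storage"
)

// beaconKeyForEpoch distinguishes a usable key, an explicit "no safe key"
// outcome, and a genuine failure.
func beaconKeyForEpoch(keys storage.SafeBeaconKeys, epochCounter uint64) (crypto.PrivateKey, error) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epochCounter)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve beacon key: %w", err)
	}
	if !safe {
		// no key was generated, or the DKG marked it invalid; the caller can
		// fall back to participating without a random beacon share
		return nil, nil
	}
	return key, nil
}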
type Seals ¶
type Seals interface {

	// Store inserts the seal.
	Store(seal *flow.Seal) error

	// ByID retrieves the seal by the collection
	// fingerprint.
	ByID(sealID flow.Identifier) (*flow.Seal, error)

	// ByBlockID retrieves the last seal in the chain of seals for the block.
	ByBlockID(sealedID flow.Identifier) (*flow.Seal, error)
}
Seals represents persistent storage for seals.
type ServiceEvents ¶ added in v0.14.0
type ServiceEvents interface {

	// BatchStore will store service events for the given block ID in a given batch.
	BatchStore(blockID flow.Identifier, events []flow.Event, batch BatchStorage) error

	// ByBlockID returns the events for the given block ID.
	ByBlockID(blockID flow.Identifier) ([]flow.Event, error)
}
type Transaction ¶ added in v0.15.1
type TransactionResults ¶
type TransactionResults interface {

	// BatchStore inserts a batch of transaction results into a given batch.
	BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch BatchStorage) error

	// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID.
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error)

	// ByBlockIDTransactionIndex returns the transaction result for the given block ID and transaction index.
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error)
}
TransactionResults represents persistent storage for transaction results.
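A minimal sketch of a per-transaction lookup, assuming (hypothetically) that a non-empty ErrorMessage on flow.TransactionResult indicates the transaction failed:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// transactionStatus looks up a single transaction's result within a block and
// reports whether it failed.
func transactionStatus(results storage.TransactionResults, blockID, txID flow.Identifier) (failed bool, err error) {
	result, err := results.ByBlockIDTransactionID(blockID, txID)
	if err != nil {
		return false, fmt.Errorf("could not retrieve transaction result: %w", err)
	}
	return result.ErrorMessage != "", nil
}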
type Transactions ¶
type Transactions interface {

	// Store inserts the transaction, keyed by fingerprint. Duplicate transaction insertion is ignored.
	Store(tx *flow.TransactionBody) error

	// ByID returns the transaction for the given fingerprint.
	ByID(txID flow.Identifier) (*flow.TransactionBody, error)
}
Transactions represents persistent storage for transactions.
Source Files ¶
- all.go
- approvals.go
- batch.go
- blocks.go
- chunkDataPacks.go
- chunks_queue.go
- cleaner.go
- cluster_blocks.go
- cluster_payloads.go
- collections.go
- commits.go
- consumer_progress.go
- dkg.go
- epoch_commits.go
- epoch_setups.go
- epoch_statuses.go
- errors.go
- events.go
- guarantees.go
- headers.go
- index.go
- ledger.go
- payloads.go
- receipts.go
- results.go
- seals.go
- transaction_results.go
- transactions.go