Documentation ¶
Index ¶
- Constants
- Variables
- func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All
- func InitPublic(opts badger.Options) (*badger.DB, error)
- func InitSecret(opts badger.Options) (*badger.DB, error)
- type Batch
- type BatchBuilder
- type Blocks
- func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error)
- func (b *Blocks) ByHeight(height uint64) (*flow.Block, error)
- func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error)
- func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error
- func (b *Blocks) Store(block *flow.Block) error
- func (b *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error
- type Cache
- type ChunksQueue
- type Cleaner
- type ClusterBlocks
- type ClusterPayloads
- type Collections
- func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error)
- func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error)
- func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)
- func (c *Collections) Remove(colID flow.Identifier) error
- func (c *Collections) Store(collection *flow.Collection) error
- func (c *Collections) StoreLight(collection *flow.LightCollection) error
- func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error
- type ComputationResultUploadStatus
- func (c *ComputationResultUploadStatus) ByID(computationResultID flow.Identifier) (bool, error)
- func (c *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error)
- func (c *ComputationResultUploadStatus) Remove(computationResultID flow.Identifier) error
- func (c *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, wasUploadCompleted bool) error
- type EpochCommits
- type EpochProtocolStateEntries
- func (s *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error)
- func (s *EpochProtocolStateEntries) ByID(epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error)
- func (s *EpochProtocolStateEntries) Index(blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) func(*transaction.Tx) error
- func (s *EpochProtocolStateEntries) StoreTx(epochProtocolStateEntryID flow.Identifier, ...) func(*transaction.Tx) error
- type EpochSetups
- type ExecutionReceipts
- func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error
- func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
- func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)
- func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error
- type ExecutionResults
- func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, ...) error
- func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error
- func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error
- func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
- func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)
- func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)
- func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error
- func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error
- func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error
- func (r *ExecutionResults) Store(result *flow.ExecutionResult) error
- type Guarantees
- type Headers
- func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error)
- func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error)
- func (h *Headers) ByHeight(height uint64) (*flow.Header, error)
- func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error)
- func (h *Headers) Exists(blockID flow.Identifier) (bool, error)
- func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error)
- func (h *Headers) RollbackExecutedBlock(header *flow.Header) error
- func (h *Headers) Store(header *flow.Header) error
- type Index
- type Payloads
- type ProtocolKVStore
- func (s *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error)
- func (s *ProtocolKVStore) ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error)
- func (s *ProtocolKVStore) IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error
- func (s *ProtocolKVStore) StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error
- type QuorumCertificates
- type RecoverablePrivateBeaconKeyStateMachine
- func (ds *RecoverablePrivateBeaconKeyStateMachine) CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error
- func (ds *RecoverablePrivateBeaconKeyStateMachine) GetDKGState(epochCounter uint64) (flow.DKGState, error)
- func (ds *RecoverablePrivateBeaconKeyStateMachine) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error
- func (ds *RecoverablePrivateBeaconKeyStateMachine) IsDKGStarted(epochCounter uint64) (bool, error)
- func (ds *RecoverablePrivateBeaconKeyStateMachine) RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error)
- func (ds *RecoverablePrivateBeaconKeyStateMachine) SetDKGState(epochCounter uint64, newState flow.DKGState) error
- func (ds *RecoverablePrivateBeaconKeyStateMachine) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)
- func (ds *RecoverablePrivateBeaconKeyStateMachine) UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error
- type ResultApprovals
- func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error)
- func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)
- func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error
- func (r *ResultApprovals) Store(approval *flow.ResultApproval) error
- type Seals
- type Transactions
- type VersionBeacons
Constants ¶
const DefaultCacheSize = uint(1000)
const JobQueueChunksQueue = "JobQueueChunksQueue"
Variables ¶
var DefaultEpochProtocolStateCacheSize uint = 20
DefaultEpochProtocolStateCacheSize is the default size for primary epoch protocol state entry cache. Minimally, we have 3 entries per epoch (one on epoch Switchover, one on receiving the Epoch Setup and one when seeing the Epoch Commit event). Let's be generous and assume we have 20 different epoch state entries per epoch.
var DefaultProtocolKVStoreByBlockIDCacheSize uint = 1000
DefaultProtocolKVStoreByBlockIDCacheSize is the default value for secondary index `byBlockIdCache`. We want to be able to cover a broad interval of views without cache misses, so we use a bigger value. Generally, many blocks will reference the same KV store snapshot.
var DefaultProtocolKVStoreCacheSize uint = 10
DefaultProtocolKVStoreCacheSize is the default size for primary protocol KV store cache. KV store is rarely updated, so we will have a limited number of unique snapshots. Let's be generous and assume we have 10 different KV stores used at the same time.
var DefaultProtocolStateIndexCacheSize uint = 1000
DefaultProtocolStateIndexCacheSize is the default value for secondary byBlockIdCache. We want to be able to cover a broad interval of views without cache misses, so we use a bigger value.
Functions ¶
func InitAll ¶ added in v0.12.0
func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All
func InitPublic ¶ added in v0.22.4
func InitPublic(opts badger.Options) (*badger.DB, error)
InitPublic initializes a public database by checking and setting the database type marker. If an existing, inconsistent type marker is set, this method will return an error. Once a database type marker has been set using these methods, the type cannot be changed.
func InitSecret ¶ added in v0.22.4
func InitSecret(opts badger.Options) (*badger.DB, error)
InitSecret initializes a secrets database by checking and setting the database type marker. If an existing, inconsistent type marker is set, this method will return an error. Once a database type marker has been set using these methods, the type cannot be changed.
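As a rough end-to-end sketch of opening the public database and wiring up all storage implementations (the import paths, the badger v2 module path, and the no-op metrics collector are assumptions based on the wider flow-go codebase, not shown on this page):

package main

import (
	"log"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

func main() {
	// InitPublic sets (or checks) the database type marker, so a secrets
	// database cannot accidentally be opened as a public one.
	db, err := bstorage.InitPublic(badger.DefaultOptions("/tmp/flow-public-db"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// InitAll wires every storage implementation in this package around the
	// same database handle.
	all := bstorage.InitAll(metrics.NewNoopCollector(), db)
	_ = all
}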
Types ¶
type Batch ¶ added in v0.15.1
type Batch struct {
// contains filtered or unexported fields
}
func NewBatch ¶ added in v0.15.1
func NewBatch(db BatchBuilder) *Batch
type BatchBuilder ¶ added in v0.32.0
type BatchBuilder interface {
NewWriteBatch() *badger.WriteBatch
}
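Since *badger.DB provides NewWriteBatch, it satisfies BatchBuilder and can be handed to NewBatch directly. A minimal sketch (import paths are assumptions):

package example

import (
	"github.com/dgraph-io/badger/v2"

	bstorage "github.com/onflow/flow-go/storage/badger"
)

// newBatch shows that an open *badger.DB satisfies BatchBuilder.
func newBatch(db *badger.DB) *bstorage.Batch {
	var builder bstorage.BatchBuilder = db // compile-time interface check
	return bstorage.NewBatch(builder)
}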
type Blocks ¶
type Blocks struct {
// contains filtered or unexported fields
}
Blocks implements a simple block storage around a badger DB.
func (*Blocks) ByCollectionID ¶
ByCollectionID ...
func (*Blocks) IndexBlockForCollections ¶
func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error
IndexBlockForCollections ...
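A small sketch of the write path, assuming a *Blocks instance constructed elsewhere (its constructor is not listed on this page) and that IndexBlockForCollections takes the IDs of the collections guaranteed by the block, as the method name suggests; import paths are assumptions:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// storeAndIndexBlock stores a block and indexes it by collection IDs so that
// it can later be resolved via ByCollectionID.
func storeAndIndexBlock(blocks *bstorage.Blocks, block *flow.Block, collIDs []flow.Identifier) error {
	if err := blocks.Store(block); err != nil {
		return fmt.Errorf("could not store block: %w", err)
	}
	if err := blocks.IndexBlockForCollections(block.ID(), collIDs); err != nil {
		return fmt.Errorf("could not index block by collections: %w", err)
	}
	return nil
}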
type Cache ¶
type Cache[K comparable, V any] struct {
// contains filtered or unexported fields
}
func (*Cache[K, V]) Get ¶
Get will try to retrieve the resource from the cache first, and then from the injected retrieval function. During normal operations, the following error returns are expected:
- `storage.ErrNotFound` if key is unknown.
func (*Cache[K, V]) Insert ¶ added in v0.16.1
func (c *Cache[K, V]) Insert(key K, resource V)
Insert will add a resource directly to the cache under the given key.
func (*Cache[K, V]) IsCached ¶ added in v0.31.0
IsCached returns true if the key exists in the cache. It DOES NOT check whether the key exists in the underlying data store.
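The read-through behaviour of Get can be pictured with the following conceptual sketch. It does not use this package's (unexported) cache constructor; the map-backed cache and the `retrieve` callback are illustrative assumptions only:

package example

// getThroughCache illustrates the read-through pattern: check the cache
// first, fall back to a retrieve function (which may return
// storage.ErrNotFound for unknown keys), and populate the cache on success.
func getThroughCache[K comparable, V any](cache map[K]V, retrieve func(K) (V, error), key K) (V, error) {
	if v, ok := cache[key]; ok {
		return v, nil // cache hit
	}
	v, err := retrieve(key)
	if err != nil {
		var zero V
		return zero, err // e.g. storage.ErrNotFound
	}
	cache[key] = v // populate the cache on first retrieval
	return v, nil
}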
type ChunksQueue ¶ added in v0.15.0
type ChunksQueue struct {
// contains filtered or unexported fields
}
ChunksQueue stores a queue of chunk locators that are assigned to me to verify. Job consumers can read the locators as jobs from the queue by index. Chunk locators stored in this queue are unique.
func NewChunkQueue ¶ added in v0.15.0
func NewChunkQueue(db *badger.DB) *ChunksQueue
NewChunkQueue will initialize the underlying badger database of the chunk locator queue.
func (*ChunksQueue) AtIndex ¶ added in v0.15.0
func (q *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error)
AtIndex returns the chunk locator stored at the given index in the queue.
func (*ChunksQueue) Init ¶ added in v0.15.0
func (q *ChunksQueue) Init(defaultIndex uint64) (bool, error)
Init initializes the chunk queue's latest index with the given default index.
func (*ChunksQueue) LatestIndex ¶ added in v0.15.0
func (q *ChunksQueue) LatestIndex() (uint64, error)
LatestIndex returns the index of the latest chunk locator stored in the queue.
func (*ChunksQueue) StoreChunkLocator ¶ added in v0.15.0
func (q *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error)
StoreChunkLocator stores a new chunk locator, assigned to me, in the job queue. It returns true if the locator was new, and false if it was a duplicate.
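A sketch of a producer writing into the queue and reading the newest entry back by index, the way a job consumer would (the chunks.Locator field names and import paths are assumptions drawn from flow-go's model packages):

package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// enqueueLocator appends a chunk locator to the queue and reads the latest
// entry back by index.
func enqueueLocator(db *badger.DB, resultID flow.Identifier, chunkIndex uint64) error {
	q := bstorage.NewChunkQueue(db)
	if _, err := q.Init(0); err != nil { // initialize the latest index on first use
		return fmt.Errorf("could not init chunk queue: %w", err)
	}
	stored, err := q.StoreChunkLocator(&chunks.Locator{ResultID: resultID, Index: chunkIndex})
	if err != nil {
		return fmt.Errorf("could not store chunk locator: %w", err)
	}
	if !stored {
		return nil // duplicate locator, nothing new to read
	}
	latest, err := q.LatestIndex()
	if err != nil {
		return err
	}
	_, err = q.AtIndex(latest) // consumers read locators back as jobs by index
	return err
}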
type Cleaner ¶
Cleaner uses component.ComponentManager to implement module.Startable and module.ReadyDoneAware to run an internal goroutine which runs badger value log garbage collection at a semi-regular interval. The Cleaner exists for 2 reasons:
- Run GC frequently enough that each GC is relatively inexpensive
- Avoid GC being synchronized across all nodes. Since in the happy path, all nodes have very similar database load patterns, without intervention they are likely to schedule GC at the same time, which can cause temporary consensus halts.
func NewCleaner ¶
func NewCleaner(log zerolog.Logger, db *badger.DB, metrics module.CleanerMetrics, interval time.Duration) *Cleaner
NewCleaner returns a cleaner that runs the badger value log garbage collection once every `interval` duration. If an interval of zero is passed in, the GC is not run at all.
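A minimal construction sketch; the no-op logger, the metrics collector, and the import paths are assumptions, and the Cleaner still has to be started through its component lifecycle before it does anything:

package example

import (
	"time"

	"github.com/dgraph-io/badger/v2"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// newValueLogCleaner builds a Cleaner that triggers badger value log GC
// roughly every 10 minutes once started via module.Startable.
func newValueLogCleaner(db *badger.DB) *bstorage.Cleaner {
	return bstorage.NewCleaner(zerolog.Nop(), db, metrics.NewNoopCollector(), 10*time.Minute)
}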
type ClusterBlocks ¶
type ClusterBlocks struct {
// contains filtered or unexported fields
}
ClusterBlocks implements a simple block storage around a badger DB.
func NewClusterBlocks ¶
func NewClusterBlocks(db *badger.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks
func (*ClusterBlocks) ByHeight ¶
func (b *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error)
func (*ClusterBlocks) ByID ¶
func (b *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error)
type ClusterPayloads ¶
type ClusterPayloads struct {
// contains filtered or unexported fields
}
ClusterPayloads implements storage of block payloads for collection node cluster consensus.
func NewClusterPayloads ¶
func NewClusterPayloads(cacheMetrics module.CacheMetrics, db *badger.DB) *ClusterPayloads
func (*ClusterPayloads) ByBlockID ¶
func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error)
func (*ClusterPayloads) Store ¶
func (cp *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error
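Wiring the cluster-consensus storage together follows directly from the constructors above; a sketch with assumed import paths and a placeholder cluster chain ID:

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// newClusterStorage wires headers and cluster payloads into a ClusterBlocks
// store for a single collector cluster chain.
func newClusterStorage(db *badger.DB, clusterChainID flow.ChainID) *bstorage.ClusterBlocks {
	collector := metrics.NewNoopCollector()
	headers := bstorage.NewHeaders(collector, db)
	payloads := bstorage.NewClusterPayloads(collector, db)
	return bstorage.NewClusterBlocks(db, clusterChainID, headers, payloads)
}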
type Collections ¶
type Collections struct {
// contains filtered or unexported fields
}
func NewCollections ¶
func NewCollections(db *badger.DB, transactions *Transactions) *Collections
func (*Collections) ByID ¶
func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error)
func (*Collections) LightByID ¶
func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error)
func (*Collections) LightByTransactionID ¶
func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)
func (*Collections) Remove ¶
func (c *Collections) Remove(colID flow.Identifier) error
func (*Collections) Store ¶
func (c *Collections) Store(collection *flow.Collection) error
func (*Collections) StoreLight ¶
func (c *Collections) StoreLight(collection *flow.LightCollection) error
func (*Collections) StoreLightAndIndexByTransaction ¶
func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error
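A round-trip sketch storing a full collection and reading it back by ID (import paths and the no-op metrics collector are assumptions):

package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// storeAndReadCollection stores a full collection (including its
// transactions) and reads it back by ID.
func storeAndReadCollection(db *badger.DB, collection *flow.Collection) (*flow.Collection, error) {
	transactions := bstorage.NewTransactions(metrics.NewNoopCollector(), db)
	collections := bstorage.NewCollections(db, transactions)

	if err := collections.Store(collection); err != nil {
		return nil, fmt.Errorf("could not store collection: %w", err)
	}
	return collections.ByID(collection.ID())
}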
type ComputationResultUploadStatus ¶ added in v0.28.0
type ComputationResultUploadStatus struct {
// contains filtered or unexported fields
}
func NewComputationResultUploadStatus ¶ added in v0.28.0
func NewComputationResultUploadStatus(db *badger.DB) *ComputationResultUploadStatus
func (*ComputationResultUploadStatus) ByID ¶ added in v0.28.0
func (c *ComputationResultUploadStatus) ByID(computationResultID flow.Identifier) (bool, error)
func (*ComputationResultUploadStatus) GetIDsByUploadStatus ¶ added in v0.28.0
func (c *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error)
func (*ComputationResultUploadStatus) Remove ¶ added in v0.28.0
func (c *ComputationResultUploadStatus) Remove(computationResultID flow.Identifier) error
func (*ComputationResultUploadStatus) Upsert ¶ added in v0.28.0
func (c *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, wasUploadCompleted bool) error
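A short sketch marking one block's computation result as uploaded and then listing the blocks whose upload is still pending (import paths are assumptions):

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// markUploaded records a completed upload and returns the block IDs whose
// computation results have not been uploaded yet.
func markUploaded(db *badger.DB, blockID flow.Identifier) ([]flow.Identifier, error) {
	statuses := bstorage.NewComputationResultUploadStatus(db)
	if err := statuses.Upsert(blockID, true); err != nil {
		return nil, err
	}
	return statuses.GetIDsByUploadStatus(false) // false selects pending uploads
}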
type EpochCommits ¶
type EpochCommits struct {
// contains filtered or unexported fields
}
func NewEpochCommits ¶
func NewEpochCommits(collector module.CacheMetrics, db *badger.DB) *EpochCommits
func (*EpochCommits) ByID ¶
func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error)
ByID will return the EpochCommit event by its ID. Error returns:
- storage.ErrNotFound if no EpochCommit with the ID exists
func (*EpochCommits) StoreTx ¶
func (ec *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error
type EpochProtocolStateEntries ¶ added in v0.33.30
type EpochProtocolStateEntries struct {
// contains filtered or unexported fields
}
EpochProtocolStateEntries implements a persistent, fork-aware storage for the epoch-related sub-states of the overall Protocol State (KV Store). It uses an embedded cache which is populated on first retrieval to speed up access to frequently used epoch sub-states.
func NewEpochProtocolStateEntries ¶ added in v0.33.30
func NewEpochProtocolStateEntries(collector module.CacheMetrics, epochSetups storage.EpochSetups, epochCommits storage.EpochCommits, db *badger.DB, stateCacheSize uint, stateByBlockIDCacheSize uint) *EpochProtocolStateEntries
NewEpochProtocolStateEntries creates an EpochProtocolStateEntries instance, which stores a subset of the state stored by the Dynamic Protocol State. It supports storing, caching and retrieving by ID or by the additionally indexed block ID.
func (*EpochProtocolStateEntries) ByBlockID ¶ added in v0.33.30
func (s *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error)
ByBlockID retrieves the epoch protocol state entry that the block with the given ID proposes. CAUTION: this protocol state requires confirmation by a QC and will only become active at the child block, _after_ validating the QC. Protocol convention:
- Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, the protocol state changes if we seal some execution results emitting service events.
- For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, the hash of the resulting protocol state at the end of processing B is to be used.
- CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, _after_ validating the QC.
Expected errors during normal operations:
- storage.ErrNotFound if no state entry has been indexed for the given block.
func (*EpochProtocolStateEntries) ByID ¶ added in v0.33.30
func (s *EpochProtocolStateEntries) ByID(epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error)
ByID returns the epoch protocol state entry by its ID. Expected errors during normal operations:
- storage.ErrNotFound if no protocol state with the given Identifier is known.
func (*EpochProtocolStateEntries) Index ¶ added in v0.33.30
func (s *EpochProtocolStateEntries) Index(blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) func(*transaction.Tx) error
Index returns an anonymous function that is intended to be executed as part of a database transaction. In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the block that _proposes_ the referenced epoch protocol state entry. Upon call, the anonymous function persists the specific map entry in the node's database. Protocol convention:
- Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, the protocol state changes if we seal some execution results emitting service events.
- For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, the hash of the resulting protocol state at the end of processing B is to be used.
- CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, _after_ validating the QC.
Expected errors during normal operations:
- storage.ErrAlreadyExists if a state entry for the given blockID has already been indexed
func (*EpochProtocolStateEntries) StoreTx ¶ added in v0.33.30
func (s *EpochProtocolStateEntries) StoreTx(epochProtocolStateEntryID flow.Identifier, epochStateEntry *flow.MinEpochStateEntry) func(*transaction.Tx) error
StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), which persists the given epoch protocol state entry as part of a DB tx. Per convention, the identities in the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), otherwise an exception is returned. Expected errors of the returned anonymous function:
- storage.ErrAlreadyExists if a state entry with the given id is already stored
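StoreTx and Index are designed to be composed inside one database transaction. The sketch below assumes a transaction.Update helper in the sibling transaction package that runs such a function against the badger DB; treat that helper and the import paths as assumptions:

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
	"github.com/onflow/flow-go/storage/badger/transaction"
)

// persistEpochState stores an epoch protocol state entry and indexes it by
// the proposing block, both within a single transaction.
func persistEpochState(
	db *badger.DB,
	entries *bstorage.EpochProtocolStateEntries,
	blockID flow.Identifier,
	entryID flow.Identifier,
	entry *flow.MinEpochStateEntry,
) error {
	return transaction.Update(db, func(tx *transaction.Tx) error {
		if err := entries.StoreTx(entryID, entry)(tx); err != nil {
			return err // e.g. storage.ErrAlreadyExists
		}
		return entries.Index(blockID, entryID)(tx)
	})
}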
type EpochSetups ¶
type EpochSetups struct {
// contains filtered or unexported fields
}
func NewEpochSetups ¶
func NewEpochSetups(collector module.CacheMetrics, db *badger.DB) *EpochSetups
NewEpochSetups instantiates a new EpochSetups storage.
func (*EpochSetups) ByID ¶
func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error)
ByID will return the EpochSetup event by its ID. Error returns:
- storage.ErrNotFound if no EpochSetup with the ID exists
func (*EpochSetups) StoreTx ¶
func (es *EpochSetups) StoreTx(setup *flow.EpochSetup) func(tx *transaction.Tx) error
type ExecutionReceipts ¶
type ExecutionReceipts struct {
// contains filtered or unexported fields
}
ExecutionReceipts implements storage for execution receipts.
func NewExecutionReceipts ¶
func NewExecutionReceipts(collector module.CacheMetrics, db *badger.DB, results *ExecutionResults, cacheSize uint) *ExecutionReceipts
NewExecutionReceipts creates an ExecutionReceipts instance, which is a database of receipts that supports storing and indexing receipts by receipt ID and block ID.
func (*ExecutionReceipts) BatchStore ¶ added in v0.15.1
func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error
func (*ExecutionReceipts) ByBlockID ¶
func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
func (*ExecutionReceipts) ByID ¶
func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)
func (*ExecutionReceipts) Store ¶
func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error
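A round-trip sketch persisting a receipt and reading back every receipt known for the executed block (import paths and the no-op metrics collector are assumptions):

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// storeReceipt persists an execution receipt and reads back every receipt
// known for the executed block.
func storeReceipt(db *badger.DB, receipt *flow.ExecutionReceipt) (flow.ExecutionReceiptList, error) {
	collector := metrics.NewNoopCollector()
	results := bstorage.NewExecutionResults(collector, db)
	receipts := bstorage.NewExecutionReceipts(collector, db, results, bstorage.DefaultCacheSize)

	if err := receipts.Store(receipt); err != nil {
		return nil, err
	}
	return receipts.ByBlockID(receipt.ExecutionResult.BlockID)
}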
type ExecutionResults ¶
type ExecutionResults struct {
// contains filtered or unexported fields
}
ExecutionResults implements persistent storage for execution results.
func NewExecutionResults ¶
func NewExecutionResults(collector module.CacheMetrics, db *badger.DB) *ExecutionResults
func (*ExecutionResults) BatchIndex ¶ added in v0.15.0
func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error
func (*ExecutionResults) BatchRemoveIndexByBlockID ¶ added in v0.30.0
func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error
BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. No errors are expected during normal operation, even if no entries are matched. If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
func (*ExecutionResults) BatchStore ¶ added in v0.15.0
func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error
func (*ExecutionResults) ByBlockID ¶
func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
func (*ExecutionResults) ByID ¶
func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)
func (*ExecutionResults) ByIDTx ¶ added in v0.23.2
func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)
func (*ExecutionResults) ForceIndex ¶ added in v0.21.0
func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error
func (*ExecutionResults) Index ¶
func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error
func (*ExecutionResults) RemoveIndexByBlockID ¶ added in v0.26.0
func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error
func (*ExecutionResults) Store ¶
func (r *ExecutionResults) Store(result *flow.ExecutionResult) error
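Storing a result only persists it by ID; resolving it by block requires the additional index, as sketched below (import paths and the no-op metrics collector are assumptions):

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// storeAndIndexResult persists an execution result, indexes it by the
// executed block, and resolves it again via ByBlockID.
func storeAndIndexResult(db *badger.DB, result *flow.ExecutionResult) (*flow.ExecutionResult, error) {
	results := bstorage.NewExecutionResults(metrics.NewNoopCollector(), db)
	if err := results.Store(result); err != nil {
		return nil, err
	}
	if err := results.Index(result.BlockID, result.ID()); err != nil {
		return nil, err
	}
	return results.ByBlockID(result.BlockID)
}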
type Guarantees ¶
type Guarantees struct {
// contains filtered or unexported fields
}
Guarantees implements persistent storage for collection guarantees.
func NewGuarantees ¶
func NewGuarantees(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *Guarantees
func (*Guarantees) ByCollectionID ¶
func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error)
func (*Guarantees) Store ¶
func (g *Guarantees) Store(guarantee *flow.CollectionGuarantee) error
type Headers ¶
type Headers struct {
// contains filtered or unexported fields
}
Headers implements a simple read-only header storage around a badger DB.
func NewHeaders ¶
func NewHeaders(collector module.CacheMetrics, db *badger.DB) *Headers
func (*Headers) BlockIDByHeight ¶ added in v0.25.15
func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error)
BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized version of `ByHeight` that skips retrieving the block. Expected errors during normal operations:
- `storage.ErrNotFound` if no finalized block is known at given height.
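Callers are expected to treat storage.ErrNotFound as a benign outcome; a sketch of that handling (import paths are assumptions):

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// finalizedBlockID resolves the ID of the finalized block at the given
// height, distinguishing "no finalized block yet" from unexpected failures.
func finalizedBlockID(headers *bstorage.Headers, height uint64) (flow.Identifier, bool, error) {
	blockID, err := headers.BlockIDByHeight(height)
	if errors.Is(err, storage.ErrNotFound) {
		return flow.ZeroID, false, nil // nothing finalized at this height yet
	}
	if err != nil {
		return flow.ZeroID, false, fmt.Errorf("could not look up height %d: %w", height, err)
	}
	return blockID, true, nil
}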
func (*Headers) ByParentID ¶
func (*Headers) Exists ¶ added in v0.31.0
func (h *Headers) Exists(blockID flow.Identifier) (bool, error)
Exists returns true if a header with the given ID has been stored. No errors are expected during normal operation.
func (*Headers) FindHeaders ¶
func (*Headers) RollbackExecutedBlock ¶ added in v0.24.5
RollbackExecutedBlock updates the executed block header to the given header. It is only useful for the execution node to roll back the executed block height.
type Index ¶
type Index struct {
// contains filtered or unexported fields
}
Index implements a simple read-only payload storage around a badger DB.
func NewIndex ¶
func NewIndex(collector module.CacheMetrics, db *badger.DB) *Index
type Payloads ¶
type Payloads struct {
// contains filtered or unexported fields
}
func NewPayloads ¶
func NewPayloads(db *badger.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts, results *ExecutionResults) *Payloads
type ProtocolKVStore ¶ added in v0.33.30
type ProtocolKVStore struct {
// contains filtered or unexported fields
}
ProtocolKVStore implements persistent storage for storing KV store snapshots.
func NewProtocolKVStore ¶ added in v0.33.30
func NewProtocolKVStore(collector module.CacheMetrics, db *badger.DB, kvStoreCacheSize uint, kvStoreByBlockIDCacheSize uint) *ProtocolKVStore
NewProtocolKVStore creates a ProtocolKVStore instance, which is a database holding KV store snapshots. It supports storing, caching and retrieving by ID or the additionally indexed block ID.
func (*ProtocolKVStore) ByBlockID ¶ added in v0.33.30
func (s *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error)
ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes. CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block, _after_ validating the QC. Protocol convention:
- Consider block B, whose ingestion might potentially lead to an updated KV store state. For example, the state changes if we seal some execution results emitting specific service events.
- For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value, the hash of the resulting state at the end of processing B is to be used.
- CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, _after_ validating the QC.
Expected errors during normal operations:
- storage.ErrNotFound if no snapshot has been indexed for the given block.
func (*ProtocolKVStore) ByID ¶ added in v0.33.30
func (s *ProtocolKVStore) ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error)
ByID retrieves the KV store snapshot with the given ID. Expected errors during normal operations:
- storage.ErrNotFound if no snapshot with the given Identifier is known.
func (*ProtocolKVStore) IndexTx ¶ added in v0.33.30
func (s *ProtocolKVStore) IndexTx(blockID flow.Identifier, stateID flow.Identifier) func(*transaction.Tx) error
IndexTx returns an anonymous function intended to be executed as part of a database transaction. In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the block that _proposes_ the updated key-value store. Upon call, the anonymous function persists the specific map entry in the node's database. Protocol convention:
- Consider block B, whose ingestion might potentially lead to an updated KV store. For example, the KV store changes if we seal some execution results emitting specific service events.
- For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store.
- CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, _after_ validating the QC.
Expected errors during normal operations:
- storage.ErrAlreadyExists if a KV store for the given blockID has already been indexed.
func (*ProtocolKVStore) StoreTx ¶ added in v0.33.30
func (s *ProtocolKVStore) StoreTx(stateID flow.Identifier, data *flow.PSKeyValueStoreData) func(*transaction.Tx) error
StoreTx returns an anonymous function (intended to be executed as part of a badger transaction), which persists the given KV-store snapshot as part of a DB tx. Expected errors of the returned anonymous function:
- storage.ErrAlreadyExists if a KV-store snapshot with the given id is already stored.
type QuorumCertificates ¶ added in v0.30.0
type QuorumCertificates struct {
// contains filtered or unexported fields
}
QuorumCertificates implements persistent storage for quorum certificates.
func NewQuorumCertificates ¶ added in v0.30.0
func NewQuorumCertificates(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *QuorumCertificates
NewQuorumCertificates creates a QuorumCertificates instance, which is a database of quorum certificates that supports storing, caching and retrieving by block ID.
func (*QuorumCertificates) ByBlockID ¶ added in v0.30.0
func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error)
func (*QuorumCertificates) StoreTx ¶ added in v0.30.0
func (q *QuorumCertificates) StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error
type RecoverablePrivateBeaconKeyStateMachine ¶ added in v0.39.0
type RecoverablePrivateBeaconKeyStateMachine struct {
// contains filtered or unexported fields
}
RecoverablePrivateBeaconKeyStateMachine stores state information about in-progress and completed DKGs, including computed keys. It must be instantiated using the secrets database. On the happy path, each consensus committee member takes part in the DKG, and after successfully finishing the DKG protocol it obtains a random beacon private key, which is stored in the database along with the DKG state flow.DKGStateCompleted. If for any reason the DKG fails, then the private key will be nil and the DKG state is set to flow.DKGStateFailure. When epoch recovery takes place, we need to query the last valid beacon private key for the current replica and also set it for use during the Recovery Epoch; otherwise replicas won't be able to vote for blocks during the Recovery Epoch. CAUTION: This implementation heavily depends on atomic Badger transactions with interleaved reads and writes for correctness.
func NewRecoverableRandomBeaconStateMachine ¶ added in v0.39.0
func NewRecoverableRandomBeaconStateMachine(collector module.CacheMetrics, db *badger.DB, myNodeID flow.Identifier) (*RecoverablePrivateBeaconKeyStateMachine, error)
NewRecoverableRandomBeaconStateMachine returns the RecoverablePrivateBeaconKeyStateMachine implementation backed by Badger DB. No errors are expected during normal operations.
func (*RecoverablePrivateBeaconKeyStateMachine) CommitMyBeaconPrivateKey ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error
CommitMyBeaconPrivateKey commits the previously inserted random beacon private key for an epoch. Effectively, this method transitions the state machine into the flow.RandomBeaconKeyCommitted state if the current state is flow.DKGStateCompleted. The caller needs to supply the flow.EpochCommit as evidence that the stored key is valid for the specified epoch. Repeated calls for the same epoch are accepted (idempotent operation), if and only if the provided EpochCommit confirms the already committed key. No errors are expected during normal operations.
func (*RecoverablePrivateBeaconKeyStateMachine) GetDKGState ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) GetDKGState(epochCounter uint64) (flow.DKGState, error)
GetDKGState retrieves the current state of the state machine for the given epoch. If an error is returned, the state is undefined, meaning the state machine is in its initial state. Error returns:
- storage.ErrNotFound - if there is no state stored for given epoch, meaning the state machine is in initial state.
func (*RecoverablePrivateBeaconKeyStateMachine) InsertMyBeaconPrivateKey ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error
InsertMyBeaconPrivateKey stores the random beacon private key for an epoch and transitions the state machine into the flow.DKGStateCompleted state.
CAUTION: these keys are stored before they are validated against the canonical key vector and may not be valid for use in signing. Use storage.SafeBeaconKeys interface to guarantee only keys safe for signing are returned. Error returns:
- storage.ErrAlreadyExists - if there is already a key stored for given epoch.
- storage.InvalidDKGStateTransitionError - if the requested state transition is invalid.
func (*RecoverablePrivateBeaconKeyStateMachine) IsDKGStarted ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) IsDKGStarted(epochCounter uint64) (bool, error)
IsDKGStarted checks whether the DKG has been started for the given epoch. No errors expected during normal operation.
func (*RecoverablePrivateBeaconKeyStateMachine) RetrieveMyBeaconPrivateKey ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error)
RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given epoch, only if my key has been confirmed valid and safe for use.
Returns:
- (key, true, nil) if the key is present and confirmed valid
- (nil, false, nil) if the key has been marked invalid or unavailable -> no beacon key will ever be available for the epoch in this case
- (nil, false, storage.ErrNotFound) if the DKG has not ended
- (nil, false, error) for any unexpected exception
func (*RecoverablePrivateBeaconKeyStateMachine) SetDKGState ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) SetDKGState(epochCounter uint64, newState flow.DKGState) error
SetDKGState performs a state transition for the Random Beacon Recoverable State Machine. Some state transitions may not be possible using this method. For instance, we might not be able to enter the flow.DKGStateCompleted state directly from flow.DKGStateStarted, even if such a transition is valid. The reason for this is that some states require additional data to be processed by the state machine before the transition can be made. For such cases there are dedicated methods that should be used, e.g. InsertMyBeaconPrivateKey and UpsertMyBeaconPrivateKey, which allow storing the needed data and performing the transition in one atomic operation. Error returns:
- storage.InvalidDKGStateTransitionError - if the requested state transition is invalid.
func (*RecoverablePrivateBeaconKeyStateMachine) UnsafeRetrieveMyBeaconPrivateKey ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)
UnsafeRetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch.
CAUTION: these keys are stored before they are validated against the canonical key vector and may not be valid for use in signing. Use the storage.SafeBeaconKeys interface to guarantee that only keys safe for signing are returned. Error returns:
- storage.ErrNotFound - if there is no key stored for given epoch.
func (*RecoverablePrivateBeaconKeyStateMachine) UpsertMyBeaconPrivateKey ¶ added in v0.39.0
func (ds *RecoverablePrivateBeaconKeyStateMachine) UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error
UpsertMyBeaconPrivateKey overwrites the random beacon private key for the epoch that recovers the protocol from Epoch Fallback Mode. The resulting state of this method call is flow.RandomBeaconKeyCommitted. State transitions are allowed if and only if the current state is not equal to flow.RandomBeaconKeyCommitted. Repeated calls for the same epoch are idempotent, if and only if the provided EpochCommit confirms the already committed key (error otherwise). No errors are expected during normal operations.
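A happy-path sketch of the state machine for one epoch, assuming an already-constructed instance; whether SetDKGState accepts this exact initial transition, and the crypto import path, are assumptions:

package example

import (
	"fmt"

	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// completeDKG marks the DKG as started, inserts the locally computed beacon
// key, and commits it once the EpochCommit event confirms it.
func completeDKG(
	ds *bstorage.RecoverablePrivateBeaconKeyStateMachine,
	epochCounter uint64,
	myKey crypto.PrivateKey,
	commit *flow.EpochCommit,
) error {
	if err := ds.SetDKGState(epochCounter, flow.DKGStateStarted); err != nil {
		return err
	}
	// InsertMyBeaconPrivateKey transitions the machine to flow.DKGStateCompleted.
	if err := ds.InsertMyBeaconPrivateKey(epochCounter, myKey); err != nil {
		return err
	}
	// CommitMyBeaconPrivateKey transitions it to flow.RandomBeaconKeyCommitted.
	if err := ds.CommitMyBeaconPrivateKey(epochCounter, commit); err != nil {
		return err
	}
	// Only now is the key reported as safe for signing.
	_, safe, err := ds.RetrieveMyBeaconPrivateKey(epochCounter)
	if err != nil {
		return err
	}
	if !safe {
		return fmt.Errorf("expected committed key to be safe for signing")
	}
	return nil
}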
type ResultApprovals ¶ added in v0.14.0
type ResultApprovals struct {
// contains filtered or unexported fields
}
ResultApprovals implements persistent storage for result approvals.
func NewResultApprovals ¶ added in v0.14.0
func NewResultApprovals(collector module.CacheMetrics, db *badger.DB) *ResultApprovals
func (*ResultApprovals) ByChunk ¶ added in v0.14.0
func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error)
ByChunk retrieves a ResultApproval by result ID and chunk index. The ResultApprovals store is only used within a verification node, where it is assumed that there is never more than one approval per chunk.
func (*ResultApprovals) ByID ¶ added in v0.14.0
func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)
ByID retrieves a ResultApproval by its ID
func (*ResultApprovals) Index ¶ added in v0.14.0
func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error
Index indexes a ResultApproval by chunk (ResultID + chunk index). The operation is idempotent (repeated calls with the same value are equivalent to just calling the method once; the method still succeeds on each call).
func (*ResultApprovals) Store ¶ added in v0.14.0
func (r *ResultApprovals) Store(approval *flow.ResultApproval) error
Store stores a ResultApproval
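A sketch of the verification-node write path, storing an approval and indexing it by the chunk it approves (the field names on flow.ResultApproval and the import paths are assumptions drawn from flow-go's model package):

package example

import (
	"github.com/dgraph-io/badger/v2"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// storeAndIndexApproval persists an approval, indexes it by (result ID,
// chunk index), and resolves it again via ByChunk.
func storeAndIndexApproval(db *badger.DB, approval *flow.ResultApproval) (*flow.ResultApproval, error) {
	approvals := bstorage.NewResultApprovals(metrics.NewNoopCollector(), db)
	if err := approvals.Store(approval); err != nil {
		return nil, err
	}
	resultID := approval.Body.ExecutionResultID
	chunkIndex := approval.Body.ChunkIndex
	if err := approvals.Index(resultID, chunkIndex, approval.ID()); err != nil {
		return nil, err
	}
	return approvals.ByChunk(resultID, chunkIndex)
}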
type Seals ¶
type Seals struct {
// contains filtered or unexported fields
}
func NewSeals ¶
func NewSeals(collector module.CacheMetrics, db *badger.DB) *Seals
func (*Seals) FinalizedSealForBlock ¶ added in v0.26.6
FinalizedSealForBlock returns the seal for the given block, only if that seal has been included in a finalized block. Returns storage.ErrNotFound if the block is unknown or unsealed.
func (*Seals) HighestInFork ¶ added in v0.26.6
HighestInFork retrieves the highest seal that was included in the fork up to (and including) blockID. This method should return a seal for any block known to the node. Returns storage.ErrNotFound if blockID is unknown.
type Transactions ¶
type Transactions struct {
// contains filtered or unexported fields
}
Transactions ...
func NewTransactions ¶
func NewTransactions(cacheMetrics module.CacheMetrics, db *badger.DB) *Transactions
NewTransactions ...
func (*Transactions) ByID ¶
func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error)
ByID ...
func (*Transactions) Store ¶
func (t *Transactions) Store(flowTx *flow.TransactionBody) error
Store ...
type VersionBeacons ¶ added in v0.31.0
type VersionBeacons struct {
// contains filtered or unexported fields
}
func NewVersionBeacons ¶ added in v0.31.0
func NewVersionBeacons(db *badger.DB) *VersionBeacons
func (*VersionBeacons) Highest ¶ added in v0.31.0
func (r *VersionBeacons) Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error)
Source Files ¶
- all.go
- approvals.go
- batch.go
- blocks.go
- cache.go
- chunks_queue.go
- cleaner.go
- cluster_blocks.go
- cluster_payloads.go
- collections.go
- computation_result.go
- dkg_state.go
- epoch_commits.go
- epoch_protocol_state.go
- epoch_setups.go
- guarantees.go
- headers.go
- index.go
- init.go
- payloads.go
- protocol_kv_store.go
- qcs.go
- receipts.go
- results.go
- seals.go
- transactions.go
- version_beacon.go