pebble

package
v0.33.36-pebble-util
Warning

This package is not in the latest version of its module.

Published: Jul 10, 2024 License: AGPL-3.0 Imports: 35 Imported by: 0

Documentation

Constants

const DefaultCacheSize = uint(10_000)
const DefaultPebbleCacheSize = 1 << 20
const JobQueueChunksQueue = "JobQueueChunksQueue"
const (

	// MinLookupKeyLen defines the minimum length for a valid lookup key
	//
	// Lookup keys use the following format:
	//     [code] [owner] / [key] / [height]
	// Where:
	// - code: 1 byte indicating the type of data stored
	// - owner: optional variable length field
	// - key: optional variable length field
	// - height: 8 bytes representing the block height (uint64)
	// - separator: '/' is used to separate variable length fields (required 2)
	//
	// Therefore the minimum key would be 3 bytes + # of bytes for height
	//     [code] / / [height]
	MinLookupKeyLen = 3 + registers.HeightSuffixLen
)
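
The layout above can be made concrete with a short sketch. This is illustrative only: the package builds its lookup keys internally, and the helper name and the big-endian height encoding are assumptions.

package example

import "encoding/binary"

// lookupKey is a hypothetical helper that lays out a key in the documented
// format: [code] [owner] / [key] / [height].
func lookupKey(code byte, owner, key string, height uint64) []byte {
	out := make([]byte, 0, 1+len(owner)+1+len(key)+1+8)
	out = append(out, code)     // 1 byte indicating the type of data stored
	out = append(out, owner...) // optional variable-length owner
	out = append(out, '/')      // first required separator
	out = append(out, key...)   // optional variable-length key
	out = append(out, '/')      // second required separator

	var h [8]byte
	binary.BigEndian.PutUint64(h[:], height) // 8 bytes for the block height (encoding assumed)
	return append(out, h[:]...)
}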

Variables

var ErrAlreadyBootstrapped = errors.New("found latest key set on badger instance, DB is already bootstrapped")

ErrAlreadyBootstrapped is the sentinel error for an already bootstrapped pebble instance

Functions

func DefaultPebbleOptions

func DefaultPebbleOptions(cache *pebble.Cache, comparer *pebble.Comparer) *pebble.Options

DefaultPebbleOptions returns an optimized set of pebble options. This is mostly copied from pebble's nightly performance benchmark.

func InitAll

func InitAll(metrics module.CacheMetrics, db *pebble.DB) *storage.All

func InitPublic

func InitPublic(dir string, opts *pebble.Options) (*pebble.DB, error)

InitPublic initializes a public database by checking and setting the database type marker. If an existing, inconsistent type marker is set, this method will return an error. Once a database type marker has been set using these methods, the type cannot be changed.

func InitSecret

func InitSecret(dir string, opts *pebble.Options) (*pebble.DB, error)

InitSecret initializes a secrets database by checking and setting the database type marker. If an existing, inconsistent type marker is set, this method will return an error. Once a database type marker has been set using these methods, the type cannot be changed.

func IsBootstrapped

func IsBootstrapped(db *pebble.DB) (bool, error)

IsBootstrapped returns true if the db is bootstrapped and false otherwise. It returns an error if the db is corrupted or another exception occurs.

func NewBootstrappedRegistersWithPathForTest added in v0.32.7

func NewBootstrappedRegistersWithPathForTest(tb testing.TB, dir string, first, latest uint64) *pebble.DB

func OpenDefaultPebbleDB added in v0.33.36

func OpenDefaultPebbleDB(dir string) (*pebble.DB, error)

OpenDefaultPebbleDB opens a pebble database using default options, such as cache size and comparer

func OpenRegisterPebbleDB

func OpenRegisterPebbleDB(dir string) (*pebble.DB, error)

OpenRegisterPebbleDB opens the register database. The difference from OpenDefaultPebbleDB is that it uses a customized comparer (NewMVCCComparer), which is needed to find register values at any given height using pebble's SeekPrefixGE function.

func ReadHeightsFromBootstrappedDB

func ReadHeightsFromBootstrappedDB(db *pebble.DB) (firstHeight uint64, latestHeight uint64, err error)

ReadHeightsFromBootstrappedDB reads the first and latest height from a bootstrapped register db. If the register db is not bootstrapped, it returns storage.ErrNotBootstrapped. If the register db is corrupted, it returns an error.
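
Taken together, the functions above cover opening and inspecting a bootstrapped register database. A hedged sketch follows; the directory and the upstream flow-go import path are assumptions and may differ for this module.

package main

import (
	"log"

	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func main() {
	// Open a register database with the customized comparer required by Registers.
	db, err := pebblestorage.OpenRegisterPebbleDB("/var/flow/registers")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bootstrapped, err := pebblestorage.IsBootstrapped(db)
	if err != nil {
		log.Fatal(err)
	}
	if !bootstrapped {
		log.Fatal("register db is not bootstrapped")
	}

	first, latest, err := pebblestorage.ReadHeightsFromBootstrappedDB(db)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registers indexed for heights [%d, %d]", first, latest)
}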

func RunWithRegistersStorageAtInitialHeights

func RunWithRegistersStorageAtInitialHeights(tb testing.TB, first uint64, latest uint64, f func(r *Registers))

Types

type Batch added in v0.33.36

type Batch struct {
	// contains filtered or unexported fields
}

func NewBatch added in v0.33.36

func NewBatch(db *pebble.DB) *Batch

func (*Batch) Close added in v0.33.36

func (b *Batch) Close() error

func (*Batch) Flush added in v0.33.36

func (b *Batch) Flush() error

Flush commits the underlying pebble batch; in addition, it calls the callbacks added by OnSucceed. Any errors are exceptions.

func (*Batch) GetReader

func (b *Batch) GetReader() storage.Reader

func (*Batch) GetWriter added in v0.33.36

func (b *Batch) GetWriter() storage.BatchWriter

func (*Batch) OnSucceed added in v0.33.36

func (b *Batch) OnSucceed(callback func())

OnSucceed adds a callback to execute after the batch has been successfully flushed. This is useful for implementing caches, where we only cache after the batch has been successfully flushed.
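
A hedged sketch of the flush/callback flow described above: writes accumulate in the batch, and callbacks registered via OnSucceed run only after Flush succeeds, which is what makes cache population safe. The key/value pair, the map used as a stand-in cache, and the Set(key, value) signature on the returned writer are assumptions for illustration.

package example

import (
	"github.com/cockroachdb/pebble"

	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func writeThenCache(db *pebble.DB, key, value []byte, cache map[string][]byte) error {
	batch := pebblestorage.NewBatch(db)
	defer batch.Close()

	// The callback runs only after Flush succeeds, so the cache never holds unflushed data.
	batch.OnSucceed(func() {
		cache[string(key)] = value
	})

	// Set on the returned storage.BatchWriter is assumed to take (key, value).
	if err := batch.GetWriter().Set(key, value); err != nil {
		return err
	}
	return batch.Flush()
}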

type Blocks

type Blocks struct {
	// contains filtered or unexported fields
}

Blocks implements a simple block storage around a pebble DB.

func NewBlocks

func NewBlocks(db *pebble.DB, headers *Headers, payloads *Payloads) *Blocks

NewBlocks ...

func (*Blocks) ByCollectionID

func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error)

ByCollectionID ...

func (*Blocks) ByHeight

func (b *Blocks) ByHeight(height uint64) (*flow.Block, error)

ByHeight ...

func (*Blocks) ByID

func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error)

ByID ...

func (*Blocks) GetLastFullBlockHeight

func (b *Blocks) GetLastFullBlockHeight() (uint64, error)

GetLastFullBlockHeight ...

func (*Blocks) IndexBlockForCollections

func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error

IndexBlockForCollections ...

func (*Blocks) InsertLastFullBlockHeightIfNotExists

func (b *Blocks) InsertLastFullBlockHeightIfNotExists(height uint64) error

InsertLastFullBlockHeightIfNotExists inserts the last full block height. Calling this function multiple times is a no-op and returns no expected errors.

func (*Blocks) Store

func (b *Blocks) Store(block *flow.Block) error

Store ...

func (*Blocks) StorePebble

func (b *Blocks) StorePebble(block *flow.Block) func(storage.PebbleReaderBatchWriter) error

func (*Blocks) StoreTx

func (b *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error

Ignored

func (*Blocks) UpdateLastFullBlockHeight

func (b *Blocks) UpdateLastFullBlockHeight(height uint64) error

UpdateLastFullBlockHeight upserts (updates or inserts) the last full block height.

type Cache added in v0.33.36

type Cache[K comparable, V any] struct {
	// contains filtered or unexported fields
}

Cache is a read-through cache for the underlying storage layer. Note: when a resource is found neither in the cache nor in the underlying storage, it will not be cached. In other words, looking up the missing item again will query the underlying storage again.

func (*Cache[K, V]) Get added in v0.33.36

func (c *Cache[K, V]) Get(key K) func(pebble.Reader) (V, error)

Get will try to retrieve the resource from the cache first, and then from the injected retrieval function. During normal operations, the following error returns are expected:

  • `storage.ErrNotFound` if key is unknown.
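
A hedged sketch of the curried Get API: the returned closure is applied to a pebble.Reader (a *pebble.DB works), and storage.ErrNotFound is the expected error for an unknown key. The Identifier-to-Header instantiation is illustrative; the caches themselves are constructed internally by the stores in this package.

package example

import (
	"errors"

	"github.com/cockroachdb/pebble"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func cachedHeader(
	cache *pebblestorage.Cache[flow.Identifier, *flow.Header],
	db *pebble.DB,
	blockID flow.Identifier,
) (*flow.Header, error) {
	header, err := cache.Get(blockID)(db) // cache first, then the underlying storage
	if errors.Is(err, storage.ErrNotFound) {
		return nil, err // unknown key: expected error, and the miss is not cached
	}
	return header, err
}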

func (*Cache[K, V]) Insert added in v0.33.36

func (c *Cache[K, V]) Insert(key K, resource V)

Insert will add a resource directly to the cache with the given ID, assuming the resource has already been added to storage. (TODO: make private.)

func (*Cache[K, V]) IsCached added in v0.33.36

func (c *Cache[K, V]) IsCached(key K) bool

IsCached returns true if the key exists in the cache. It DOES NOT check whether the key exists in the underlying data store.

func (*Cache[K, V]) PutPebble

func (c *Cache[K, V]) PutPebble(key K, resource V) func(storage.PebbleReaderBatchWriter) error

func (*Cache[K, V]) Remove added in v0.33.36

func (c *Cache[K, V]) Remove(key K)

type CacheBackend added in v0.33.12

type CacheBackend interface {
	Get(key string) (value flow.RegisterValue, ok bool)
	Add(key string, value flow.RegisterValue)
	Contains(key string) bool
	Len() int
	Remove(key string)
}

type CacheType added in v0.33.12

type CacheType int
const (
	CacheTypeLRU CacheType = iota + 1
	CacheTypeTwoQueue
)

func ParseCacheType added in v0.33.12

func ParseCacheType(s string) (CacheType, error)

func (CacheType) String added in v0.33.12

func (m CacheType) String() string
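
A small sketch of these helpers: String reports each cache type's name, and ParseCacheType converts a flag-style string back to a CacheType (an unknown string returns an error). The rejected input below is illustrative.

package main

import (
	"fmt"

	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func main() {
	// The two supported register cache types.
	for _, ct := range []pebblestorage.CacheType{
		pebblestorage.CacheTypeLRU,
		pebblestorage.CacheTypeTwoQueue,
	} {
		fmt.Println(ct.String())
	}

	// ParseCacheType goes the other way, e.g. from a CLI flag.
	if _, err := pebblestorage.ParseCacheType("not-a-cache-type"); err != nil {
		fmt.Println("unknown cache type:", err)
	}
}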

type ChunkDataPacks added in v0.33.36

type ChunkDataPacks struct {
	// contains filtered or unexported fields
}

func NewChunkDataPacks added in v0.33.36

func NewChunkDataPacks(collector module.CacheMetrics, db *pebble.DB, collections storage.Collections, byChunkIDCacheSize uint) *ChunkDataPacks

func (*ChunkDataPacks) BatchRemove added in v0.33.36

func (ch *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, batch storage.BatchStorage) error

BatchRemove is not used in the pebble implementation.

func (*ChunkDataPacks) ByChunkID added in v0.33.36

func (ch *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error)

ByChunkID finds the chunk data pack by chunk ID. It returns storage.ErrNotFound if not found; other errors are exceptions.

func (*ChunkDataPacks) Remove added in v0.33.36

func (ch *ChunkDataPacks) Remove(cs []flow.Identifier) error

Remove removes chunk data packs by their IDs atomically. Any errors are exceptions.

func (*ChunkDataPacks) Store added in v0.33.36

func (ch *ChunkDataPacks) Store(cs []*flow.ChunkDataPack) error

Store stores the given list of chunk data packs atomically. Any errors are exceptions.
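
A hedged usage sketch tying the methods above together: packs are stored atomically as a list, read back by chunk ID, and storage.ErrNotFound marks an unknown chunk. The non-empty packs slice is an assumption of the example.

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func storeAndLoad(cdps *pebblestorage.ChunkDataPacks, packs []*flow.ChunkDataPack) (*flow.ChunkDataPack, error) {
	if err := cdps.Store(packs); err != nil {
		return nil, err // any error here is an exception
	}

	pack, err := cdps.ByChunkID(packs[0].ChunkID) // assumes packs is non-empty
	if errors.Is(err, storage.ErrNotFound) {
		return nil, fmt.Errorf("chunk data pack not indexed: %w", err)
	}
	return pack, err
}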

type ChunksQueue

type ChunksQueue struct {
	// contains filtered or unexported fields
}

ChunksQueue stores a queue of chunk locators that are assigned to me to verify. Job consumers can read the locators as jobs from the queue by index. Chunk locators stored in this queue are unique.

func NewChunkQueue

func NewChunkQueue(db *pebble.DB) *ChunksQueue

NewChunkQueue will initialize the underlying pebble database of the chunk locator queue.

func (*ChunksQueue) AtIndex

func (q *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error)

AtIndex returns the chunk locator stored at the given index in the queue.

func (*ChunksQueue) Init

func (q *ChunksQueue) Init(defaultIndex uint64) (bool, error)

Init initializes chunk queue's latest index with the given default index.

func (*ChunksQueue) LatestIndex

func (q *ChunksQueue) LatestIndex() (uint64, error)

LatestIndex returns the index of the latest chunk locator stored in the queue.

func (*ChunksQueue) StoreChunkLocator

func (q *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error)

StoreChunkLocator stores a new chunk locator that is assigned to me in the job queue. It returns true if the locator was new and false if it was a duplicate.
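
A hedged sketch of the queue lifecycle: initialize the latest index once, push a locator, then read it back by index. The result ID and index values are placeholders.

package example

import (
	"log"

	"github.com/cockroachdb/pebble"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func enqueueLocator(db *pebble.DB, resultID flow.Identifier) {
	queue := pebblestorage.NewChunkQueue(db)

	// Initialize the latest index once; the bool reports whether initialization happened.
	if _, err := queue.Init(0); err != nil {
		log.Fatal(err)
	}

	stored, err := queue.StoreChunkLocator(&chunks.Locator{ResultID: resultID, Index: 0})
	if err != nil {
		log.Fatal(err)
	}
	if !stored {
		log.Println("locator was already in the queue")
	}

	latest, err := queue.LatestIndex()
	if err != nil {
		log.Fatal(err)
	}
	locator, err := queue.AtIndex(latest)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("latest job %d targets result %v", latest, locator.ResultID)
}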

type ClusterBlocks

type ClusterBlocks struct {
	// contains filtered or unexported fields
}

ClusterBlocks implements a simple block storage around a pebble DB.

func NewClusterBlocks

func NewClusterBlocks(db *pebble.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks

func (*ClusterBlocks) ByHeight

func (b *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error)

func (*ClusterBlocks) ByID

func (b *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error)

func (*ClusterBlocks) Store

func (b *ClusterBlocks) Store(block *cluster.Block) error

type ClusterPayloads

type ClusterPayloads struct {
	// contains filtered or unexported fields
}

ClusterPayloads implements storage of block payloads for collection node cluster consensus.

func NewClusterPayloads

func NewClusterPayloads(cacheMetrics module.CacheMetrics, db *pebble.DB) *ClusterPayloads

func (*ClusterPayloads) ByBlockID

func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error)

func (*ClusterPayloads) Store

func (cp *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error

type Collections

type Collections struct {
	// contains filtered or unexported fields
}

func NewCollections

func NewCollections(db *pebble.DB, transactions *Transactions) *Collections

func (*Collections) ByID

func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error)

func (*Collections) LightByID

func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error)

func (*Collections) LightByTransactionID

func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)

func (*Collections) Remove

func (c *Collections) Remove(colID flow.Identifier) error

func (*Collections) Store

func (c *Collections) Store(collection *flow.Collection) error

func (*Collections) StoreLight

func (c *Collections) StoreLight(collection *flow.LightCollection) error

func (*Collections) StoreLightAndIndexByTransaction

func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error

type Commits

type Commits struct {
	// contains filtered or unexported fields
}

func NewCommits

func NewCommits(collector module.CacheMetrics, db *pebble.DB) *Commits

func (*Commits) BatchRemoveByBlockID

func (c *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveByBlockID removes the Commit keyed by blockID in the provided batch. No errors are expected during normal operation, even if no entries are matched. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*Commits) BatchStore

func (c *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch storage.BatchStorage) error

BatchStore stores the Commit keyed by blockID in the provided batch. No errors are expected during normal operation. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*Commits) ByBlockID

func (c *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error)

func (*Commits) RemoveByBlockID

func (c *Commits) RemoveByBlockID(blockID flow.Identifier) error

func (*Commits) Store

func (c *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error

type ComputationResultUploadStatus

type ComputationResultUploadStatus struct {
	// contains filtered or unexported fields
}

func NewComputationResultUploadStatus

func NewComputationResultUploadStatus(db *pebble.DB) *ComputationResultUploadStatus

func (*ComputationResultUploadStatus) ByID

func (c *ComputationResultUploadStatus) ByID(computationResultID flow.Identifier) (bool, error)

func (*ComputationResultUploadStatus) GetIDsByUploadStatus

func (c *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error)

func (*ComputationResultUploadStatus) Remove

func (c *ComputationResultUploadStatus) Remove(computationResultID flow.Identifier) error

func (*ComputationResultUploadStatus) Upsert

func (c *ComputationResultUploadStatus) Upsert(blockID flow.Identifier,
	wasUploadCompleted bool) error

type ConsumerProgress added in v0.37.1

type ConsumerProgress struct {
	// contains filtered or unexported fields
}

func NewConsumerProgress added in v0.37.1

func NewConsumerProgress(db *pebble.DB, consumer string) *ConsumerProgress

func (*ConsumerProgress) InitProcessedIndex added in v0.37.1

func (cp *ConsumerProgress) InitProcessedIndex(defaultIndex uint64) error

InitProcessedIndex inserts the default processed index into the storage layer; this can only be done once. Initializing a second time will return storage.ErrAlreadyExists.

func (*ConsumerProgress) ProcessedIndex added in v0.37.1

func (cp *ConsumerProgress) ProcessedIndex() (uint64, error)

func (*ConsumerProgress) SetProcessedIndex added in v0.37.1

func (cp *ConsumerProgress) SetProcessedIndex(processed uint64) error
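
A hedged sketch of tracking a job consumer's progress with the methods above. JobQueueChunksQueue is the constant defined by this package; the index values are placeholders.

package example

import (
	"errors"
	"log"

	"github.com/cockroachdb/pebble"

	"github.com/onflow/flow-go/storage"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func trackProgress(db *pebble.DB) {
	progress := pebblestorage.NewConsumerProgress(db, pebblestorage.JobQueueChunksQueue)

	// A second initialization returns storage.ErrAlreadyExists, which is safe to ignore here.
	if err := progress.InitProcessedIndex(0); err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
		log.Fatal(err)
	}

	if err := progress.SetProcessedIndex(42); err != nil {
		log.Fatal(err)
	}

	index, err := progress.ProcessedIndex()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("processed up to index %d", index)
}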

type DKGState

type DKGState struct {
	// contains filtered or unexported fields
}

DKGState stores state information about in-progress and completed DKGs, including computed keys. It must be instantiated using the secrets database.

func NewDKGState

func NewDKGState(collector module.CacheMetrics, db *pebble.DB) (*DKGState, error)

NewDKGState returns the DKGState implementation backed by Pebble DB.

func (*DKGState) GetDKGEndState

func (ds *DKGState) GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error)

GetDKGEndState retrieves the DKG end state for the epoch.

func (*DKGState) GetDKGStarted

func (ds *DKGState) GetDKGStarted(epochCounter uint64) (bool, error)

GetDKGStarted checks whether the DKG has been started for the given epoch.

func (*DKGState) InsertMyBeaconPrivateKey

func (ds *DKGState) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error

InsertMyBeaconPrivateKey stores the random beacon private key for an epoch.

CAUTION: these keys are stored before they are validated against the canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys to guarantee only keys safe for signing are returned

func (*DKGState) RetrieveMyBeaconPrivateKey

func (ds *DKGState) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)

RetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch.

CAUTION: these keys are stored before they are validated against the canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys to guarantee only keys safe for signing are returned

func (*DKGState) SetDKGEndState

func (ds *DKGState) SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error

SetDKGEndState stores that the DKG has ended, and its end state.

func (*DKGState) SetDKGStarted

func (ds *DKGState) SetDKGStarted(epochCounter uint64) error

SetDKGStarted sets the flag indicating the DKG has started for the given epoch.

type EpochCommits

type EpochCommits struct {
	// contains filtered or unexported fields
}

func NewEpochCommits

func NewEpochCommits(collector module.CacheMetrics, db *pebble.DB) *EpochCommits

func (*EpochCommits) ByID

func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error)

ByID will return the EpochCommit event by its ID. Error returns:

  • storage.ErrNotFound if no EpochCommit with the ID exists

func (*EpochCommits) StorePebble

func (ec *EpochCommits) StorePebble(commit *flow.EpochCommit) func(storage.PebbleReaderBatchWriter) error

func (*EpochCommits) StoreTx

func (ec *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error

type EpochSetups

type EpochSetups struct {
	// contains filtered or unexported fields
}

func NewEpochSetups

func NewEpochSetups(collector module.CacheMetrics, db *pebble.DB) *EpochSetups

NewEpochSetups instantiates a new EpochSetups storage.

func (*EpochSetups) ByID

func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error)

ByID will return the EpochSetup event by its ID. Error returns:

  • storage.ErrNotFound if no EpochSetup with the ID exists

func (*EpochSetups) StorePebble

func (es *EpochSetups) StorePebble(setup *flow.EpochSetup) func(storage.PebbleReaderBatchWriter) error

func (*EpochSetups) StoreTx

func (es *EpochSetups) StoreTx(setup *flow.EpochSetup) func(*transaction.Tx) error

type EpochStatuses

type EpochStatuses struct {
	// contains filtered or unexported fields
}

func NewEpochStatuses

func NewEpochStatuses(collector module.CacheMetrics, db *pebble.DB) *EpochStatuses

NewEpochStatuses ...

func (*EpochStatuses) ByBlockID

func (es *EpochStatuses) ByBlockID(blockID flow.Identifier) (*flow.EpochStatus, error)

ByBlockID will return the epoch status for the given block. Error returns:

  • storage.ErrNotFound if the EpochStatus for the block does not exist

func (*EpochStatuses) StorePebble

func (es *EpochStatuses) StorePebble(blockID flow.Identifier, status *flow.EpochStatus) func(storage.PebbleReaderBatchWriter) error

func (*EpochStatuses) StoreTx

func (es *EpochStatuses) StoreTx(blockID flow.Identifier, status *flow.EpochStatus) func(tx *transaction.Tx) error

type Events

type Events struct {
	// contains filtered or unexported fields
}

func NewEvents

func NewEvents(collector module.CacheMetrics, db *pebble.DB) *Events

func (*Events) BatchRemoveByBlockID

func (e *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveByBlockID removes events keyed by a blockID in the provided batch. No errors are expected during normal operation, even if no entries are matched. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*Events) BatchStore

func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.BatchStorage) error

BatchStore stores events keyed by a blockID in the provided batch. No errors are expected during normal operation, but it may return a generic error if pebble fails to process the request.

func (*Events) ByBlockID

func (e *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

ByBlockID returns the events for the given block ID. Note: This method will return an empty slice and no error if no entries for the blockID are found.

func (*Events) ByBlockIDEventType

func (e *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error)

ByBlockIDEventType returns the events for the given block ID and event type. Note: This method will return an empty slice and no error if no entries for the blockID are found.

func (*Events) ByBlockIDTransactionID

func (e *Events) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) ([]flow.Event, error)

ByBlockIDTransactionID returns the events for the given block ID and transaction ID. Note: This method will return an empty slice and no error if no entries for the blockID are found.

func (*Events) ByBlockIDTransactionIndex

func (e *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error)

ByBlockIDTransactionIndex returns the events for the given block ID and transaction index. Note: This method will return an empty slice and no error if no entries for the blockID are found.

func (*Events) RemoveByBlockID

func (e *Events) RemoveByBlockID(blockID flow.Identifier) error

RemoveByBlockID removes events by block ID

func (*Events) Store

func (e *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error

Store will store events for the given block ID

type ExecutionReceipts

type ExecutionReceipts struct {
	// contains filtered or unexported fields
}

ExecutionReceipts implements storage for execution receipts.

func NewExecutionReceipts

func NewExecutionReceipts(collector module.CacheMetrics, db *pebble.DB, results *ExecutionResults, cacheSize uint) *ExecutionReceipts

NewExecutionReceipts creates an ExecutionReceipts instance, a database of receipts that supports storing and indexing receipts by receipt ID and block ID.

func (*ExecutionReceipts) BatchStore

func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error

func (*ExecutionReceipts) ByBlockID

func (*ExecutionReceipts) ByID

func (*ExecutionReceipts) Store

func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error

type ExecutionResults

type ExecutionResults struct {
	// contains filtered or unexported fields
}

ExecutionResults implements persistent storage for execution results.

func NewExecutionResults

func NewExecutionResults(collector module.CacheMetrics, db *pebble.DB) *ExecutionResults

func (*ExecutionResults) BatchIndex

func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.BatchStorage) error

func (*ExecutionResults) BatchRemoveIndexByBlockID

func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. No errors are expected during normal operation, even if no entries are matched. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*ExecutionResults) BatchStore

func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.BatchStorage) error

func (*ExecutionResults) ByBlockID

func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)

func (*ExecutionResults) ByID

func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)

func (*ExecutionResults) ByIDTx

func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(interface{}) (*flow.ExecutionResult, error)

func (*ExecutionResults) ForceIndex

func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error

func (*ExecutionResults) Index

func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error

func (*ExecutionResults) RemoveIndexByBlockID

func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error

func (*ExecutionResults) Store

func (r *ExecutionResults) Store(result *flow.ExecutionResult) error

type Guarantees

type Guarantees struct {
	// contains filtered or unexported fields
}

Guarantees implements persistent storage for collection guarantees.

func NewGuarantees

func NewGuarantees(collector module.CacheMetrics, db *pebble.DB, cacheSize uint) *Guarantees

func (*Guarantees) ByCollectionID

func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error)

func (*Guarantees) Store

func (g *Guarantees) Store(guarantee *flow.CollectionGuarantee) error

type Headers

type Headers struct {
	// contains filtered or unexported fields
}

func NewHeaders

func NewHeaders(collector module.CacheMetrics, db *pebble.DB) *Headers

func (*Headers) BlockIDByHeight

func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error)

BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized version of `ByHeight` that skips retrieving the block. Expected errors during normal operation:

  • `storage.ErrNotFound` if no finalized block is known at given height.

func (*Headers) ByBlockID

func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error)

func (*Headers) ByHeight

func (h *Headers) ByHeight(height uint64) (*flow.Header, error)

func (*Headers) ByParentID

func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error)

func (*Headers) Exists

func (h *Headers) Exists(blockID flow.Identifier) (bool, error)

Exists returns true if a header with the given ID has been stored. No errors are expected during normal operation.

func (*Headers) FindHeaders

func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error)

func (*Headers) RollbackExecutedBlock

func (h *Headers) RollbackExecutedBlock(header *flow.Header) error

RollbackExecutedBlock updates the executed block header to the given header. It is only useful for an execution node to roll back the executed block height. It is not concurrent safe.

func (*Headers) Store

func (h *Headers) Store(header *flow.Header) error

type Index

type Index struct {
	// contains filtered or unexported fields
}

Index implements a simple read-only payload storage around a pebble DB.

func NewIndex

func NewIndex(collector module.CacheMetrics, db *pebble.DB) *Index

func (*Index) ByBlockID

func (i *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error)

func (*Index) Store

func (i *Index) Store(blockID flow.Identifier, index *flow.Index) error

type LightTransactionResults

type LightTransactionResults struct {
	// contains filtered or unexported fields
}

func NewLightTransactionResults

func NewLightTransactionResults(collector module.CacheMetrics, db *pebble.DB, transactionResultsCacheSize uint) *LightTransactionResults

func (*LightTransactionResults) BatchStore

func (tr *LightTransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch storage.BatchStorage) error

func (*LightTransactionResults) ByBlockID

ByBlockID gets all transaction results for a block, ordered by transaction index

func (*LightTransactionResults) ByBlockIDTransactionID

func (tr *LightTransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.LightTransactionResult, error)

ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID

func (*LightTransactionResults) ByBlockIDTransactionIndex

func (tr *LightTransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error)

ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index

type MyExecutionReceipts

type MyExecutionReceipts struct {
	// contains filtered or unexported fields
}

MyExecutionReceipts holds and indexes Execution Receipts. MyExecutionReceipts is implemented as a wrapper around pebble.ExecutionReceipts. The wrapper adds the notion of "MY execution receipt", from the viewpoint of an individual Execution Node.

func NewMyExecutionReceipts

func NewMyExecutionReceipts(collector module.CacheMetrics, db *pebble.DB, receipts *ExecutionReceipts) *MyExecutionReceipts

NewMyExecutionReceipts creates an instance of MyExecutionReceipts, which is a wrapper around pebble.ExecutionReceipts. It's useful for execution nodes to keep track of produced execution receipts.

func (*MyExecutionReceipts) BatchRemoveIndexByBlockID

func (m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveIndexByBlockID removes the blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch. No errors are expected during normal operation, even if no entries are matched. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*MyExecutionReceipts) BatchStoreMyReceipt

func (m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error

BatchStoreMyReceipt stores the blockID-to-my-receipt index entry keyed by blockID in a provided batch. No errors are expected during normal operation. If the entity fails marshalling, the error is wrapped in a generic error and returned. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*MyExecutionReceipts) MyReceipt

func (m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error)

MyReceipt retrieves my receipt for the given block. Returns storage.ErrNotFound if no receipt was persisted for the block.

func (*MyExecutionReceipts) RemoveIndexByBlockID

func (m *MyExecutionReceipts) RemoveIndexByBlockID(blockID flow.Identifier) error

func (*MyExecutionReceipts) StoreMyReceipt

func (m *MyExecutionReceipts) StoreMyReceipt(receipt *flow.ExecutionReceipt) error

StoreMyReceipt stores the receipt and marks it as mine (trusted). My receipts are indexed by the block whose result they compute. Currently, we only support indexing a _single_ receipt per block. Attempting to store conflicting receipts for the same block will error.

type Payloads

type Payloads struct {
	// contains filtered or unexported fields
}

func NewPayloads

func NewPayloads(db *pebble.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts,
	results *ExecutionResults) *Payloads

func (*Payloads) ByBlockID

func (p *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error)

func (*Payloads) Store

func (p *Payloads) Store(blockID flow.Identifier, payload *flow.Payload) error

type QuorumCertificates

type QuorumCertificates struct {
	// contains filtered or unexported fields
}

QuorumCertificates implements persistent storage for quorum certificates.

func NewQuorumCertificates

func NewQuorumCertificates(collector module.CacheMetrics, db *pebble.DB, cacheSize uint) *QuorumCertificates

NewQuorumCertificates creates a QuorumCertificates instance, which is a database of quorum certificates that supports storing, caching and retrieving by block ID.

func (*QuorumCertificates) ByBlockID

func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error)

func (*QuorumCertificates) StorePebble

func (*QuorumCertificates) StoreTx

type ReadCache added in v0.33.12

type ReadCache struct {
	// contains filtered or unexported fields
}

func (*ReadCache) Get added in v0.33.12

func (c *ReadCache) Get(key string) (flow.RegisterValue, error)

Get will try to retrieve the resource from the cache first, and then from the injected retrieval function. During normal operations, the following error returns are expected:

  • `storage.ErrNotFound` if key is unknown.

func (*ReadCache) Insert added in v0.33.12

func (c *ReadCache) Insert(key string, resource flow.RegisterValue)

Insert will add a resource directly to the cache with the given ID

func (*ReadCache) IsCached added in v0.33.12

func (c *ReadCache) IsCached(key string) bool

IsCached returns true if the key exists in the cache. It DOES NOT check whether the key exists in the underlying data store.

func (*ReadCache) Remove added in v0.33.12

func (c *ReadCache) Remove(key string)

type RegisterBootstrap

type RegisterBootstrap struct {
	// contains filtered or unexported fields
}

func NewRegisterBootstrap

func NewRegisterBootstrap(
	db *pebble.DB,
	checkpointFile string,
	rootHeight uint64,
	rootHash ledger.RootHash,
	log zerolog.Logger,
) (*RegisterBootstrap, error)

NewRegisterBootstrap creates the bootstrap object for reading checkpoint data and the height tracker in pebble. This object must be initialized, and RegisterBootstrap.IndexCheckpointFile must be run to put the pebble db instance into the correct state to initialize a Registers store.

func (*RegisterBootstrap) IndexCheckpointFile

func (b *RegisterBootstrap) IndexCheckpointFile(ctx context.Context, workerCount int) error

IndexCheckpointFile indexes the checkpoint file in the Dir provided
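
A hedged sketch of bootstrapping a register database from a checkpoint file using the two functions above. The directory, checkpoint path, root height/hash and worker count are caller-supplied placeholders; the zerolog and flow-go import paths are assumed.

package example

import (
	"context"
	"log"
	"os"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/ledger"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func bootstrapRegisters(dir, checkpointFile string, rootHeight uint64, rootHash ledger.RootHash) {
	db, err := pebblestorage.OpenRegisterPebbleDB(dir)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bootstrap, err := pebblestorage.NewRegisterBootstrap(db, checkpointFile, rootHeight, rootHash, zerolog.New(os.Stderr))
	if err != nil {
		log.Fatal(err)
	}

	// Index the checkpoint file with a few workers; afterwards the db is in the
	// correct state to back a Registers store.
	if err := bootstrap.IndexCheckpointFile(context.Background(), 4); err != nil {
		log.Fatal(err)
	}
}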

type Registers

type Registers struct {
	// contains filtered or unexported fields
}

Registers implements pebble-backed storage for registers, given a pebble instance with the root block and root height populated.

func NewBootstrappedRegistersWithPath

func NewBootstrappedRegistersWithPath(dir string) (*Registers, *pebble.DB, error)

NewBootstrappedRegistersWithPath initializes a new Registers instance with a pebble db. If the database is not initialized, it closes the database and returns storage.ErrNotBootstrapped.

func NewRegisters

func NewRegisters(db *pebble.DB) (*Registers, error)

NewRegisters takes a populated pebble instance with LatestHeight and FirstHeight set. It returns storage.ErrNotBootstrapped if those two keys are unavailable, as this implies an uninitialized state, and returns another error if the database is in a corrupted state.

func (*Registers) FirstHeight

func (s *Registers) FirstHeight() uint64

FirstHeight returns the first indexed height found in the store, typically the root block height for the spork.

func (*Registers) Get

func (s *Registers) Get(
	reg flow.RegisterID,
	height uint64,
) (flow.RegisterValue, error)

Get returns the most recently updated payload for the given RegisterID. "Most recent" means the latest update at or below the given height.

For example, if there are 2 values stored for register A at height 6 and 11, then GetPayload(13, A) would return the value at height 11.

Expected errors during normal operation:

  • storage.ErrNotFound if no register values are found
  • storage.ErrHeightNotIndexed if the requested height is out of the range of stored heights

func (*Registers) LatestHeight

func (s *Registers) LatestHeight() uint64

LatestHeight returns the latest height for which complete registers are available.

func (*Registers) Store

func (s *Registers) Store(
	entries flow.RegisterEntries,
	height uint64,
) error

Store sets the given entries in a batch. This function is expected to be called with one batch per height, sequentially. Under normal conditions, it should be called with the value of height set to LatestHeight + 1. CAUTION: This function is not safe for concurrent use.
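
A hedged sketch of the write/read cycle described above: one Store call per height at LatestHeight()+1, followed by a Get at that height. The directory, register ID and value are placeholders.

package main

import (
	"log"

	"github.com/onflow/flow-go/model/flow"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func main() {
	registers, db, err := pebblestorage.NewBootstrappedRegistersWithPath("/var/flow/registers")
	if err != nil {
		log.Fatal(err) // storage.ErrNotBootstrapped if the db was never bootstrapped
	}
	defer db.Close()

	regID := flow.RegisterID{Owner: "owner", Key: "key"}
	next := registers.LatestHeight() + 1

	// One batch per height, sequentially; not safe for concurrent use.
	if err := registers.Store(flow.RegisterEntries{{Key: regID, Value: []byte("value")}}, next); err != nil {
		log.Fatal(err)
	}

	// Returns the value just stored, the most recent update at or below `next`.
	value, err := registers.Get(regID, next)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("register value: %s", value)
}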

type RegistersCache added in v0.33.12

type RegistersCache struct {
	*Registers
	// contains filtered or unexported fields
}

func NewRegistersCache added in v0.33.12

func NewRegistersCache(registers *Registers, cacheType CacheType, size uint, metrics module.CacheMetrics) (*RegistersCache, error)

NewRegistersCache wraps a read cache around Get requests to an underlying Registers object.

func (*RegistersCache) Get added in v0.33.12

func (c *RegistersCache) Get(
	reg flow.RegisterID,
	height uint64,
) (flow.RegisterValue, error)

Get returns the most recently updated payload for the given RegisterID. "Most recent" means the latest update at or below the given height.

For example, if there are 2 values stored for register A at height 6 and 11, then GetPayload(13, A) would return the value at height 11.

Expected errors during normal operation:

  • storage.ErrNotFound if no register values are found
  • storage.ErrHeightNotIndexed if the requested height is out of the range of stored heights

type ResultApprovals

type ResultApprovals struct {
	// contains filtered or unexported fields
}

ResultApprovals implements persistent storage for result approvals.

func NewResultApprovals

func NewResultApprovals(collector module.CacheMetrics, db *pebble.DB) *ResultApprovals

func (*ResultApprovals) ByChunk

func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error)

ByChunk retrieves a ResultApproval by result ID and chunk index. The ResultApprovals store is only used within a verification node, where it is assumed that there is never more than one approval per chunk.

func (*ResultApprovals) ByID

func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)

ByID retrieves a ResultApproval by its ID

func (*ResultApprovals) Index

func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error

Index indexes a ResultApproval by chunk (ResultID + chunk index). The operation is idempotent: repeated calls with the same value are equivalent to calling the method once, and the method still succeeds on each call.

func (*ResultApprovals) Store

func (r *ResultApprovals) Store(approval *flow.ResultApproval) error

Store stores a ResultApproval

type SafeBeaconPrivateKeys

type SafeBeaconPrivateKeys struct {
	// contains filtered or unexported fields
}

SafeBeaconPrivateKeys is the safe beacon key storage backed by Pebble DB.

func NewSafeBeaconPrivateKeys

func NewSafeBeaconPrivateKeys(state *DKGState) *SafeBeaconPrivateKeys

NewSafeBeaconPrivateKeys returns a safe beacon key storage backed by Pebble DB.

func (*SafeBeaconPrivateKeys) RetrieveMyBeaconPrivateKey

func (keys *SafeBeaconPrivateKeys) RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error)

RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given epoch, only if my key has been confirmed valid and safe for use.

Returns:

  • (key, true, nil) if the key is present and confirmed valid
  • (nil, false, nil) if the key has been marked invalid or unavailable -> no beacon key will ever be available for the epoch in this case
  • (nil, false, storage.ErrNotFound) if the DKG has not ended
  • (nil, false, error) for any unexpected exception
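
A hedged sketch of handling the four documented outcomes; the epoch counter is a placeholder.

package example

import (
	"errors"
	"log"

	"github.com/onflow/flow-go/storage"
	pebblestorage "github.com/onflow/flow-go/storage/pebble"
)

func beaconKeyStatus(keys *pebblestorage.SafeBeaconPrivateKeys, epochCounter uint64) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epochCounter)
	switch {
	case errors.Is(err, storage.ErrNotFound):
		log.Println("DKG has not ended yet for this epoch")
	case err != nil:
		log.Fatalf("unexpected exception: %v", err)
	case !safe:
		log.Println("no beacon key will ever be available for this epoch")
	default:
		log.Printf("safe beacon key available: %v", key.PublicKey())
	}
}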

type Seals

type Seals struct {
	// contains filtered or unexported fields
}

func NewSeals

func NewSeals(collector module.CacheMetrics, db *pebble.DB) *Seals

func (*Seals) ByID

func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error)

func (*Seals) FinalizedSealForBlock

func (s *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error)

FinalizedSealForBlock returns the seal for the given block, only if that seal has been included in a finalized block. Returns storage.ErrNotFound if the block is unknown or unsealed.

func (*Seals) HighestInFork

func (s *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error)

HighestInFork retrieves the highest seal that was included in the fork up to (and including) blockID. This method should return a seal for any block known to the node. Returns storage.ErrNotFound if blockID is unknown.

func (*Seals) Store

func (s *Seals) Store(seal *flow.Seal) error

type ServiceEvents

type ServiceEvents struct {
	// contains filtered or unexported fields
}

func NewServiceEvents

func NewServiceEvents(collector module.CacheMetrics, db *pebble.DB) *ServiceEvents

func (*ServiceEvents) BatchRemoveByBlockID

func (e *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveByBlockID removes service events keyed by a blockID in the provided batch. No errors are expected during normal operation, even if no entries are matched. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*ServiceEvents) BatchStore

func (e *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, batch storage.BatchStorage) error

BatchStore stores service events keyed by a blockID in the provided batch. No errors are expected during normal operation. If pebble unexpectedly fails to process the request, the error is wrapped in a generic error and returned.

func (*ServiceEvents) ByBlockID

func (e *ServiceEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

ByBlockID returns the events for the given block ID

func (*ServiceEvents) RemoveByBlockID

func (e *ServiceEvents) RemoveByBlockID(blockID flow.Identifier) error

RemoveByBlockID removes service events by block ID

type Transaction

type Transaction struct {
	// contains filtered or unexported fields
}

func (*Transaction) Delete

func (t *Transaction) Delete(key []byte) error

func (*Transaction) DeleteRange

func (t *Transaction) DeleteRange(start, end []byte) error

func (*Transaction) Set

func (t *Transaction) Set(key, value []byte) error

type TransactionResults

type TransactionResults struct {
	// contains filtered or unexported fields
}

func NewTransactionResults

func NewTransactionResults(collector module.CacheMetrics, db *pebble.DB, transactionResultsCacheSize uint) *TransactionResults

func (*TransactionResults) BatchRemoveByBlockID

func (tr *TransactionResults) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error

BatchRemoveByBlockID batch removes transaction results by block ID

func (*TransactionResults) BatchStore

func (tr *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.BatchStorage) error

BatchStore will store the transaction results for the given block ID in a batch

func (*TransactionResults) ByBlockID

func (tr *TransactionResults) ByBlockID(blockID flow.Identifier) ([]flow.TransactionResult, error)

ByBlockID gets all transaction results for a block, ordered by transaction index

func (*TransactionResults) ByBlockIDTransactionID

func (tr *TransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.TransactionResult, error)

ByBlockIDTransactionID returns the runtime transaction result for the given block ID and transaction ID

func (*TransactionResults) ByBlockIDTransactionIndex

func (tr *TransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error)

ByBlockIDTransactionIndex returns the runtime transaction result for the given block ID and transaction index

func (*TransactionResults) RemoveByBlockID

func (tr *TransactionResults) RemoveByBlockID(blockID flow.Identifier) error

RemoveByBlockID removes transaction results by block ID

type Transactions

type Transactions struct {
	// contains filtered or unexported fields
}

Transactions ...

func NewTransactions

func NewTransactions(cacheMetrics module.CacheMetrics, db *pebble.DB) *Transactions

NewTransactions ...

func (*Transactions) ByID

ByID ...

func (*Transactions) Store

func (t *Transactions) Store(flowTx *flow.TransactionBody) error

Store ...

type VersionBeacons

type VersionBeacons struct {
	// contains filtered or unexported fields
}

func NewVersionBeacons

func NewVersionBeacons(db *pebble.DB) *VersionBeacons

func (*VersionBeacons) Highest

func (r *VersionBeacons) Highest(
	belowOrEqualTo uint64,
) (*flow.SealedVersionBeacon, error)

