storage

package v0.0.0-...-4fefc29

Published: Jul 15, 2024 License: MIT Imports: 17 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	// ErrNotFound is returned when a blob is not found in the storage.
	ErrNotFound = errors.New("blob not found")
	// ErrStorage is returned when there is an error accessing the storage.
	ErrStorage = errors.New("error accessing storage")
	// ErrMarshaling is returned when there is an error in (un)marshaling the blob.
	ErrMarshaling = errors.New("error encoding/decoding blob")
	// ErrCompress is returned when there is an error gzipping the data.
	ErrCompress = errors.New("error compressing blob")
)
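Callers can distinguish these cases with errors.Is, which works whether implementations return the sentinels directly or wrap them. A minimal sketch, where the storage import path is a placeholder and the helper name is illustrative:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

// describeRead is an illustrative helper showing how callers can
// distinguish the package's sentinel errors with errors.Is.
func describeRead(ctx context.Context, store storage.DataStoreReader, hash common.Hash) string {
	data, err := store.ReadBlob(ctx, hash)
	switch {
	case err == nil:
		return fmt.Sprintf("read %d sidecars for block %s", len(data.BlobSidecars.Data), hash)
	case errors.Is(err, storage.ErrNotFound):
		return "blob has not been archived yet"
	case errors.Is(err, storage.ErrStorage):
		return "error accessing the data store: " + err.Error()
	case errors.Is(err, storage.ErrMarshaling):
		return "stored blob could not be decoded: " + err.Error()
	default:
		return err.Error()
	}
}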
var BackfillMu sync.Mutex

Functions

This section is empty.

Types

type BackfillProcess

type BackfillProcess struct {
	Start   v1.BeaconBlockHeader `json:"start_block"`
	Current v1.BeaconBlockHeader `json:"current_block"`
}

type BackfillProcesses

type BackfillProcesses map[common.Hash]BackfillProcess

BackfillProcesses maps a backfill's start block hash to its BackfillProcess. This allows the archiver to track multiple backfill processes and to resume a previous backfill if an archiver restart interrupted it.
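A hedged sketch of how an archiver might persist an entry in this map, assuming BackfillMu guards concurrent access to the stored backfill state (the import path and helper name are placeholders):

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"

	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

// recordBackfill persists the state of an in-progress backfill so it can
// be resumed after a restart. startHash is the hash of the block the
// backfill started from, used as the map key as described above.
func recordBackfill(ctx context.Context, store storage.DataStore, startHash common.Hash, proc storage.BackfillProcess) error {
	// Assumption: BackfillMu serializes access to the persisted backfill state.
	storage.BackfillMu.Lock()
	defer storage.BackfillMu.Unlock()

	processes, err := store.ReadBackfillProcesses(ctx)
	if err != nil {
		return err
	}
	if processes == nil {
		processes = make(storage.BackfillProcesses)
	}
	processes[startHash] = proc
	return store.WriteBackfillProcesses(ctx, processes)
}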

type BlobData

type BlobData struct {
	Header       Header       `json:"header"`
	BlobSidecars BlobSidecars `json:"blob_sidecars"`
}

type BlobSidecars

type BlobSidecars struct {
	Data []*deneb.BlobSidecar `json:"data"`
}

func (*BlobSidecars) MarshalSSZ

func (b *BlobSidecars) MarshalSSZ() ([]byte, error)

MarshalSSZ marshals the blob sidecars into SSZ. As the blob sidecars are a single list of fixed-size elements, we can simply concatenate the marshaled sidecars together.
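A minimal sketch of that concatenation, assuming each deneb.BlobSidecar encodes itself via its own MarshalSSZ (as generated SSZ types in go-eth2-client do); this is illustrative rather than the package's actual implementation:

package example

import (
	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

// concatSidecarsSSZ concatenates each sidecar's SSZ encoding, which is
// valid because the sidecars form a single list of fixed-size elements.
func concatSidecarsSSZ(b *storage.BlobSidecars) ([]byte, error) {
	var out []byte
	for _, sc := range b.Data {
		enc, err := sc.MarshalSSZ() // assumed per-sidecar SSZ marshaler
		if err != nil {
			return nil, err
		}
		out = append(out, enc...)
	}
	return out, nil
}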

func (*BlobSidecars) SizeSSZ

func (b *BlobSidecars) SizeSSZ() int

type DataStore

type DataStore interface {
	DataStoreReader
	DataStoreWriter
}

DataStore is the interface for a data store that can be both written to and read from.

func NewStorage

func NewStorage(cfg flags.StorageConfig, l log.Logger) (DataStore, error)

type DataStoreReader

type DataStoreReader interface {
	// Exists returns true if the given blob hash exists in the data store, false otherwise.
	// It should return one of the following:
	// - nil: the existence check was successful. In this case the boolean should also be set correctly.
	// - ErrStorage: there was an error accessing the data store.
	Exists(ctx context.Context, hash common.Hash) (bool, error)
	// ReadBlob reads the blob data for the given beacon block hash from the data store.
	// It should return one of the following:
	// - nil: reading the blob was successful. The blob data is also returned.
	// - ErrNotFound: the blob data was not found in the data store.
	// - ErrStorage: there was an error accessing the data store.
	// - ErrMarshaling: there was an error decoding the blob data.
	ReadBlob(ctx context.Context, hash common.Hash) (BlobData, error)
	ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error)
	ReadLockfile(ctx context.Context) (Lockfile, error)
}

DataStoreReader is the interface for reading from a data store.

type DataStoreWriter

type DataStoreWriter interface {
	// WriteBlob writes the given blob data to the data store. It should return one of the following errors:
	// - nil: writing the blob was successful.
	// - ErrStorage: there was an error accessing the data store.
	// - ErrMarshaling: there was an error encoding the blob data.
	WriteBlob(ctx context.Context, data BlobData) error
	WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error
	WriteLockfile(ctx context.Context, data Lockfile) error
}

DataStoreWriter is the interface for writing to a data store.
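Taken together, the reader and writer interfaces support a simple write-then-read round trip. A hedged sketch (the import path and function name are placeholders; the store and block hash are assumed to come from elsewhere):

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"

	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

// archiveAndVerify writes blob data for a beacon block and reads it back.
func archiveAndVerify(ctx context.Context, store storage.DataStore, hash common.Hash, sidecars storage.BlobSidecars) error {
	data := storage.BlobData{
		Header:       storage.Header{BeaconBlockHash: hash},
		BlobSidecars: sidecars,
	}
	if err := store.WriteBlob(ctx, data); err != nil {
		return err
	}
	got, err := store.ReadBlob(ctx, hash)
	if err != nil {
		return err
	}
	_ = got // use the archived blob data
	return nil
}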

type FileStorage

type FileStorage struct {
	// contains filtered or unexported fields
}

func NewFileStorage

func NewFileStorage(dir string, l log.Logger) *FileStorage
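A hedged construction example, assuming the log.Logger parameter is go-ethereum's log package and using a hypothetical directory path:

package main

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

func main() {
	l := log.New() // assumes go-ethereum's log package satisfies the log.Logger parameter
	store := storage.NewFileStorage("/var/lib/blob-archiver", l) // hypothetical directory

	data := storage.BlobData{Header: storage.Header{BeaconBlockHash: common.Hash{0x01}}}
	if err := store.WriteBlob(context.Background(), data); err != nil {
		l.Error("failed to write blob", "err", err)
	}
}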

func (*FileStorage) Exists

func (s *FileStorage) Exists(_ context.Context, hash common.Hash) (bool, error)

func (*FileStorage) ReadBackfillProcesses

func (s *FileStorage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error)

func (*FileStorage) ReadBlob

func (s *FileStorage) ReadBlob(_ context.Context, hash common.Hash) (BlobData, error)

func (*FileStorage) ReadLockfile

func (s *FileStorage) ReadLockfile(ctx context.Context) (Lockfile, error)

func (*FileStorage) WriteBackfillProcesses

func (s *FileStorage) WriteBackfillProcesses(_ context.Context, data BackfillProcesses) error

func (*FileStorage) WriteBlob

func (s *FileStorage) WriteBlob(_ context.Context, data BlobData) error

func (*FileStorage) WriteLockfile

func (s *FileStorage) WriteLockfile(_ context.Context, data Lockfile) error

type Header

type Header struct {
	BeaconBlockHash common.Hash `json:"beacon_block_hash"`
}

type Lockfile

type Lockfile struct {
	ArchiverId string `json:"archiver_id"`
	Timestamp  int64  `json:"timestamp"`
}
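The field names suggest a lease-style lock identifying which archiver instance currently owns the data store; the following is a speculative sketch of writing one (the purpose is inferred, not documented here):

package example

import (
	"context"
	"time"

	// Placeholder import path; substitute this package's real module path.
	"example.com/blob-archiver/archiver/storage"
)

// claimStore records this archiver instance and the current Unix time in
// the lockfile. The lease semantics are an inference from the field names.
func claimStore(ctx context.Context, store storage.DataStoreWriter, archiverID string) error {
	lock := storage.Lockfile{
		ArchiverId: archiverID,
		Timestamp:  time.Now().Unix(),
	}
	return store.WriteLockfile(ctx, lock)
}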

type S3Storage

type S3Storage struct {
	// contains filtered or unexported fields
}

func NewS3Storage

func NewS3Storage(cfg flags.S3Config, l log.Logger) (*S3Storage, error)

func (*S3Storage) Exists

func (s *S3Storage) Exists(ctx context.Context, hash common.Hash) (bool, error)

func (*S3Storage) ReadBackfillProcesses

func (s *S3Storage) ReadBackfillProcesses(ctx context.Context) (BackfillProcesses, error)

func (*S3Storage) ReadBlob

func (s *S3Storage) ReadBlob(ctx context.Context, hash common.Hash) (BlobData, error)

func (*S3Storage) ReadLockfile

func (s *S3Storage) ReadLockfile(ctx context.Context) (Lockfile, error)

func (*S3Storage) WriteBackfillProcesses

func (s *S3Storage) WriteBackfillProcesses(ctx context.Context, data BackfillProcesses) error

func (*S3Storage) WriteBlob

func (s *S3Storage) WriteBlob(ctx context.Context, data BlobData) error

func (*S3Storage) WriteLockfile

func (s *S3Storage) WriteLockfile(ctx context.Context, data Lockfile) error

