Documentation ¶
Index ¶
- Constants
- Variables
- func Compact(ctx context.Context, basePath string, opts *Options, validate bool) error
- func ValidatePinCollectionChunks(ctx context.Context, basePath, pin, location string, opts *Options) error
- func ValidateReserve(ctx context.Context, basePath string, opts *Options) error
- func ValidateRetrievalIndex(ctx context.Context, basePath string, opts *Options) error
- type BinC
- type CacheStat
- type CacheStore
- type ChunkStoreStat
- type DB
- func (db *DB) Cache() storage.Putter
- func (db *DB) CacheShallowCopy(ctx context.Context, store transaction.Storage, addrs ...swarm.Address) error
- func (db *DB) ChunkStore() storage.ReadOnlyChunkStore
- func (db *DB) Close() error
- func (db *DB) CommittedDepth() uint8
- func (db *DB) DebugInfo(ctx context.Context) (Info, error)
- func (db *DB) DeletePin(ctx context.Context, root swarm.Address) (err error)
- func (db *DB) DeleteSession(tagID uint64) error
- func (db *DB) DirectUpload() PutterSession
- func (db *DB) Download(cache bool) storage.Getter
- func (db *DB) EvictBatch(ctx context.Context, batchID []byte) error
- func (db *DB) HasPin(root swarm.Address) (has bool, err error)
- func (db *DB) IsWithinStorageRadius(addr swarm.Address) bool
- func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
- func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error)
- func (db *DB) Lock(strs ...string) func()
- func (db *DB) Lookup() storage.Getter
- func (db *DB) Metrics() []prometheus.Collector
- func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error)
- func (db *DB) NewCollection(ctx context.Context) (PutterSession, error)
- func (db *DB) NewSession() (SessionInfo, error)
- func (db *DB) PinIntegrity() *PinIntegrity
- func (db *DB) Pins() (address []swarm.Address, err error)
- func (db *DB) PusherFeed() <-chan *pusher.Op
- func (db *DB) Report(ctx context.Context, chunk swarm.Chunk, state storage.ChunkState) error
- func (db *DB) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (ch swarm.Chunk, err error)
- func (db *DB) ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (has bool, err error)
- func (db *DB) ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
- func (db *DB) ReserveLastBinIDs() ([]uint64, uint64, error)
- func (db *DB) ReservePutter() storage.Putter
- func (db *DB) ReserveSample(ctx context.Context, anchor []byte, committedDepth uint8, consensusTime uint64, ...) (Sample, error)
- func (db *DB) ReserveSize() int
- func (db *DB) ReserveSizeWithinRadius() uint64
- func (db *DB) ResetReserve(ctx context.Context) error
- func (db *DB) Session(tagID uint64) (SessionInfo, error)
- func (db *DB) SetRetrievalService(r retrieval.Interface)
- func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error))
- func (db *DB) Storage() transaction.Storage
- func (db *DB) StorageRadius() uint8
- func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
- func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func())
- func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error)
- type Debugger
- type Info
- type LocalStore
- type NeighborhoodStat
- type NeighborhoodStats
- type NetStore
- type Options
- type PinIntegrity
- type PinIterator
- type PinStat
- type PinStore
- type PinningStat
- type PutterSession
- type RadiusChecker
- type Reserve
- type ReserveIterator
- type ReserveStat
- type ReserveStore
- type Sample
- type SampleItem
- type SampleStats
- type SessionInfo
- type Syncer
- type UploadStat
- type UploadStore
Constants ¶
const (
DefaultReserveCapacity = 1 << 22 // 4194304 chunks
)
Default options for levelDB.
const SampleSize = 16
Variables ¶
var ErrDBQuit = errors.New("db quit")
Functions ¶
func Compact ¶
Compact minimizes sharky disk usage by using the current sharky locations from the storer to relocate chunks, starting from the end of the used slots, into the first available slots.
func ValidatePinCollectionChunks ¶
func ValidatePinCollectionChunks(ctx context.Context, basePath, pin, location string, opts *Options) error
ValidatePinCollectionChunks collects all chunk addresses that are present in a pin collection but are either invalid or missing altogether.
func ValidateReserve ¶ added in v2.1.0
ValidateReserve ensures that all retrievalIndex chunks are correctly stored in sharky.
Types ¶
type BinC ¶
BinC is the result returned from the SubscribeBin channel that contains the chunk address and the binID
type CacheStore ¶
type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache. Cache() storage.Putter }
CacheStore is a logical component of the storer that deals with cache content.
type ChunkStoreStat ¶
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
DB implements all the component stores described above.
func New ¶
New returns a newly constructed DB object which implements all the above component stores.
func (*DB) CacheShallowCopy ¶
func (db *DB) CacheShallowCopy(ctx context.Context, store transaction.Storage, addrs ...swarm.Address) error
CacheShallowCopy creates cache entries with the expectation that the chunk already exists in the chunkstore.
func (*DB) ChunkStore ¶
func (db *DB) ChunkStore() storage.ReadOnlyChunkStore
func (*DB) CommittedDepth ¶ added in v2.3.0
func (*DB) DeleteSession ¶
DeleteSession is the implementation of the UploadStore.DeleteSession method.
func (*DB) DirectUpload ¶
func (db *DB) DirectUpload() PutterSession
DirectUpload is the implementation of the NetStore.DirectUpload method.
func (*DB) EvictBatch ¶
EvictBatch evicts all chunks belonging to a batch from the reserve.
func (*DB) IteratePinCollection ¶
func (*DB) ListSessions ¶
func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error)
ListSessions is the implementation of the UploadStore.ListSessions method.
func (*DB) Metrics ¶
func (db *DB) Metrics() []prometheus.Collector
Metrics returns set of prometheus collectors.
func (*DB) NeighborhoodsStat ¶ added in v2.3.0
func (db *DB) NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error)
func (*DB) NewCollection ¶
func (db *DB) NewCollection(ctx context.Context) (PutterSession, error)
NewCollection is the implementation of the PinStore.NewCollection method.
func (*DB) NewSession ¶
func (db *DB) NewSession() (SessionInfo, error)
NewSession is the implementation of UploadStore.NewSession method.
func (*DB) PinIntegrity ¶
func (db *DB) PinIntegrity() *PinIntegrity
func (*DB) PusherFeed ¶
PusherFeed is the implementation of the NetStore.PusherFeed method.
func (*DB) Report ¶
Report implements the storage.PushReporter by wrapping the internal reporter with a transaction.
func (*DB) ReserveGet ¶
func (*DB) ReserveHas ¶
func (*DB) ReserveIterateChunks ¶
func (*DB) ReserveLastBinIDs ¶
ReserveLastBinIDs returns all of the highest binIDs from all the bins in the reserve and the epoch time of the reserve.
func (*DB) ReservePutter ¶
ReservePutter returns a Putter for inserting chunks into the reserve.
func (*DB) ReserveSample ¶
func (db *DB) ReserveSample( ctx context.Context, anchor []byte, committedDepth uint8, consensusTime uint64, minBatchBalance *big.Int, ) (Sample, error)
ReserveSample generates the sample of reserve storage of a node required for the storage incentives agent to participate in the lottery round. In order to generate this sample we need to iterate through all the chunks in the node's reserve and calculate the transformed hashes of all the chunks using the anchor as the salt. In order to generate the transformed hashes, we will use the std hmac keyed-hash implementation by using the anchor as the key. Nodes need to calculate the sample in the most optimal way and there are time restrictions. The lottery round is a time based round, so nodes participating in the round need to perform this calculation within the round limits. In order to optimize this we use a simple pipeline pattern: Iterate chunk addresses -> Get the chunk data and calculate transformed hash -> Assemble the sample If the node has doubled its capacity by some factor, the sampling process needs to only pertain to the chunks of the selected neighborhood as determined by the anchor and the "committed depth" and NOT the whole reserve. The committed depth is the sum of the radius and the doubling factor. For example, the committed depth is 11, but the local node has a doubling factor of 3, so the local radius will eventually drop to 8. The sampling must only consider chunks with proximity 11 to the anchor.
func (*DB) ReserveSize ¶
func (*DB) ReserveSizeWithinRadius ¶
func (*DB) ResetReserve ¶ added in v2.2.0
ResetReserve removes all entries.
func (*DB) Session ¶
func (db *DB) Session(tagID uint64) (SessionInfo, error)
Session is the implementation of the UploadStore.Session method.
func (*DB) SetRetrievalService ¶
func (*DB) StartReserveWorker ¶
func (*DB) Storage ¶ added in v2.1.0
func (db *DB) Storage() transaction.Storage
func (*DB) StorageRadius ¶
func (*DB) SubscribeBin ¶
func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
SubscribeBin returns a channel that feeds all the chunks in the reserve from a certain bin between the start and end binIDs.
func (*DB) SubscribePush ¶
type Info ¶
type Info struct { Upload UploadStat Pinning PinningStat Cache CacheStat Reserve ReserveStat ChunkStore ChunkStoreStat }
type LocalStore ¶
type LocalStore interface {
ChunkStore() storage.ReadOnlyChunkStore
}
LocalStore is a read-only ChunkStore. It can be used to check if chunk is known locally, but it cannot tell what is the context of the chunk (whether it is pinned, uploaded, etc.).
type NeighborhoodStat ¶ added in v2.3.0
type NeighborhoodStat struct { Neighborhood swarm.Neighborhood ReserveSizeWithinRadius int Proximity uint8 }
type NeighborhoodStats ¶ added in v2.3.0
type NeighborhoodStats interface {
NeighborhoodsStat(ctx context.Context) ([]*NeighborhoodStat, error)
}
type NetStore ¶
type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, its returned immediately, otherwise it is retrieved from // the network. Download(cache bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op }
NetStore is a logical component of the storer that deals with network. It will push/retrieve chunks from the network.
type Options ¶
type Options struct { // These are options related to levelDB. Currently, the underlying storage used is levelDB. LdbStats atomic.Pointer[prometheus.HistogramVec] LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool Logger log.Logger Tracer *tracing.Tracer Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration ReserveMinEvictCount uint64 ReserveCapacityDoubling int CacheCapacity uint64 CacheMinEvictCount uint64 MinimumStorageRadius uint }
Options provides a container to configure different things in the storer.
type PinIntegrity ¶
type PinIterator ¶
type PinIterator interface {
IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
}
PinIterator is a helper interface which can be used to iterate over all the chunks in a pinning collection.
type PinStore ¶
type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root reference. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) }
PinStore is a logical component of the storer which deals with pinning functionality.
type PinningStat ¶
type PutterSession ¶
type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error }
PutterSession provides a session around the storage.Putter. The session on successful completion commits all the operations or in case of error, rolls back the state.
type RadiusChecker ¶
type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 CommittedDepth() uint8 }
RadiusChecker provides the radius related functionality.
type Reserve ¶
type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int }
Reserve is a logical component of the storer that deals with reserve content. It will implement all the core functionality required for the protocols.
type ReserveIterator ¶
ReserveIterator is a helper interface which can be used to iterate over all the chunks in the reserve.
type ReserveStat ¶
type ReserveStore ¶
type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, uint64, error) RadiusChecker }
ReserveStore is a logical component of the storer that deals with reserve content. It will implement all the core functionality required for the protocols.
type Sample ¶
type Sample struct { Stats SampleStats Items []SampleItem }
func MakeSampleUsingChunks ¶
MakeSampleUsingChunks returns Sample constructed using supplied chunks.
type SampleItem ¶
type SampleStats ¶
type SampleStats struct { TotalDuration time.Duration TotalIterated int64 IterationDuration time.Duration SampleInserts int64 NewIgnored int64 InvalidStamp int64 BelowBalanceIgnored int64 TaddrDuration time.Duration ValidStampDuration time.Duration BatchesBelowValueDuration time.Duration RogueChunk int64 ChunkLoadDuration time.Duration ChunkLoadFailed int64 StampLoadFailed int64 }
type SessionInfo ¶
SessionInfo is a type which exports the storer tag object. This object stores all the relevant information about a particular session.
type UploadStat ¶
type UploadStore ¶
type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) }
UploadStore is a logical component of the storer which deals with the upload of data to swarm.