Documentation ¶
Index ¶
- Constants
- Variables
- type BinC
- type CacheStat
- type CacheStore
- type ChunkStoreStat
- type DB
- func (db *DB) BatchHint(address swarm.Address) ([]byte, error)
- func (db *DB) Cache() storage.Putter
- func (db *DB) ChunkStore() storage.ReadOnlyChunkStore
- func (db *DB) Close() error
- func (db *DB) DebugInfo(ctx context.Context) (Info, error)
- func (db *DB) DeletePin(ctx context.Context, root swarm.Address) (err error)
- func (db *DB) DeleteSession(tagID uint64) error
- func (db *DB) DirectUpload() PutterSession
- func (db *DB) Do(ctx context.Context, op func(internal.Storage) error) error
- func (db *DB) Download(cache bool) storage.Getter
- func (db *DB) EvictBatch(ctx context.Context, batchID []byte) (err error)
- func (db *DB) HasPin(root swarm.Address) (has bool, err error)
- func (db *DB) IsWithinStorageRadius(addr swarm.Address) bool
- func (db *DB) IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
- func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error)
- func (db *DB) Lookup() storage.Getter
- func (db *DB) Metrics() []prometheus.Collector
- func (db *DB) NewCollection(ctx context.Context) (PutterSession, error)
- func (db *DB) NewSession() (SessionInfo, error)
- func (db *DB) Pins() (address []swarm.Address, err error)
- func (db *DB) PusherFeed() <-chan *pusher.Op
- func (db *DB) Report(ctx context.Context, chunk swarm.Chunk, state storage.ChunkState) error
- func (db *DB) ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (chunk swarm.Chunk, err error)
- func (db *DB) ReserveHas(addr swarm.Address, batchID []byte) (has bool, err error)
- func (db *DB) ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
- func (db *DB) ReserveLastBinIDs() ([]uint64, error)
- func (db *DB) ReservePutter() storage.Putter
- func (db *DB) ReserveSample(ctx context.Context, anchor []byte, storageRadius uint8, consensusTime uint64, ...) (Sample, error)
- func (db *DB) ReserveSize() int
- func (db *DB) Session(tagID uint64) (SessionInfo, error)
- func (db *DB) SetRetrievalService(r retrieval.Interface)
- func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error))
- func (db *DB) StorageRadius() uint8
- func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
- func (db *DB) SubscribePush(ctx context.Context) (<-chan swarm.Chunk, func())
- func (db *DB) Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error)
- type Debugger
- type Info
- type LocalStore
- type NetStore
- type Options
- type PinIterator
- type PinStore
- type PinningStat
- type PutterSession
- type RadiusChecker
- type Reserve
- type ReserveIterator
- type ReserveStat
- type ReserveStore
- type Sample
- type SampleItem
- type SampleStats
- type SessionInfo
- type Syncer
- type UploadStat
- type UploadStore
Constants ¶
const SampleSize = 8
Variables ¶
var ErrDBQuit = errors.New("db quit")
Functions ¶
This section is empty.
Types ¶
type BinC ¶
BinC is the result returned from the SubscribeBin channel that contains the chunk address and the binID
type CacheStore ¶
type CacheStore interface { // Lookup method provides a storage.Getter wrapped around the underlying // ChunkStore which will update cache related indexes if required on successful // lookups. Lookup() storage.Getter // Cache method provides a storage.Putter which will add the chunks to cache. // This will add the chunk to underlying store as well as new indexes which // will keep track of the chunk in the cache. Cache() storage.Putter }
CacheStore is a logical component of the storer that deals with cache content.
type ChunkStoreStat ¶
type ChunkStoreStat struct { TotalChunks int }
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
DB implements all the component stores described above.
func New ¶
New returns a newly constructed DB object which implements all the above component stores.
func (*DB) ChunkStore ¶
func (db *DB) ChunkStore() storage.ReadOnlyChunkStore
func (*DB) DeleteSession ¶
DeleteSession is the implementation of the UploadStore.DeleteSession method.
func (*DB) DirectUpload ¶
func (db *DB) DirectUpload() PutterSession
DirectUpload is the implementation of the NetStore.DirectUpload method.
func (*DB) EvictBatch ¶
EvictBatch evicts all chunks belonging to a batch from the reserve.
func (*DB) IteratePinCollection ¶
func (*DB) ListSessions ¶
func (db *DB) ListSessions(offset, limit int) ([]SessionInfo, error)
ListSessions is the implementation of the UploadStore.ListSessions method.
func (*DB) Metrics ¶
func (db *DB) Metrics() []prometheus.Collector
Metrics returns set of prometheus collectors.
func (*DB) NewCollection ¶
func (db *DB) NewCollection(ctx context.Context) (PutterSession, error)
NewCollection is the implementation of the PinStore.NewCollection method.
func (*DB) NewSession ¶
func (db *DB) NewSession() (SessionInfo, error)
NewSession is the implementation of UploadStore.NewSession method.
func (*DB) PusherFeed ¶
PusherFeed is the implementation of the NetStore.PusherFeed method.
func (*DB) Report ¶
Report implements the storage.PushReporter by wrapping the internal reporter with a transaction.
func (*DB) ReserveGet ¶
func (*DB) ReserveHas ¶
func (*DB) ReserveIterateChunks ¶
func (*DB) ReserveLastBinIDs ¶
ReserveLastBinIDs returns all of the highest binIDs from all the bins in the reserve.
func (*DB) ReservePutter ¶
ReservePutter returns a Putter for inserting chunks into the reserve.
func (*DB) ReserveSample ¶
func (db *DB) ReserveSample( ctx context.Context, anchor []byte, storageRadius uint8, consensusTime uint64, minBatchBalance *big.Int, ) (Sample, error)
ReserveSample generates the sample of reserve storage of a node required for the storage incentives agent to participate in the lottery round. In order to generate this sample we need to iterate through all the chunks in the node's reserve and calculate the transformed hashes of all the chunks using the anchor as the salt. In order to generate the transformed hashes, we will use the standard HMAC keyed-hash implementation by using the anchor as the key. Nodes need to calculate the sample in an optimal way and there are time restrictions. The lottery round is a time-based round, so nodes participating in the round need to perform this calculation within the round limits. In order to optimize this we use a simple pipeline pattern: Iterate chunk addresses -> Get the chunk data and calculate transformed hash -> Assemble the sample
func (*DB) ReserveSize ¶
func (*DB) Session ¶
func (db *DB) Session(tagID uint64) (SessionInfo, error)
Session is the implementation of the UploadStore.Session method.
func (*DB) SetRetrievalService ¶
func (*DB) StartReserveWorker ¶
func (*DB) StorageRadius ¶
func (*DB) SubscribeBin ¶
func (db *DB) SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
SubscribeBin returns a channel that feeds all the chunks in the reserve from a certain bin between the start and end binIDs.
func (*DB) SubscribePush ¶
type Info ¶
type Info struct { Upload UploadStat Pinning PinningStat Cache CacheStat Reserve ReserveStat ChunkStore ChunkStoreStat }
type LocalStore ¶
type LocalStore interface {
ChunkStore() storage.ReadOnlyChunkStore
}
LocalStore is a read-only ChunkStore. It can be used to check if a chunk is known locally, but it cannot tell the context of the chunk (whether it is pinned, uploaded, etc.).
type NetStore ¶
type NetStore interface { // DirectUpload provides a session which can be used to push chunks directly // to the network. DirectUpload() PutterSession // Download provides a getter which can be used to download data. If the data // is found locally, it is returned immediately, otherwise it is retrieved from // the network. Download(pin bool) storage.Getter // PusherFeed is the feed for direct push chunks. This can be used by the // pusher component to push out the chunks. PusherFeed() <-chan *pusher.Op }
NetStore is a logical component of the storer that deals with network. It will push/retrieve chunks from the network.
type Options ¶
type Options struct { // These are options related to levelDB. Currently the underlying storage used // is levelDB. LdbOpenFilesLimit uint64 LdbBlockCacheCapacity uint64 LdbWriteBufferSize uint64 LdbDisableSeeksCompaction bool CacheCapacity uint64 Logger log.Logger Address swarm.Address WarmupDuration time.Duration Batchstore postage.Storer ValidStamp postage.ValidStampFn RadiusSetter topology.SetStorageRadiuser StateStore storage.StateStorer ReserveCapacity int ReserveWakeUpDuration time.Duration }
Options provides a container to configure different things in the storer.
type PinIterator ¶
type PinIterator interface {
IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
}
PinIterator is a helper interface which can be used to iterate over all the chunks in a pinning collection.
type PinStore ¶
type PinStore interface { // NewCollection can be used to create a new PutterSession which writes a new // pinning collection. The address passed in during the Done of the session is // used as the root reference. NewCollection(context.Context) (PutterSession, error) // DeletePin deletes all the chunks associated with the collection pointed to // by the swarm.Address passed in. DeletePin(context.Context, swarm.Address) error // Pins returns all the root references of pinning collections. Pins() ([]swarm.Address, error) // HasPin is a helper which checks if a collection exists with the root // reference passed in. HasPin(swarm.Address) (bool, error) }
PinStore is a logical component of the storer which deals with pinning functionality.
type PinningStat ¶
type PutterSession ¶
type PutterSession interface { storage.Putter // Done is used to close the session and optionally assign a swarm.Address to // this session. Done(swarm.Address) error // Cleanup is used to cleanup any state related to this session in case of // any error. Cleanup() error }
PutterSession provides a session around the storage.Putter. The session on successful completion commits all the operations or in case of error, rolls back the state.
type RadiusChecker ¶
type RadiusChecker interface { IsWithinStorageRadius(addr swarm.Address) bool StorageRadius() uint8 }
RadiusChecker provides the radius related functionality.
type Reserve ¶
type Reserve interface { ReserveStore EvictBatch(ctx context.Context, batchID []byte) error ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error) ReserveSize() int }
Reserve is a logical component of the storer that deals with reserve content. It will implement all the core functionality required for the protocols.
type ReserveIterator ¶
ReserveIterator is a helper interface which can be used to iterate over all the chunks in the reserve.
type ReserveStat ¶
type ReserveStore ¶
type ReserveStore interface { ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte) (swarm.Chunk, error) ReserveHas(addr swarm.Address, batchID []byte) (bool, error) ReservePutter() storage.Putter SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error) ReserveLastBinIDs() ([]uint64, error) RadiusChecker }
ReserveStore is a logical component of the storer that deals with reserve content. It will implement all the core functionality required for the protocols.
type Sample ¶
type Sample struct { Stats SampleStats Items []SampleItem }
type SampleItem ¶
type SampleStats ¶
type SampleStats struct { TotalDuration time.Duration TotalIterated int64 IterationDuration time.Duration SampleInserts int64 NewIgnored int64 InvalidStamp int64 BelowBalanceIgnored int64 HmacrDuration time.Duration ValidStampDuration time.Duration BatchesBelowValueDuration time.Duration RogueChunk int64 ChunkLoadDuration time.Duration ChunkLoadFailed int64 StampLoadFailed int64 }
type SessionInfo ¶
SessionInfo is a type which exports the storer tag object. This object stores all the relevant information about a particular session.
type UploadStat ¶
type UploadStore ¶
type UploadStore interface { // Upload provides a PutterSession which is tied to the tagID. Optionally if // users requests to pin the data, a new pinning collection is created. Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error) // NewSession can be used to obtain a tag ID to use for a new Upload session. NewSession() (SessionInfo, error) // Session will show the information about the session. Session(tagID uint64) (SessionInfo, error) // DeleteSession will delete the session info associated with the tag id. DeleteSession(tagID uint64) error // ListSessions will list all the Sessions currently being tracked. ListSessions(offset, limit int) ([]SessionInfo, error) // BatchHint will return the batch ID hint for the chunk reference if known. BatchHint(swarm.Address) ([]byte, error) }
UploadStore is a logical component of the storer which deals with the upload of data to swarm.