Documentation ¶
Overview ¶
Copyright 2016 The go-ethereum Authors This file is part of the go-ethereum library.
The go-ethereum library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
The go-ethereum library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
Index ¶
- Constants
- Variables
- func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool, tag *chunk.Tag) *hasherStore
- type Address
- func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, ...) (Address, func(context.Context) error, error)
- func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter, ...) (Address, func(context.Context) error, error)
- func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error)
- type AddressCollection
- type Chunk
- type ChunkData
- type ChunkStore
- type ChunkValidator
- type ChunkerParams
- type ContentAddressValidator
- type FakeChunkStore
- func (f *FakeChunkStore) Close() error
- func (f *FakeChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error)
- func (f *FakeChunkStore) GetMulti(_ context.Context, _ chunk.ModeGet, refs ...Address) ([]Chunk, error)
- func (f *FakeChunkStore) Has(_ context.Context, ref Address) (bool, error)
- func (f *FakeChunkStore) HasMulti(_ context.Context, refs ...Address) ([]bool, error)
- func (f *FakeChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
- func (f *FakeChunkStore) Put(_ context.Context, _ chunk.ModePut, ch ...Chunk) ([]bool, error)
- func (f *FakeChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addrs ...chunk.Address) (err error)
- func (f *FakeChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func())
- type Fetcher
- type FileStore
- func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader) (addrs AddressCollection, err error)
- func (f *FileStore) HashSize() int
- func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool)
- func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error)
- type FileStoreParams
- type Getter
- type HashWithLength
- type JoinerParams
- type LNetStore
- type LazyChunkReader
- func (r *LazyChunkReader) Context() context.Context
- func (r *LazyChunkReader) Read(b []byte) (read int, err error)
- func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error)
- func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error)
- func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, err error)
- type LazySectionReader
- type LazyTestSectionReader
- type NetStore
- func (n *NetStore) Close() error
- func (n *NetStore) Get(ctx context.Context, mode chunk.ModeGet, req *Request) (ch Chunk, err error)
- func (n *NetStore) GetOrCreateFetcher(ctx context.Context, ref Address, interestedParty string) (f *Fetcher, loaded bool, ok bool)
- func (n *NetStore) Has(ctx context.Context, ref Address) (bool, error)
- func (n *NetStore) Put(ctx context.Context, mode chunk.ModePut, chs ...Chunk) ([]bool, error)
- func (n *NetStore) RemoteFetch(ctx context.Context, req *Request, fi *Fetcher) (chunk.Chunk, error)
- type Putter
- type PyramidChunker
- func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error)
- func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader
- func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error)
- type PyramidSplitterParams
- type Reference
- type RemoteGetFunc
- type Request
- type SplitterParams
- type StoreParams
- type SwarmHash
- type SwarmHasher
- type TreeChunker
- type TreeEntry
- type TreeSplitterParams
Constants ¶
const (
	ErrInit = iota
	ErrNotFound
	ErrInvalidValue
	ErrDataOverflow
	ErrNothingToReturn
	ErrInvalidSignature
	ErrNotSynced
)
const (
	BMTHash     = "BMT"
	SHA3Hash    = "SHA3" // http://golang.org/pkg/hash/#Hash
	DefaultHash = BMTHash
)
const AddressLength = chunk.AddressLength
AddressLength is the same as chunk.AddressLength for backward compatibility.
const (
ChunkProcessors = 8
)
const MaxPO = chunk.MaxPO
MaxPO is the same as chunk.MaxPO for backward compatibility.
Variables ¶
var (
	ErrChunkNotFound = chunk.ErrChunkNotFound
	ErrChunkInvalid  = chunk.ErrChunkNotFound
)
Errors are the same as the ones in chunk package for backward compatibility.
var (
ErrNoSuitablePeer = errors.New("no suitable peer")
)
var NewChunk = chunk.NewChunk
NewChunk is the same as chunk.NewChunk for backward compatibility.
var Proximity = chunk.Proximity
Proximity is the same as chunk.Proximity for backward compatibility.
var ZeroAddr = chunk.ZeroAddr
ZeroAddr is the same as chunk.ZeroAddr for backward compatibility.
Functions ¶
func NewHasherStore ¶
func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool, tag *chunk.Tag) *hasherStore
NewHasherStore creates a hasherStore object, which implements the Putter and Getter interfaces. With the hasherStore you can put and get chunk data (which is just []byte) into a ChunkStore, and the hasherStore will take care of encryption/decryption of the data if necessary.
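A minimal sketch of putting data through a hasherStore backed by a FakeChunkStore is shown below. The import paths, the zero-value tag, and the 8-byte little-endian span prefix on the chunk payload are assumptions for illustration, not part of this package's documented contract:

package main

import (
	"context"
	"encoding/binary"
	"fmt"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

func main() {
	ctx := context.Background()

	// FakeChunkStore discards data, so only the hashing is exercised.
	putter := storage.NewHasherStore(
		&storage.FakeChunkStore{},
		storage.MakeHashFunc(storage.DefaultHash),
		false,        // toEncrypt
		&chunk.Tag{}, // zero-value tag, assumed sufficient for a sketch
	)

	// ChunkData is assumed to carry an 8-byte little-endian span prefix.
	payload := []byte("hello swarm")
	data := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(data[:8], uint64(len(payload)))
	copy(data[8:], payload)

	ref, err := putter.Put(ctx, storage.ChunkData(data))
	if err != nil {
		panic(err)
	}
	putter.Close()
	if err := putter.Wait(ctx); err != nil {
		panic(err)
	}
	fmt.Printf("reference: %x\n", ref)
}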
Types ¶
type Address ¶
Address is an alias for chunk.Address for backward compatibility.
func PyramidAppend ¶
func PyramidSplit ¶
func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter, tag *chunk.Tag) (Address, func(context.Context) error, error)
When splitting, data is given as a SectionReader, and the key is a hashSize-long byte slice (Address); the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter which the caller provides.
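For instance, a hedged sketch of driving a split and waiting for completion (the putter, getter and tag are assumed to be set up elsewhere, e.g. as hasherStores):

package example

import (
	"bytes"
	"context"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

// splitContent stores content chunk-by-chunk and returns the root address.
func splitContent(ctx context.Context, content []byte, putter storage.Putter, getter storage.Getter, tag *chunk.Tag) (storage.Address, error) {
	addr, wait, err := storage.PyramidSplit(ctx, bytes.NewReader(content), putter, getter, tag)
	if err != nil {
		return nil, err
	}
	// The root address is only safe to use once wait returns:
	// it blocks until all chunks have been stored.
	if err := wait(ctx); err != nil {
		return nil, err
	}
	return addr, nil
}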
func TreeSplit ¶
func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error)
When splitting, data is given as a SectionReader, and the key is a hashSize-long byte slice (Key); the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter which the caller provides.
type AddressCollection ¶
type AddressCollection []Address
func NewAddressCollection ¶
func NewAddressCollection(l int) AddressCollection
func (AddressCollection) Len ¶
func (c AddressCollection) Len() int
func (AddressCollection) Less ¶
func (c AddressCollection) Less(i, j int) bool
func (AddressCollection) Swap ¶
func (c AddressCollection) Swap(i, j int)
type Chunk ¶
Chunk is an alias for chunk.Chunk for backward compatibility.
func GenerateRandomChunk ¶
func GenerateRandomChunks ¶
type ChunkStore ¶
type ChunkValidator ¶
type ChunkerParams ¶
type ChunkerParams struct {
// contains filtered or unexported fields
}
type ContentAddressValidator ¶
type ContentAddressValidator struct {
Hasher SwarmHasher
}
ContentAddressValidator provides a method for validating the content address of chunks. It holds the corresponding hasher used to create the address.
func NewContentAddressValidator ¶
func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator
NewContentAddressValidator constructs a new ContentAddressValidator.
func (*ContentAddressValidator) Validate ¶
func (v *ContentAddressValidator) Validate(ch Chunk) bool
Validate checks that the given address is a valid content address for the given chunk's data.
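A short sketch of validating a randomly generated chunk follows; GenerateRandomChunk's signature and chunk.DefaultSize are assumptions inferred from the index above:

package example

import (
	"fmt"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

func validateExample() {
	v := storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash))
	// GenerateRandomChunk(dataSize) is assumed; see the index above.
	ch := storage.GenerateRandomChunk(chunk.DefaultSize)
	if v.Validate(ch) {
		fmt.Println("chunk address matches its content")
	}
}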
type FakeChunkStore ¶
type FakeChunkStore struct{}
FakeChunkStore doesn't store anything; it just implements the ChunkStore interface. It can be injected into a hasherStore if you don't want to actually store data, just do the hashing.
func (*FakeChunkStore) Close ¶
func (f *FakeChunkStore) Close() error
Close doesn't do anything; it is just here to implement the ChunkStore interface.
func (*FakeChunkStore) HasMulti ¶ added in v0.5.0
HasMulti doesn't do anything; it is just here to implement the ChunkStore interface.
func (*FakeChunkStore) LastPullSubscriptionBinID ¶
func (f *FakeChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
func (*FakeChunkStore) SubscribePull ¶
func (f *FakeChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func())
type Fetcher ¶ added in v0.4.2
type Fetcher struct {
	Delivered         chan struct{} // when closed, it means that the chunk this Fetcher refers to is delivered
	Chunk             chunk.Chunk   // the delivered chunk data
	CreatedAt         time.Time     // timestamp when the fetcher was created, used for metrics measuring lifetime of fetchers
	CreatedBy         string        // who created the fetcher - "request" or "syncing", used for metrics measuring lifecycle of fetchers
	RequestedBySyncer bool          // whether we have issued at least once a request through Offered/Wanted hashes flow
	// contains filtered or unexported fields
}
Fetcher is a struct which maintains the state of remote requests. Fetchers are stored in the fetchers map and signal to all interested parties when a given chunk is delivered. The mutex controls who closes the channel, and makes sure the channel is closed only once.
func NewFetcher ¶ added in v0.4.2
func NewFetcher() *Fetcher
NewFetcher is a constructor for a Fetcher
func (*Fetcher) SafeClose ¶ added in v0.4.2
SafeClose signals to interested parties (those waiting for a signal on fi.Delivered) that a chunk is delivered. It assigns the delivered chunk to the fi.Chunk field, then closes the fi.Delivered channel through a sync.Once object, because it is possible for a chunk to be delivered multiple times concurrently.
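A hedged sketch of the consumer side of this signal: waiters block on fi.Delivered and read fi.Chunk once it closes (a delivering goroutine elsewhere would call fi.SafeClose, which is assumed to take the delivered chunk):

package example

import (
	"context"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

// waitForDelivery blocks until the chunk behind fi is delivered or ctx expires.
func waitForDelivery(ctx context.Context, fi *storage.Fetcher) (chunk.Chunk, error) {
	select {
	case <-fi.Delivered:
		// The channel is closed exactly once (via sync.Once), so every
		// waiter unblocks here and can safely read fi.Chunk.
		return fi.Chunk, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}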
type FileStore ¶
type FileStore struct {
	ChunkStore
	// contains filtered or unexported fields
}
func NewFileStore ¶
func NewFileStore(store ChunkStore, putterStore ChunkStore, params *FileStoreParams, tags *chunk.Tags) *FileStore
func NewLocalFileStore ¶
func NewLocalFileStore(datadir string, basekey []byte, tags *chunk.Tags) (*FileStore, func(), error)
NewLocalFileStore is a convenience constructor for local testing.
func (*FileStore) GetAllReferences ¶
func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader) (addrs AddressCollection, err error)
GetAllReferences is a public API. This endpoint returns all chunk hashes (only) for a given file
func (*FileStore) Retrieve ¶
func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool)
Retrieve is a public API. It is the main entry point for direct document retrieval, used by the FS-aware API and httpaccess. Chunk retrieval blocks on netStore requests with a timeout, so the reader will report an error if retrieval of chunks within the requested range times out. It returns a reader with the chunk data and whether the content was encrypted.
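A hedged round-trip sketch using the local test constructor (the base key and the chunk.NewTags constructor are assumptions for illustration):

package example

import (
	"bytes"
	"context"
	"io"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

func roundTrip(dir string, content []byte) ([]byte, error) {
	fileStore, cleanup, err := storage.NewLocalFileStore(dir, make([]byte, 32), chunk.NewTags())
	if err != nil {
		return nil, err
	}
	defer cleanup()

	ctx := context.Background()
	addr, wait, err := fileStore.Store(ctx, bytes.NewReader(content), int64(len(content)), false)
	if err != nil {
		return nil, err
	}
	// The root address is only usable after wait returns.
	if err := wait(ctx); err != nil {
		return nil, err
	}

	reader, _ := fileStore.Retrieve(ctx, addr)
	// Size must be called before ReadAt; see LazyChunkReader below.
	if _, err := reader.Size(ctx, nil); err != nil {
		return nil, err
	}
	got := make([]byte, len(content))
	n, err := reader.ReadAt(got, 0)
	if err != nil && err != io.EOF {
		return nil, err
	}
	return got[:n], nil
}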
type FileStoreParams ¶
type FileStoreParams struct {
Hash string
}
func NewFileStoreParams ¶
func NewFileStoreParams() *FileStoreParams
type HashWithLength ¶
func (*HashWithLength) SetSpanBytes ¶ added in v0.5.6
func (h *HashWithLength) SetSpanBytes(length []byte)
type JoinerParams ¶
type JoinerParams struct {
	ChunkerParams
	// contains filtered or unexported fields
}
type LNetStore ¶ added in v0.4.2
type LNetStore struct {
*NetStore
}
LNetStore is a wrapper of NetStore, which implements the chunk.Store interface. It is used only by the FileStore, the component used by the Swarm API to store and retrieve content and to split and join chunks.
func NewLNetStore ¶ added in v0.4.2
NewLNetStore is a constructor for LNetStore
type LazyChunkReader ¶
type LazyChunkReader struct {
// contains filtered or unexported fields
}
LazyChunkReader implements LazySectionReader
func TreeJoin ¶
Join reconstructs original content based on a root key. When joining, the caller gets returned a Lazy SectionReader, which is seekable and implements on-demand fetching of chunks as and where it is read. New chunks to retrieve come from the getter, which the caller provides. If an error is encountered during joining, it appears as a reader error on the SectionReader; as a result, partial reads from a document are possible even if other parts are corrupt or lost. The chunks are not meant to be validated by the chunker when joining; this is left to the DPA, which decides which sources are trusted.
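A hedged sketch of random-access reads through the lazy reader (TreeJoin's full signature is an assumption; per ReadAt's documentation below, Size must be called first):

package example

import (
	"context"

	"github.com/ethersphere/swarm/storage"
)

// readSection fetches only the chunks needed to fill buf at offset off.
func readSection(ctx context.Context, addr storage.Address, getter storage.Getter, off int64, buf []byte) (int, error) {
	// TreeJoin(ctx, addr, getter, depth) returning a *LazyChunkReader is assumed.
	reader := storage.TreeJoin(ctx, addr, getter, 0)
	// Size must be called synchronously before concurrent ReadAt calls.
	if _, err := reader.Size(ctx, nil); err != nil {
		return 0, err
	}
	return reader.ReadAt(buf, off)
}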
func (*LazyChunkReader) Context ¶
func (r *LazyChunkReader) Context() context.Context
func (*LazyChunkReader) Read ¶
func (r *LazyChunkReader) Read(b []byte) (read int, err error)
Read keeps a cursor so it cannot be called simultaneously; see ReadAt.
func (*LazyChunkReader) ReadAt ¶
func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error)
ReadAt can be called numerous times; concurrent reads are allowed. Size() needs to be called synchronously on the LazyChunkReader first.
type LazySectionReader ¶
type LazySectionReader interface {
	Context() context.Context
	Size(context.Context, chan bool) (int64, error)
	io.Seeker
	io.Reader
	io.ReaderAt
}
LazySectionReader provides Size, Seek, Read and ReadAt.
type LazyTestSectionReader ¶
type LazyTestSectionReader struct {
*io.SectionReader
}
func (*LazyTestSectionReader) Context ¶
func (r *LazyTestSectionReader) Context() context.Context
type NetStore ¶
type NetStore struct {
	chunk.Store
	LocalID   enode.ID // our local enode - used when issuing RetrieveRequests
	RemoteGet RemoteGetFunc
	// contains filtered or unexported fields
}
NetStore is an extension of LocalStore. It implements the ChunkStore interface; on request, it initiates remote cloud retrieval.
func NewNetStore ¶
NewNetStore creates a new NetStore using the provided chunk.Store and localID of the node.
func (*NetStore) Get ¶
Get retrieves a chunk. If it is not found in the LocalStore, it uses RemoteGet to fetch it from the network.
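A hedged sketch of a network-backed get (NewRequest taking just the chunk address, and chunk.ModeGetRequest as the retrieval mode, are assumptions):

package example

import (
	"context"

	"github.com/ethersphere/swarm/chunk"
	"github.com/ethersphere/swarm/storage"
)

func fetch(ctx context.Context, n *storage.NetStore, addr storage.Address) (storage.Chunk, error) {
	req := storage.NewRequest(addr)
	// Get falls back to RemoteGet when the chunk is not stored locally.
	return n.Get(ctx, chunk.ModeGetRequest, req)
}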
func (*NetStore) GetOrCreateFetcher ¶ added in v0.4.2
func (n *NetStore) GetOrCreateFetcher(ctx context.Context, ref Address, interestedParty string) (f *Fetcher, loaded bool, ok bool)
GetOrCreateFetcher returns the Fetcher for a given chunk if this chunk is not in the LocalStore. If the chunk is in the LocalStore, it returns nil for the Fetcher and ok == false.
func (*NetStore) Has ¶ added in v0.4.2
Has is the storage layer entry point to query the underlying database whether it has a chunk or not.
func (*NetStore) Put ¶
Put stores a chunk in the localstore and delivers it to all requestor peers using the fetcher stored in the fetchers cache.
func (*NetStore) RemoteFetch ¶ added in v0.4.2
RemoteFetch handles the retry mechanism when making a chunk request to our peers. For a given chunk Request, we call RemoteGet, which selects the next eligible peer and issues a RetrieveRequest, and we wait for a delivery. If a delivery doesn't arrive within the SearchTimeout, we retry.
type Putter ¶
type Putter interface {
	Put(context.Context, ChunkData) (Reference, error)
	// RefSize returns the length of the Reference created by this Putter
	RefSize() int64
	// Close indicates that no more chunk data will be Put on this Putter
	Close()
	// Wait returns when all data has been stored and Close() was called.
	Wait(context.Context) error
}
Putter is responsible for storing data and creating a reference for it.
type PyramidChunker ¶
type PyramidChunker struct {
// contains filtered or unexported fields
}
func NewPyramidSplitter ¶
func NewPyramidSplitter(params *PyramidSplitterParams, tag *chunk.Tag) (pc *PyramidChunker)
func (*PyramidChunker) Join ¶
func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader
type PyramidSplitterParams ¶
type PyramidSplitterParams struct {
	SplitterParams
	// contains filtered or unexported fields
}
type RemoteGetFunc ¶ added in v0.4.2
type Request ¶ added in v0.4.2
type Request struct {
	Addr        Address  // chunk address
	Origin      enode.ID // who is sending us that request? we compare Origin to the suggested peer from RequestFromPeers
	PeersToSkip sync.Map // peers not to request chunk from
}
Request encapsulates all the necessary arguments when making a request to NetStore. These could have also been added as part of the interface of NetStore.Get, but a request struct seemed like a better option
func NewRequest ¶ added in v0.4.2
NewRequest returns a new instance of Request based on chunk address, skip check and a map of peers to skip.
type SplitterParams ¶
type SplitterParams struct {
	ChunkerParams
	// contains filtered or unexported fields
}
type StoreParams ¶
type StoreParams struct {
	Hash          SwarmHasher `toml:"-"`
	DbCapacity    uint64
	CacheCapacity uint
	BaseKey       []byte
}
func NewDefaultStoreParams ¶
func NewDefaultStoreParams() *StoreParams
func NewStoreParams ¶
func NewStoreParams(ldbCap uint64, cacheCap uint, hash SwarmHasher, basekey []byte) *StoreParams
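A small sketch of both constructors; the capacity values below are illustrative, not recommendations:

package example

import "github.com/ethersphere/swarm/storage"

func newParams(baseKey []byte) *storage.StoreParams {
	// Defaults with a custom base key:
	params := storage.NewDefaultStoreParams()
	params.BaseKey = baseKey

	// Or fully explicit; 5e6 chunks of DB capacity and a 10k-chunk cache
	// are arbitrary numbers for illustration.
	params = storage.NewStoreParams(5000000, 10000, storage.MakeHashFunc(storage.DefaultHash), baseKey)
	return params
}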
type SwarmHasher ¶
type SwarmHasher func() SwarmHash
func MakeHashFunc ¶
func MakeHashFunc(hash string) SwarmHasher
type TreeChunker ¶
type TreeChunker struct {
// contains filtered or unexported fields
}
func NewTreeJoiner ¶
func NewTreeJoiner(params *JoinerParams) *TreeChunker
func NewTreeSplitter ¶
func NewTreeSplitter(params *TreeSplitterParams) *TreeChunker
func (*TreeChunker) Join ¶
func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader
type TreeEntry ¶
type TreeEntry struct {
// contains filtered or unexported fields
}
TreeEntry is the entry used to create a tree node.
func NewTreeEntry ¶
func NewTreeEntry(pyramid *PyramidChunker) *TreeEntry
type TreeSplitterParams ¶
type TreeSplitterParams struct {
	SplitterParams
	// contains filtered or unexported fields
}
Source Files ¶
Directories ¶
Path | Synopsis
---|---
feeds | Package feeds defines Swarm Feeds.
feeds/lookup | Package lookup defines feed lookup algorithms and provides tools to place updates so they can be found.
localstore | Package localstore provides disk storage layer for Swarm Chunk persistence.
mock | Package mock defines types that are used by different implementations of mock storages.
mock/db | Package db implements a mock store that keeps all chunk data in LevelDB database.
mock/mem | Package mem implements a mock store that keeps all chunk data in memory.
mock/rpc | Package rpc implements an RPC client that connects to a centralized mock store.
mock/test | Package test provides functions that are used for testing GlobalStorer implementations.