Documentation ¶
Overview ¶
Copyright 2016 The go-ethereum Authors This file is part of the go-ethereum library.
The go-ethereum library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
The go-ethereum library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
Index ¶
- Constants
- Variables
- func AddrBucket(addr []byte) []byte
- func BytesToU64(data []byte) uint64
- func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore
- func StringSliceEqualBCE(a, b []byte) bool
- func U64ToBytes(val uint64) []byte
- type Address
- func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, ...) (Address, func(context.Context) error, error)
- func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error)
- func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error)
- type AddressCollection
- type Chunk
- type ChunkData
- type ChunkStore
- type ChunkValidator
- type ChunkerParams
- type ContentAddressValidator
- type DataCache
- type FakeChunkStore
- func (f *FakeChunkStore) Close()
- func (f *FakeChunkStore) Get(_ context.Context, ref Address) (Chunk, error)
- func (f *FakeChunkStore) Has(_ context.Context, ref Address) bool
- func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error
- func (f *FakeChunkStore) Validate(ch []byte, addr Address) bool
- type FileStore
- func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error)
- func (f *FileStore) HashSize() int
- func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool)
- func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error)
- type FileStoreParams
- type Getter
- type HashWithLength
- type JoinerParams
- type LDBDatabase
- func (db *LDBDatabase) Close()
- func (db *LDBDatabase) CompactRange(r util.Range) error
- func (db *LDBDatabase) Delete(key []byte) error
- func (db *LDBDatabase) Get(key []byte) ([]byte, error)
- func (db *LDBDatabase) NewIterator() iterator.Iterator
- func (db *LDBDatabase) Put(key []byte, value []byte) error
- func (db *LDBDatabase) Write(batch *leveldb.Batch) error
- type LDBStore
- func (s *LDBStore) BinIndex(po uint8) uint64
- func (s *LDBStore) CleanGCIndex() error
- func (s *LDBStore) Cleanup(f func(Chunk) bool)
- func (s *LDBStore) Close()
- func (s *LDBStore) Delete(addr Address) error
- func (s *LDBStore) Export(out io.Writer) (int64, error)
- func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error)
- func (s *LDBStore) GetSchema() (string, error)
- func (s *LDBStore) Has(_ context.Context, addr Address) bool
- func (s *LDBStore) Import(in io.Reader) (int64, error)
- func (s *LDBStore) MarkAccessed(addr Address)
- func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error
- func (s *LDBStore) PutSchema(schema string) error
- func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error
- func (s *LDBStore) VerifyHash(chunkData, address []byte) bool
- type LDBStoreParams
- type LazyChunkReader
- func (r *LazyChunkReader) Context() context.Context
- func (r *LazyChunkReader) Read(b []byte) (read int, err error)
- func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error)
- func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error)
- func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, err error)
- type LazySectionReader
- type LazyTestSectionReader
- type LocalStore
- func (ls *LocalStore) BinIndex(po uint8) uint64
- func (ls *LocalStore) Close()
- func (ls *LocalStore) FetchFunc(ctx context.Context, addr Address) func(context.Context) error
- func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error)
- func (ls *LocalStore) Has(ctx context.Context, addr Address) bool
- func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
- func (ls *LocalStore) Migrate() error
- func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error
- func (ls *LocalStore) Validate(ch []byte, addr Address) bool
- type LocalStoreParams
- type MemStore
- type NetFetcher
- type NetStore
- func (n *NetStore) BinIndex(po uint8) uint64
- func (n *NetStore) Close()
- func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Context) error
- func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error)
- func (n *NetStore) Has(ctx context.Context, ref Address) bool
- func (n *NetStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
- func (n *NetStore) Put(ctx context.Context, ch Chunk) error
- func (n *NetStore) RequestsCacheLen() int
- func (n *NetStore) Validate(ch []byte, ref Address) bool
- type NewNetFetcherFunc
- type Putter
- type PyramidChunker
- func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error)
- func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader
- func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error)
- type PyramidSplitterParams
- type Reference
- type ReportData
- type RpData
- type SplitterParams
- type StoreParams
- type SwarmHash
- type SwarmHasher
- type SyncChunkStore
- type TYPE_ENUM
- type TreeChunker
- type TreeEntry
- type TreeSplitterParams
- type WiredtigerDB
- func (db *WiredtigerDB) Close()
- func (db *WiredtigerDB) NewWtDeleteDataFunc() func(addr chunk.Address) (err error)
- func (db *WiredtigerDB) NewWtEncodeDataFunc() func(chunk chunk.Chunk) ([]byte, error)
- func (db *WiredtigerDB) NewWtGetDataFunc() func(addr chunk.Address) (data []byte, err error)
- func (db *WiredtigerDB) OpenDB()
Constants ¶
const (
    ErrInit = iota
    ErrNotFound
    ErrInvalidValue
    ErrDataOverflow
    ErrNothingToReturn
    ErrInvalidSignature
    ErrNotSynced
)
const (
    BMTHash  = "BMT"
    SHA3Hash = "SHA3" // http://golang.org/pkg/hash/#Hash

    DefaultHash = BMTHash
)
const AddressLength = chunk.AddressLength
AddressLength is the same as chunk.AddressLength for backward compatibility.
const (
ChunkProcessors = 64
)
const CurrentDbSchema = DbSchemaHalloween
The DB schema we want to use. The actual/current DB schema might differ until migrations are run.
const DbSchemaHalloween = "halloween"
"halloween" is here because we had a screw in the garbage collector index. Because of that we had to rebuild the GC index to get rid of erroneous entries and that takes a long time. This schema is used for bookkeeping, so rebuild index will run just once.
const DbSchemaNone = ""
There was a time when we had no schema at all.
const DbSchemaPurity = "purity"
"purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
const (
MAX_FILE_CACHE = 26214500 //256*1024*100+100
)
const MaxPO = chunk.MaxPO
MaxPO is the same as chunk.MaxPO for backward compatibility.
Variables ¶
var (
    ErrChunkNotFound = chunk.ErrChunkNotFound
    ErrChunkInvalid  = chunk.ErrChunkNotFound
)
Errors are the same as the ones in chunk package for backward compatibility.
var (
ErrDBClosed = errors.New("LDBStore closed")
)
var NewChunk = chunk.NewChunk
NewChunk is the same as chunk.NewChunk for backward compatibility.
var Proximity = chunk.Proximity
Proximity is the same as chunk.Proximity for backward compatibility.
var ZeroAddr = chunk.ZeroAddr
ZeroAddr is the same as chunk.ZeroAddr for backward compatibility.
Functions ¶
func AddrBucket ¶
func BytesToU64 ¶
func NewHasherStore ¶
func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore
NewHasherStore creates a hasherStore object, which implements the Putter and Getter interfaces. With the hasherStore you can put and get chunk data (which is just []byte) into a ChunkStore, and the hasherStore will take care of encryption/decryption of the data if necessary.
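For instance (a minimal sketch, assuming this package is imported as storage and reusing the non-storing FakeChunkStore defined below):

    hs := storage.NewHasherStore(&storage.FakeChunkStore{}, storage.MakeHashFunc(storage.DefaultHash), false)
    // hs satisfies both Putter and Getter and can be handed to the splitters
    // below; constructed with toEncrypt=true it would also encrypt the data.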
func StringSliceEqualBCE ¶
func U64ToBytes ¶
Types ¶
type Address ¶
Address is an alias for chunk.Address for backward compatibility.
func PyramidAppend ¶
func PyramidSplit ¶
func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error)
When splitting, data is given as a SectionReader, and the key is a hashSize-long byte slice (Address); the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter, which the caller provides.
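A hedged sketch of a split, reusing the hasherStore hs from the sketch above as both putter and getter (ctx is a context.Context; error handling abbreviated):

    data := bytes.NewReader(make([]byte, 1<<20)) // 1 MiB of zero bytes
    addr, wait, err := storage.PyramidSplit(ctx, data, hs, hs)
    if err != nil {
        // handle split error
    }
    if err := wait(ctx); err != nil {
        // handle storage error; wait blocks until all chunks are stored
    }
    _ = addr // root hash (Address) of the whole content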
func TreeSplit ¶
func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error)
When splitting, data is given as a SectionReader, and the key is a hashSize-long byte slice (Key); the root hash of the entire content will fill this once processing finishes. New chunks to store are stored using the putter, which the caller provides.
type AddressCollection ¶
type AddressCollection []Address
func NewAddressCollection ¶
func NewAddressCollection(l int) AddressCollection
func (AddressCollection) Len ¶
func (c AddressCollection) Len() int
func (AddressCollection) Less ¶
func (c AddressCollection) Less(i, j int) bool
func (AddressCollection) Swap ¶
func (c AddressCollection) Swap(i, j int)
type Chunk ¶
Chunk is an alias for chunk.Chunk for backward compatibility.
func GenerateRandomChunk ¶
func GenerateRandomChunks ¶
type ChunkStore ¶
type ChunkValidator ¶
type ChunkerParams ¶
type ChunkerParams struct {
// contains filtered or unexported fields
}
type ContentAddressValidator ¶
type ContentAddressValidator struct {
Hasher SwarmHasher
}
ContentAddressValidator provides a method for validating the content address of chunks. It holds the corresponding hasher used to create the address.
func NewContentAddressValidator ¶
func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator
NewContentAddressValidator is the constructor.
func (*ContentAddressValidator) Validate ¶
func (v *ContentAddressValidator) Validate(ch Chunk) bool
Validate checks that the given chunk's address is a valid content address for its data.
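A minimal sketch of a validation roundtrip (GenerateRandomChunk's parameter is assumed to be the chunk payload length in bytes):

    v := storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash))
    ch := storage.GenerateRandomChunk(4096) // chunk with random payload and matching address
    ok := v.Validate(ch)                    // true if the address matches the content
    _ = ok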
type FakeChunkStore ¶
type FakeChunkStore struct{}
FakeChunkStore doesn't store anything; it just implements the ChunkStore interface. It can be injected into a hasherStore if you don't want to actually store data, just do the hashing.
func (*FakeChunkStore) Close ¶
func (f *FakeChunkStore) Close()
Close doesn't do anything; it is just here to implement ChunkStore.
func (*FakeChunkStore) Has ¶
func (f *FakeChunkStore) Has(_ context.Context, ref Address) bool
Has doesn't do anything; it is just here to implement ChunkStore.
type FileStore ¶
type FileStore struct {
    ChunkStore
    // contains filtered or unexported fields
}
func NewFileStore ¶
func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore
func NewLocalFileStore ¶
NewLocalFileStore is a convenience constructor for testing locally.
func (*FileStore) GetAllReferences ¶
func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error)
GetAllReferences is a public API. This endpoint returns all chunk hashes (only) for a given file.
func (*FileStore) Retrieve ¶
func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool)
Retrieve is a public API. It is the main entry point for direct document retrieval, used by the FS-aware API and httpaccess. Chunk retrieval blocks on NetStore requests with a timeout, so the reader will report an error if retrieval of chunks within the requested range times out. It returns a reader over the chunk data and whether the content was encrypted.
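A sketch of a store-and-retrieve roundtrip, assuming localStore is any ChunkStore (e.g. a LocalStore) and content is a []byte:

    fs := storage.NewFileStore(localStore, storage.NewFileStoreParams())
    addr, wait, err := fs.Store(ctx, bytes.NewReader(content), int64(len(content)), false)
    if err != nil {
        // handle error
    }
    if err := wait(ctx); err != nil {
        // handle error; wait blocks until all chunks are stored
    }
    reader, _ := fs.Retrieve(ctx, addr)
    if _, err := reader.Size(ctx, nil); err != nil { // Size must be called before ReadAt
        // handle error
    }
    buf := make([]byte, len(content))
    _, err = reader.ReadAt(buf, 0)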
type FileStoreParams ¶
type FileStoreParams struct {
Hash string
}
func NewFileStoreParams ¶
func NewFileStoreParams() *FileStoreParams
type HashWithLength ¶
func (*HashWithLength) ResetWithLength ¶
func (h *HashWithLength) ResetWithLength(length []byte)
type JoinerParams ¶
type JoinerParams struct {
    ChunkerParams
    // contains filtered or unexported fields
}
type LDBDatabase ¶
type LDBDatabase struct {
// contains filtered or unexported fields
}
func NewLDBDatabase ¶
func NewLDBDatabase(file string) (*LDBDatabase, error)
func (*LDBDatabase) Close ¶
func (db *LDBDatabase) Close()
func (*LDBDatabase) CompactRange ¶
func (db *LDBDatabase) CompactRange(r util.Range) error
func (*LDBDatabase) Delete ¶
func (db *LDBDatabase) Delete(key []byte) error
func (*LDBDatabase) NewIterator ¶
func (db *LDBDatabase) NewIterator() iterator.Iterator
type LDBStore ¶
type LDBStore struct {
// contains filtered or unexported fields
}
func NewLDBStore ¶
func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error)
TODO: Instead of passing the distance function, just pass the address from which distances are calculated to avoid the appearance of a pluggable distance metric and opportunities of bugs associated with providing a function different from the one that is actually used.
func NewMockDbStore ¶
func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error)
NewMockDbStore creates a new instance of DbStore with mockStore set to a provided value. If mockStore argument is nil, this function behaves exactly as NewDbStore.
func (*LDBStore) CleanGCIndex ¶
CleanGCIndex rebuilds the garbage collector index from scratch, while removing inconsistent elements, e.g., indices with missing data chunks. WARN: it's a pretty heavy, long running function.
func (*LDBStore) Cleanup ¶
Cleanup iterates over the database and deletes chunks if they pass the `f` condition
func (*LDBStore) Export ¶
Export writes all chunks from the store to a tar archive, returning the number of chunks written.
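For example, a backup could look like this sketch (s an open *LDBStore; the destination path is hypothetical):

    out, err := os.Create("/tmp/chunks.tar") // hypothetical destination
    if err != nil {
        // handle error
    }
    defer out.Close()
    n, err := s.Export(out) // n is the number of chunks written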
func (*LDBStore) Get ¶
Get retrieves the chunk matching the provided key from the database. If the chunk entry does not exist, it returns an error. It updates the access count and is thread safe.
func (*LDBStore) GetSchema ¶
GetSchema returns the current named schema of the datastore, as read from LevelDB.
func (*LDBStore) Has ¶
Has queries the underlying DB whether a chunk with the given address is stored. It returns true if the chunk is found, false if not.
func (*LDBStore) MarkAccessed ¶
MarkAccessed increments the access counter as a best effort for a chunk, so the chunk won't get garbage collected.
func (*LDBStore) Put ¶
Put adds a chunk to the database, adding indices and incrementing global counters. If it already exists, it merely increments the access count of the existing entry. It is thread safe.
func (*LDBStore) SyncIterator ¶
func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error
SyncIterator(since, until, po, f) calls f on each hash in bin po, from since to until.
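A sketch of enumerating one bin (s an open *LDBStore; the bin number and bounds are arbitrary):

    err := s.SyncIterator(0, math.MaxUint64, 2, func(addr storage.Address, idx uint64) bool {
        fmt.Printf("po 2, index %d: %x\n", idx, addr)
        return true // return false to stop the iteration early
    })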
func (*LDBStore) VerifyHash ¶
type LDBStoreParams ¶
type LDBStoreParams struct {
    *StoreParams
    ChunkPath string
    InfoPath  string
    Po        func(Address) uint8
}
func NewLDBStoreParams ¶
func NewLDBStoreParams(storeparams *StoreParams, path string, infoPath string) *LDBStoreParams
NewLDBStoreParams constructs LDBStoreParams with the specified values.
type LazyChunkReader ¶
type LazyChunkReader struct {
// contains filtered or unexported fields
}
LazyChunkReader implements LazySectionReader
func TreeJoin ¶
Join reconstructs original content based on a root key. When joining, the caller gets returned a lazy SectionReader, which is seekable and implements on-demand fetching of chunks as and where it is read. New chunks to retrieve come from the getter, which the caller provides. If an error is encountered during joining, it appears as a reader error on the SectionReader; as a result, partial reads from a document are possible even if other parts are corrupt or lost. The chunks are not meant to be validated by the chunker when joining; this is because it is left to the DPA to decide which sources are trusted.
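A hedged sketch of joining; the index above omits TreeJoin's full signature, so the form TreeJoin(ctx, addr, getter, depth) returning a *LazyChunkReader is an assumption:

    r := storage.TreeJoin(ctx, addr, getter, 0)
    size, err := r.Size(ctx, nil)
    if err != nil {
        // handle error
    }
    buf := make([]byte, size)
    _, err = r.ReadAt(buf, 0) // chunks are fetched on demand via the getter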
func (*LazyChunkReader) Context ¶
func (r *LazyChunkReader) Context() context.Context
func (*LazyChunkReader) Read ¶
func (r *LazyChunkReader) Read(b []byte) (read int, err error)
Read keeps a cursor, so it cannot be called simultaneously; see ReadAt.
func (*LazyChunkReader) ReadAt ¶
func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error)
ReadAt can be called numerous times; concurrent reads are allowed. Size() needs to be called synchronously on the LazyChunkReader first.
type LazySectionReader ¶
type LazySectionReader interface {
    Context() context.Context
    Size(context.Context, chan bool) (int64, error)
    io.Seeker
    io.Reader
    io.ReaderAt
}
LazySectionReader provides Size, Seek, Read and ReadAt on lazily fetched content.
type LazyTestSectionReader ¶
type LazyTestSectionReader struct {
*io.SectionReader
}
func (*LazyTestSectionReader) Context ¶
func (r *LazyTestSectionReader) Context() context.Context
type LocalStore ¶
type LocalStore struct {
    Validators []ChunkValidator
    DbStore    *LDBStore
    // contains filtered or unexported fields
}
LocalStore is a combination of an in-memory db over a disk-persisted db. It implements Get/Put with fallback (caching) logic using any two ChunkStores.
func NewLocalStore ¶
func NewLocalStore(params *LocalStoreParams, mockStore *mock.NodeStore) (*LocalStore, error)
This constructor uses MemStore and DbStore as components
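A minimal construction sketch (the data directory is hypothetical; passing nil for the mock store selects the real DbStore):

    params := storage.NewDefaultLocalStoreParams()
    params.Init("/tmp/swarm-data") // hypothetical data directory
    ls, err := storage.NewLocalStore(params, nil)
    if err != nil {
        // handle error
    }
    defer ls.Close()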
func NewTestLocalStoreForAddr ¶
func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error)
func (*LocalStore) BinIndex ¶
func (ls *LocalStore) BinIndex(po uint8) uint64
func (*LocalStore) FetchFunc ¶
FetchFunc fetches a chunk from the local store; if it is not there, it returns a function that takes a context as its argument. When needed, this returned function can be run in a goroutine.
func (*LocalStore) Get ¶
Get looks up a chunk in the local stores. This method blocks until the chunk is retrieved, so an additional timeout may be needed to wrap this call if the ChunkStores are remote and can have long latency.
func (*LocalStore) Has ¶
func (ls *LocalStore) Has(ctx context.Context, addr Address) bool
Has queries the underlying DbStore whether a chunk with the given address is being stored there. It returns true if it is stored, false if not.
func (*LocalStore) Migrate ¶
func (ls *LocalStore) Migrate() error
Migrate checks the datastore schema vs the runtime schema and runs migrations if they don't match
func (*LocalStore) Put ¶
func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error
Put is responsible for validating and storing the chunk using the configured ChunkValidators, MemStore and LDBStore. If the chunk is not valid, its GetErrored function will return ErrChunkInvalid. This method checks whether the chunk is already in the MemStore and returns it if it is. If there is an error from MemStore.Get, it will be returned by calling GetErrored on the chunk. This method is responsible for closing the Chunk.ReqC channel when the chunk is stored in the MemStore. After LDBStore.Put, the MemStore is guaranteed to contain the chunk with the same data but a nil ReqC channel.
type LocalStoreParams ¶
type LocalStoreParams struct {
    *StoreParams
    ChunkInfoPath string
    ChunkDbPath   string
    Validators    []ChunkValidator `toml:"-"`
}
func NewDefaultLocalStoreParams ¶
func NewDefaultLocalStoreParams() *LocalStoreParams
func (*LocalStoreParams) Init ¶
func (p *LocalStoreParams) Init(path string)
This can only finally be set after all config options (file, command line, environment variables) have been evaluated.
type MemStore ¶
type MemStore struct {
// contains filtered or unexported fields
}
func NewMemStore ¶
func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore)
NewMemStore instantiates a MemStore that keeps all frequently requested chunks in the `cache` LRU cache.
type NetFetcher ¶
type NetStore ¶
type NetStore struct {
    NewNetFetcherFunc NewNetFetcherFunc // factory for creating Fetcher instances (Fetcher, not fetcher)
    // contains filtered or unexported fields
}
NetStore is an extension of local storage; it implements the ChunkStore interface. On request, it initiates remote cloud retrieval using a fetcher. Fetchers are unique to a chunk and are stored in the fetchers LRU memory cache. fetchFuncFactory is a factory object to create a fetch function for a specific chunk address.
func NewNetStore ¶
func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc, cacheSize int) (*NetStore, error)
NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a constructor function that can create a fetch function for a specific chunk address.
func (*NetStore) FetchFunc ¶
FetchFunc returns nil if the store contains the given address. Otherwise it returns a wait function, which returns after the chunk is available or the context is done
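A sketch of the intended calling pattern (n an existing *NetStore, ref a chunk Address):

    if wait := n.FetchFunc(ctx, ref); wait != nil {
        // the chunk is not local: block until it arrives or ctx is done
        if err := wait(ctx); err != nil {
            // handle error
        }
    }
    // the chunk can now be read with n.Get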
func (*NetStore) Get ¶
Get retrieves the chunk from the NetStore DPA synchronously. It calls NetStore.get, and if the chunk is not in local storage it calls fetch with the request, which blocks until the chunk arrives or the context is done.
func (*NetStore) Has ¶
Has is the storage layer entry point to query the underlying database whether it has a chunk or not. It is called from the DebugAPI.
func (*NetStore) Put ¶
Put stores a chunk in the localstore and delivers it to all requestor peers using the fetcher stored in the fetchers cache.
func (*NetStore) RequestsCacheLen ¶
RequestsCacheLen returns the current number of outgoing requests stored in the cache
type NewNetFetcherFunc ¶
type Putter ¶
type Putter interface {
    Put(context.Context, ChunkData) (Reference, error)
    // RefSize returns the length of the Reference created by this Putter
    RefSize() int64
    // Close indicates that no more chunk data will be Put on this Putter
    Close()
    // Wait returns when all data has been stored and Close() was called.
    Wait(context.Context) error
}
Putter is responsible for storing data and creating a reference for it.
type PyramidChunker ¶
type PyramidChunker struct {
// contains filtered or unexported fields
}
func NewPyramidSplitter ¶
func NewPyramidSplitter(params *PyramidSplitterParams) (pc *PyramidChunker)
func (*PyramidChunker) Join ¶
func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader
type PyramidSplitterParams ¶
type PyramidSplitterParams struct {
    SplitterParams
    // contains filtered or unexported fields
}
type ReportData ¶
type SplitterParams ¶
type SplitterParams struct {
    ChunkerParams
    // contains filtered or unexported fields
}
type StoreParams ¶
type StoreParams struct {
    Hash          SwarmHasher `toml:"-"`
    DbCapacity    uint64
    CacheCapacity uint
    BaseKey       []byte
}
func NewDefaultStoreParams ¶
func NewDefaultStoreParams() *StoreParams
func NewStoreParams ¶
func NewStoreParams(ldbCap uint64, cacheCap uint, hash SwarmHasher, basekey []byte) *StoreParams
type SwarmHasher ¶
type SwarmHasher func() SwarmHash
func MakeHashFunc ¶
func MakeHashFunc(hash string) SwarmHasher
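A sketch of hashing data the way ContentAddressValidator does; the 8-byte little-endian length prefix passed to ResetWithLength is an assumption drawn from that validator's behaviour:

    h := storage.MakeHashFunc(storage.DefaultHash)() // a SwarmHash instance
    span := make([]byte, 8)
    binary.LittleEndian.PutUint64(span, uint64(len(data)))
    h.ResetWithLength(span) // reset with the data length prefix
    h.Write(data)
    addr := h.Sum(nil) // content address of data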
type SyncChunkStore ¶
type SyncChunkStore interface {
    ChunkStore
    BinIndex(po uint8) uint64
    Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
    FetchFunc(ctx context.Context, ref Address) func(context.Context) error
}
SyncChunkStore is a ChunkStore which supports syncing
type TreeChunker ¶
type TreeChunker struct {
// contains filtered or unexported fields
}
func NewTreeJoiner ¶
func NewTreeJoiner(params *JoinerParams) *TreeChunker
func NewTreeSplitter ¶
func NewTreeSplitter(params *TreeSplitterParams) *TreeChunker
func (*TreeChunker) Join ¶
func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader
type TreeEntry ¶
type TreeEntry struct {
// contains filtered or unexported fields
}
Entry to create a tree node
func NewTreeEntry ¶
func NewTreeEntry(pyramid *PyramidChunker) *TreeEntry
type TreeSplitterParams ¶
type TreeSplitterParams struct {
    SplitterParams
    // contains filtered or unexported fields
}
type WiredtigerDB ¶
type WiredtigerDB struct {
// contains filtered or unexported fields
}
func NewDatabase ¶
func NewDatabase(filePath string, shardOverlay int) *WiredtigerDB
func (*WiredtigerDB) Close ¶
func (db *WiredtigerDB) Close()
func (*WiredtigerDB) NewWtDeleteDataFunc ¶
func (db *WiredtigerDB) NewWtDeleteDataFunc() func(addr chunk.Address) (err error)
func (*WiredtigerDB) NewWtEncodeDataFunc ¶
func (db *WiredtigerDB) NewWtEncodeDataFunc() func(chunk chunk.Chunk) ([]byte, error)
NewWtEncodeDataFunc returns a function that stores the chunk data in a mock store, bypassing the default encodeData functionality. The constructed function always returns nil data, as the DbStore does not need to store the data but still needs to create the index.
func (*WiredtigerDB) NewWtGetDataFunc ¶
func (db *WiredtigerDB) NewWtGetDataFunc() func(addr chunk.Address) (data []byte, err error)
func (*WiredtigerDB) OpenDB ¶
func (db *WiredtigerDB) OpenDB()
Source Files ¶
Directories ¶
Path | Synopsis
---|---
feed | Package feeds defines Swarm Feeds.
feed/lookup | Package lookup defines feed lookup algorithms and provides tools to place updates so they can be found.
localstore | Package localstore provides the disk storage layer for Swarm chunk persistence.
mock | Package mock defines types that are used by different implementations of mock storages.
mock/db | Package db implements a mock store that keeps all chunk data in a LevelDB database.
mock/mem | Package mem implements a mock store that keeps all chunk data in memory.
mock/rpc | Package rpc implements an RPC client that connects to a centralized mock store.
mock/test | Package test provides functions that are used for testing GlobalStorer implementations.