Documentation ¶
Index ¶
- Constants
- Variables
- func ComputePaddedSize(chunks uint64) (uint64, uint64)
- func Exists(name string) (bool, error)
- func IteratorPaddedSize(dataSize int64, flowPadding bool) uint64
- func MerkleTree(data IterableData) (*merkle.Tree, error)
- func NumSegmentsPadded(data IterableData) int
- func NumSplits(total int64, unit int) uint64
- func ReadAt(data IterableData, readSize int, offset int64, paddedSize uint64) ([]byte, error)
- func SegmentRoot(chunks []byte, emptyChunksPadded ...uint64) common.Hash
- type DataInMemory
- func (data *DataInMemory) Iterate(offset int64, batch int64, flowPadding bool) Iterator
- func (data *DataInMemory) NumChunks() uint64
- func (data *DataInMemory) NumSegments() uint64
- func (data *DataInMemory) PaddedSize() uint64
- func (data *DataInMemory) Read(buf []byte, offset int64) (int, error)
- func (data *DataInMemory) Size() int64
- type File
- type FileIterator
- type Flow
- type IterableData
- type Iterator
- type MemoryDataIterator
- type TreeBuilderInitializer
Constants ¶
View Source
const ( // DefaultChunkSize represents the default chunk size in bytes. DefaultChunkSize = 256 // DefaultSegmentMaxChunks represents the default maximum number of chunks within a segment. DefaultSegmentMaxChunks = 1024 // DefaultSegmentSize represents the default segment size in bytes. DefaultSegmentSize = DefaultChunkSize * DefaultSegmentMaxChunks )
Variables ¶
View Source
var ( EmptyChunk = make([]byte, DefaultChunkSize) EmptyChunkHash = crypto.Keccak256Hash(EmptyChunk) )
View Source
var ( // ErrFileRequired is returned when manipulating a folder where a file is required. ErrFileRequired = errors.New("file required") // ErrFileEmpty is returned when an empty file is opened. ErrFileEmpty = errors.New("file is empty") )
Functions ¶
func ComputePaddedSize ¶
func IteratorPaddedSize ¶
func MerkleTree ¶
func MerkleTree(data IterableData) (*merkle.Tree, error)
func NumSegmentsPadded ¶
func NumSegmentsPadded(data IterableData) int
Types ¶
type DataInMemory ¶
type DataInMemory struct {
// contains filtered or unexported fields
}
func NewDataInMemory ¶
func NewDataInMemory(data []byte) (*DataInMemory, error)
func (*DataInMemory) Iterate ¶
func (data *DataInMemory) Iterate(offset int64, batch int64, flowPadding bool) Iterator
func (*DataInMemory) NumChunks ¶
func (data *DataInMemory) NumChunks() uint64
func (*DataInMemory) NumSegments ¶
func (data *DataInMemory) NumSegments() uint64
func (*DataInMemory) PaddedSize ¶
func (data *DataInMemory) PaddedSize() uint64
func (*DataInMemory) Size ¶
func (data *DataInMemory) Size() int64
type FileIterator ¶
type FileIterator struct {
// contains filtered or unexported fields
}
func (*FileIterator) Current ¶
func (it *FileIterator) Current() []byte
func (*FileIterator) Next ¶
func (it *FileIterator) Next() (bool, error)
type Flow ¶
type Flow struct {
// contains filtered or unexported fields
}
func NewFlow ¶
func NewFlow(data IterableData, tags []byte) *Flow
func (*Flow) CreateSubmission ¶
func (flow *Flow) CreateSubmission() (*contract.Submission, error)
type IterableData ¶
type MemoryDataIterator ¶
type MemoryDataIterator struct {
// contains filtered or unexported fields
}
func (*MemoryDataIterator) Current ¶
func (it *MemoryDataIterator) Current() []byte
func (*MemoryDataIterator) Next ¶
func (it *MemoryDataIterator) Next() (bool, error)
type TreeBuilderInitializer ¶
type TreeBuilderInitializer struct {
// contains filtered or unexported fields
}
func (*TreeBuilderInitializer) ParallelCollect ¶
func (t *TreeBuilderInitializer) ParallelCollect(result *parallel.Result) error
ParallelCollect implements parallel.Interface.
func (*TreeBuilderInitializer) ParallelDo ¶
func (t *TreeBuilderInitializer) ParallelDo(routine int, task int) (interface{}, error)
ParallelDo implements parallel.Interface.
Source Files ¶
Click to show internal directories.
Click to hide internal directories.