Documentation
¶
Index ¶
- Constants
- Variables
- type Downloader
- type SegmentDownloader
- type SegmentUploader
- type UploadOption
- type UploadTask
- type Uploader
- func (uploader *Uploader) BatchUpload(datas []core.IterableData, waitForLogEntry bool, option ...[]UploadOption) (common.Hash, []common.Hash, error)
- func (uploader *Uploader) NewSegmentUploader(data core.IterableData, tree *merkle.Tree, startSegIndex uint64, taskSize uint) *SegmentUploader
- func (uploader *Uploader) SubmitLogEntry(datas []core.IterableData, tags [][]byte, waitForReceipt bool) (common.Hash, *types.Receipt, error)
- func (uploader *Uploader) Upload(data core.IterableData, option ...UploadOption) error
- func (uploader *Uploader) UploadFile(data core.IterableData, tree *merkle.Tree, segIndex uint64, taskSize uint) error
Constants ¶
View Source
const SubmitEventHash = "0x167ce04d2aa1981994d3a31695da0d785373335b1078cec239a1a3a2c7675555"
Variables ¶
View Source
var DataAlreadyExistsError = "Invalid params: root; data: already uploaded and finalized"
View Source
var SegmentAlreadyExistsError = "segment has already been uploaded or is being uploaded"
Functions ¶
This section is empty.
Types ¶
type Downloader ¶
type Downloader struct {
// contains filtered or unexported fields
}
func NewDownloader ¶
func NewDownloader(clients ...*node.Client) (*Downloader, error)
type SegmentDownloader ¶
type SegmentDownloader struct {
// contains filtered or unexported fields
}
func NewSegmentDownloader ¶
func NewSegmentDownloader(clients []*node.Client, shardConfigs []*node.ShardConfig, file *download.DownloadingFile, withProof bool) (*SegmentDownloader, error)
func (*SegmentDownloader) Download ¶
func (downloader *SegmentDownloader) Download() error
Download downloads segments in parallel.
func (*SegmentDownloader) ParallelCollect ¶
func (downloader *SegmentDownloader) ParallelCollect(result *parallel.Result) error
ParallelCollect implements the parallel.Interface interface.
func (*SegmentDownloader) ParallelDo ¶
func (downloader *SegmentDownloader) ParallelDo(routine, task int) (interface{}, error)
ParallelDo implements the parallel.Interface interface.
type SegmentUploader ¶
type SegmentUploader struct {
// contains filtered or unexported fields
}
func (*SegmentUploader) ParallelCollect ¶
func (uploader *SegmentUploader) ParallelCollect(result *parallel.Result) error
ParallelCollect implements parallel.Interface.
func (*SegmentUploader) ParallelDo ¶
func (uploader *SegmentUploader) ParallelDo(routine int, task int) (interface{}, error)
ParallelDo implements parallel.Interface.
type UploadOption ¶
type UploadTask ¶ added in v0.2.0
type UploadTask struct {
// contains filtered or unexported fields
}
type Uploader ¶
type Uploader struct {
// contains filtered or unexported fields
}
func NewUploader ¶
func (*Uploader) BatchUpload ¶
func (uploader *Uploader) BatchUpload(datas []core.IterableData, waitForLogEntry bool, option ...[]UploadOption) (common.Hash, []common.Hash, error)
upload data (batched into one blockchain transaction if there is more than one file)
func (*Uploader) NewSegmentUploader ¶ added in v0.2.0
func (uploader *Uploader) NewSegmentUploader(data core.IterableData, tree *merkle.Tree, startSegIndex uint64, taskSize uint) *SegmentUploader
func (*Uploader) SubmitLogEntry ¶
func (*Uploader) Upload ¶
func (uploader *Uploader) Upload(data core.IterableData, option ...UploadOption) error
func (*Uploader) UploadFile ¶
func (uploader *Uploader) UploadFile(data core.IterableData, tree *merkle.Tree, segIndex uint64, taskSize uint) error
TODO: add error tolerance.
Source Files
¶
Click to show internal directories.
Click to hide internal directories.