sdk

package
v1.7.5
Published: Mar 12, 2022 License: MIT Imports: 58 Imported by: 10

Documentation

Index

Constants

const (
	KB = 1024
	MB = 1024 * KB
	GB = 1024 * MB
)
const (
	// EncryptedDataPaddingSize additional bytes to save encrypted data
	EncryptedDataPaddingSize = 16
	// EncryptionHeaderSize encryption header size in chunk: PRE.MessageChecksum(128)+PRE.OverallChecksum(128)
	EncryptionHeaderSize = 128 + 128
	// ReEncryptionHeaderSize re-encryption header size in chunk
	ReEncryptionHeaderSize = 256
)
const (
	DOWNLOAD_CONTENT_FULL  = "full"
	DOWNLOAD_CONTENT_THUMB = "thumbnail"
)
const (
	OpUpload   int = 0
	OpDownload int = 1
	OpRepair   int = 2
	OpUpdate   int = 3
)
const (
	Upload      = "Upload"
	Download    = "Download"
	Update      = "Update"
	Delete      = "Delete"
	Conflict    = "Conflict"
	LocalDelete = "LocalDelete"
)

For sync app

const CHUNK_SIZE = 64 * 1024

const DefaultBlocksPerMarker int = 10

const DefaultChunkSize = 64 * 1024

DefaultChunkSize default chunk size for file and thumbnail

const NETWORK_ENDPOINT = "/network"
const STORAGE_SCADDRESS = "6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d7"

Variables

var (
	// DefaultHashFunc default hash method for stream merkle tree
	DefaultHashFunc = func(left, right string) string {
		return coreEncryption.Hash(left + right)
	}

	ErrInvalidChunkSize = errors.New("chunk: chunk size is too small. it must greater than 272 if file is uploaded with encryption")
)
var (
	// ErrClispIsNotReady clips file is not ready
	ErrClispIsNotReady = errors.New("live: clips is not ready")
)
var GetFileInfo = func(localpath string) (os.FileInfo, error) {
	return os.Stat(localpath)
}

Functions

func AddBlockDownloadReq

func AddBlockDownloadReq(req *BlockDownloadRequest)

func AddCommitRequest

func AddCommitRequest(req *CommitRequest)

func AddCurator

func AddCurator(curatorId, allocationId string) (string, error)

func AddFreeStorageAssigner

func AddFreeStorageAssigner(name, publicKey string, individualLimit, totalLimit float64) error

func CancelAllocation

func CancelAllocation(allocID string) (hash string, err error)

func CollectRewards added in v1.7.1

func CollectRewards(poolId string, providerType ProviderType) (string, error)

func CommitToFabric

func CommitToFabric(metaTxnData, fabricConfigJSON string) (string, error)

func CreateAllocation

func CreateAllocation(datashards, parityshards int, size, expiry int64,
	readPrice, writePrice PriceRange, mcct time.Duration, lock int64) (
	string, error)
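
For illustration, a minimal sketch of creating an allocation (it assumes the package is imported as sdk, that InitStorageSDK has already been called, and that the wallet is funded; all numeric values, and the interpretation of expiry as a Unix timestamp, are placeholders/assumptions):

	readPrice := sdk.PriceRange{Min: 0, Max: 100}
	writePrice := sdk.PriceRange{Min: 0, Max: 1000}
	// assumed: expiry is a Unix timestamp; here ~30 days from now
	expiry := time.Now().Add(30 * 24 * time.Hour).Unix()

	txHash, err := sdk.CreateAllocation(
		2, 2, // data shards, parity shards
		2*sdk.GB, expiry, // size in bytes, expiry
		readPrice, writePrice,
		200*time.Hour, // mcct (max challenge completion time)
		10000000000,   // lock amount (placeholder, in the chain's smallest token unit)
	)
	if err != nil {
		// handle error
	}
	_ = txHash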

func CreateAllocationForOwner

func CreateAllocationForOwner(owner, ownerpublickey string,
	datashards, parityshards int, size, expiry int64,
	readPrice, writePrice PriceRange, mcct time.Duration,
	lock int64, preferredBlobbers []string) (hash string, err error)

func CreateAllocationWithBlobbers added in v1.2.82

func CreateAllocationWithBlobbers(datashards, parityshards int, size, expiry int64,
	readPrice, writePrice PriceRange, mcct time.Duration, lock int64, blobbers []string) (
	string, error)

func CreateFreeAllocation

func CreateFreeAllocation(marker string, value int64) (string, error)

func CreateFreeUpdateAllocation

func CreateFreeUpdateAllocation(marker, allocationId string, value int64) (string, error)

func CreateReadPool

func CreateReadPool() (err error)

func CuratorTransferAllocation

func CuratorTransferAllocation(allocationId, newOwner, newOwnerPublicKey string) (string, error)

func FinalizeAllocation

func FinalizeAllocation(allocID string) (hash string, err error)

func GetAllocationMinLock

func GetAllocationMinLock(datashards, parityshards int, size, expiry int64,
	readPrice, writePrice PriceRange, mcct time.Duration) (int64, error)

func GetAllocationUpdates added in v1.4.0

func GetAllocationUpdates(allocation *Allocation) error

func GetClientEncryptedPublicKey

func GetClientEncryptedPublicKey() (string, error)

func GetLogger

func GetLogger() *logger.Logger

func GetMptData added in v1.2.82

func GetMptData(key string) ([]byte, error)

func GetVersion

func GetVersion() string

GetVersion - returns version string

func InitBlockDownloader

func InitBlockDownloader(blobbers []*blockchain.StorageNode)

func InitCommitWorker

func InitCommitWorker(blobbers []*blockchain.StorageNode)

func InitStorageSDK

func InitStorageSDK(walletJSON string, blockWorker, chainID, signatureScheme string, preferredBlobbers []string) error

InitStorageSDK init storage sdk with walletJSON

  {
		"client_id":"322d1dadec182effbcbdeef77d84f",
		"client_key":"3b6d02a22ec82d4d9aa1402917ca2",
		"keys":[{
			"public_key":"3b6d02a22ec82d4d9aa1402917ca268",
			"private_key":"25f2e1355d3864de01aba0bfec3702"
			}],
		"mnemonics":"double wink spin mushroom thing notable trumpet chapter",
		"version":"1.0",
		"date_created":"2021-08-18T08:34:39+08:00"
	 }
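
A minimal initialization sketch (assumes the package is imported as sdk; the block worker URL, chain ID, and signature scheme below are placeholders taken from your network configuration, not values defined by this package):

	walletJSON := `{ ... }` // the wallet JSON shown above

	err := sdk.InitStorageSDK(
		walletJSON,
		"https://<network-host>/dns", // blockWorker (placeholder)
		"<chain-id>",                 // chainID (placeholder)
		"bls0chain",                  // signatureScheme (assumed; use your network's scheme)
		nil,                          // preferredBlobbers (optional)
	)
	if err != nil {
		// handle error
	}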

func NewSignalContext added in v1.3.0

func NewSignalContext(ctx context.Context) context.Context

NewSignalContext creates a SignalContext instance

func ReadPoolLock

func ReadPoolLock(dur time.Duration, allocID, blobberID string,
	tokens, fee int64) (err error)

ReadPoolLock locks the given number of tokens for the given duration in the read pool.
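
For example, a sketch of locking tokens in an allocation's read pool (assumes the package is imported as sdk; the allocation ID, token amount, and fee are placeholders, and an empty blobberID is assumed to mean the lock is not tied to a specific blobber):

	// lock tokens in the read pool for ~30 days (all values are placeholders)
	err := sdk.ReadPoolLock(
		30*24*time.Hour,   // dur
		"<allocation-id>", // allocID
		"",                // blobberID (assumed: empty means not blobber-specific)
		10000000000,       // tokens (smallest token unit, placeholder)
		0,                 // fee
	)
	if err != nil {
		// handle error
	}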

func ReadPoolUnlock

func ReadPoolUnlock(poolID string, fee int64) (err error)

ReadPoolUnlock unlocks tokens in an expired read pool.

func RemoveCurator added in v1.2.85

func RemoveCurator(curatorId, allocationId string) (string, error)

func SetLogFile

func SetLogFile(logFile string, verbose bool)

SetLogFile sets the log file. logFile - log file path; verbose - true enables console output, false disables it.

func SetLogLevel

func SetLogLevel(lvl int)

SetLogLevel sets the log level. lvl - 0 disables logging; higher numbers (up to 4) increase verbosity.

func SetMaxTxnQuery

func SetMaxTxnQuery(num int)

func SetMinConfirmation

func SetMinConfirmation(num int)

func SetMinSubmit

func SetMinSubmit(num int)

func SetNetwork

func SetNetwork(miners []string, sharders []string)

func SetNumBlockDownloads

func SetNumBlockDownloads(num int)

func SetQuerySleepTime

func SetQuerySleepTime(time int)

func StakePoolLock

func StakePoolLock(blobberID string, value, fee int64) (poolID string, err error)

StakePoolLock locks tokens in a blobber's stake pool.

func StakePoolUnlock

func StakePoolUnlock(
	blobberID, poolID string, fee int64,
) (unstake bool, err error)

StakePoolUnlock unlocks stake pool tokens. If the tokens can't be unlocked due to open offers, it returns the time at which they can be unlocked and marks the pool as 'want to unlock' so it is not used for future offers. The returned time is a maximum; in some cases the tokens may become available sooner. If the tokens can't be unlocked now, wait until that time and call this function again.

func UpdateAllocation

func UpdateAllocation(size int64, expiry int64, allocationID string,
	lock int64, setImmutable, updateTerms bool) (hash string, err error)

func UpdateBlobberSettings

func UpdateBlobberSettings(blob *Blobber) (resp string, err error)

func UpdateNetworkDetails

func UpdateNetworkDetails() error

func UpdateNetworkDetailsWorker

func UpdateNetworkDetailsWorker(ctx context.Context)

func UpdateRequired

func UpdateRequired(networkDetails *Network) bool

func WritePoolLock

func WritePoolLock(dur time.Duration, allocID, blobberID string,
	tokens, fee int64) (err error)

WritePoolLock locks the given number of tokens for the given duration in the write pool.

func WritePoolUnlock

func WritePoolUnlock(poolID string, fee int64) (err error)

WritePoolUnlock unlocks tokens in an expired write pool.

Types

type Allocation

type Allocation struct {
	ID             string                    `json:"id"`
	Tx             string                    `json:"tx"`
	DataShards     int                       `json:"data_shards"`
	ParityShards   int                       `json:"parity_shards"`
	Size           int64                     `json:"size"`
	Expiration     int64                     `json:"expiration_date"`
	Owner          string                    `json:"owner_id"`
	OwnerPublicKey string                    `json:"owner_public_key"`
	Payer          string                    `json:"payer_id"`
	Blobbers       []*blockchain.StorageNode `json:"blobbers"`
	Stats          *AllocationStats          `json:"stats"`
	TimeUnit       time.Duration             `json:"time_unit"`
	IsImmutable    bool                      `json:"is_immutable"`

	// BlobberDetails contains the real terms used for the allocation.
	// If the allocation has been updated, the terms are calculated using
	// weighted average values.
	BlobberDetails []*BlobberAllocation `json:"blobber_details"`

	// ReadPriceRange is the requested read price range.
	ReadPriceRange PriceRange `json:"read_price_range"`
	// WritePriceRange is the requested write price range.
	WritePriceRange PriceRange `json:"write_price_range"`

	ChallengeCompletionTime time.Duration    `json:"challenge_completion_time"`
	StartTime               common.Timestamp `json:"start_time"`
	Finalized               bool             `json:"finalized,omitempty"`
	Canceled                bool             `json:"canceled,omitempty"`
	MovedToChallenge        common.Balance   `json:"moved_to_challenge,omitempty"`
	MovedBack               common.Balance   `json:"moved_back,omitempty"`
	MovedToValidators       common.Balance   `json:"moved_to_validators,omitempty"`
	Curators                []string         `json:"curators"`
	// contains filtered or unexported fields
}

func GetAllocation

func GetAllocation(allocationID string) (*Allocation, error)
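
A short usage sketch (assumes InitStorageSDK has been called, the package is imported as sdk, and fmt is imported; the allocation ID is a placeholder):

	allocationObj, err := sdk.GetAllocation("<allocation-id>")
	if err != nil {
		// handle error
	}

	// list the allocation's root directory
	listResult, err := allocationObj.ListDir("/")
	if err != nil {
		// handle error
	}
	for _, child := range listResult.Children {
		fmt.Println(child.Path, child.Type, child.Size)
	}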

func GetAllocationFromAuthTicket

func GetAllocationFromAuthTicket(authTicket string) (*Allocation, error)

func GetAllocations

func GetAllocations() ([]*Allocation, error)

func GetAllocationsForClient

func GetAllocationsForClient(clientID string) ([]*Allocation, error)

func (*Allocation) AddCollaborator

func (a *Allocation) AddCollaborator(filePath, collaboratorID string) error

func (*Allocation) CancelDownload

func (a *Allocation) CancelDownload(remotepath string) error

func (*Allocation) CancelRepair

func (a *Allocation) CancelRepair() error

func (*Allocation) CancelUpload

func (a *Allocation) CancelUpload(localpath string) error

func (*Allocation) CommitFolderChange

func (a *Allocation) CommitFolderChange(operation, preValue, currValue string) (string, error)

func (*Allocation) CommitMetaTransaction

func (a *Allocation) CommitMetaTransaction(path, crudOperation, authTicket, lookupHash string, fileMeta *ConsolidatedFileMeta, status StatusCallback) (err error)

func (*Allocation) CopyObject

func (a *Allocation) CopyObject(path string, destPath string) error

func (*Allocation) CreateDir

func (a *Allocation) CreateDir(dirName string) error

func (*Allocation) DeleteFile

func (a *Allocation) DeleteFile(path string) error

func (*Allocation) DownloadFile

func (a *Allocation) DownloadFile(localPath string, remotePath string, status StatusCallback) error

func (*Allocation) DownloadFileByBlock

func (a *Allocation) DownloadFileByBlock(localPath string, remotePath string, startBlock int64, endBlock int64, numBlocks int, status StatusCallback) error

func (*Allocation) DownloadFromAuthTicket

func (a *Allocation) DownloadFromAuthTicket(localPath string, authTicket string,
	remoteLookupHash string, remoteFilename string, rxPay bool,
	status StatusCallback) error

func (*Allocation) DownloadFromAuthTicketByBlocks

func (a *Allocation) DownloadFromAuthTicketByBlocks(localPath string,
	authTicket string, startBlock int64, endBlock int64, numBlocks int,
	remoteLookupHash string, remoteFilename string, rxPay bool,
	status StatusCallback) error

func (*Allocation) DownloadThumbnail

func (a *Allocation) DownloadThumbnail(localPath string, remotePath string, status StatusCallback) error

func (*Allocation) DownloadThumbnailFromAuthTicket

func (a *Allocation) DownloadThumbnailFromAuthTicket(localPath string,
	authTicket string, remoteLookupHash string, remoteFilename string,
	rxPay bool, status StatusCallback) error

func (*Allocation) EncryptAndUpdateFile

func (a *Allocation) EncryptAndUpdateFile(workdir string, localpath string, remotepath string,
	attrs fileref.Attributes, status StatusCallback) error

EncryptAndUpdateFile is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) EncryptAndUpdateFileWithThumbnail

func (a *Allocation) EncryptAndUpdateFileWithThumbnail(workdir string, localpath string,
	remotepath string, thumbnailpath string, attrs fileref.Attributes, status StatusCallback) error

EncryptAndUpdateFileWithThumbnail is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) EncryptAndUploadFile

func (a *Allocation) EncryptAndUploadFile(workdir string, localpath string, remotepath string,
	attrs fileref.Attributes, status StatusCallback) error

EncryptAndUploadFile is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) EncryptAndUploadFileWithThumbnail

func (a *Allocation) EncryptAndUploadFileWithThumbnail(
	workdir string,
	localpath string,
	remotepath string,
	thumbnailpath string,
	attrs fileref.Attributes,
	status StatusCallback,
) error

EncryptAndUploadFileWithThumbnail is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) GetAllocationDiff

func (a *Allocation) GetAllocationDiff(lastSyncCachePath string, localRootPath string, localFileFilters []string, remoteExcludePath []string) ([]FileDiff, error)

func (*Allocation) GetAuthTicket

func (a *Allocation) GetAuthTicket(path, filename, referenceType, refereeClientID, refereeEncryptionPublicKey string, expiration int64) (string, error)

func (*Allocation) GetAuthTicketForShare

func (a *Allocation) GetAuthTicketForShare(path string, filename string, referenceType string, refereeClientID string) (string, error)

func (*Allocation) GetBlobberStats

func (a *Allocation) GetBlobberStats() map[string]*BlobberAllocationStats

func (*Allocation) GetFileMeta

func (a *Allocation) GetFileMeta(path string) (*ConsolidatedFileMeta, error)

func (*Allocation) GetFileMetaFromAuthTicket

func (a *Allocation) GetFileMetaFromAuthTicket(authTicket string, lookupHash string) (*ConsolidatedFileMeta, error)

func (*Allocation) GetFileStats

func (a *Allocation) GetFileStats(path string) (map[string]*FileStats, error)

func (*Allocation) GetMaxStorageCost

func (a *Allocation) GetMaxStorageCost(size int64) (float64, error)

func (*Allocation) GetMaxStorageCostFromBlobbers added in v1.2.82

func (a *Allocation) GetMaxStorageCostFromBlobbers(size int64, blobbers []*BlobberAllocation) (float64, error)

func (*Allocation) GetMaxWriteRead

func (a *Allocation) GetMaxWriteRead() (maxW float64, maxR float64, err error)

func (*Allocation) GetMaxWriteReadFromBlobbers added in v1.2.82

func (a *Allocation) GetMaxWriteReadFromBlobbers(blobbers []*BlobberAllocation) (maxW float64, maxR float64, err error)

func (*Allocation) GetMinStorageCost

func (a *Allocation) GetMinStorageCost(size int64) (common.Balance, error)

func (*Allocation) GetMinWriteRead

func (a *Allocation) GetMinWriteRead() (minW float64, minR float64, err error)

func (*Allocation) GetRefs added in v1.3.0

func (a *Allocation) GetRefs(path, offsetPath, updatedDate, offsetDate, fileType, refType string, level, pageLimit int) (*ObjectTreeResult, error)

This function retrieves the paginated object tree and handles consensus; the required tree should be built on the application side. TODO: use allocation context

func (*Allocation) GetRemoteFileMap

func (a *Allocation) GetRemoteFileMap(exclMap map[string]int) (map[string]fileInfo, error)

func (*Allocation) GetStats

func (a *Allocation) GetStats() *AllocationStats

func (*Allocation) InitAllocation

func (a *Allocation) InitAllocation()

func (*Allocation) ListDir

func (a *Allocation) ListDir(path string) (*ListResult, error)

func (*Allocation) ListDirFromAuthTicket

func (a *Allocation) ListDirFromAuthTicket(authTicket string, lookupHash string) (*ListResult, error)

func (*Allocation) MoveObject

func (a *Allocation) MoveObject(path string, destPath string) error

func (*Allocation) RemoveCollaborator

func (a *Allocation) RemoveCollaborator(filePath, collaboratorID string) error

func (*Allocation) RenameObject

func (a *Allocation) RenameObject(path string, destName string) error

func (*Allocation) RepairFile

func (a *Allocation) RepairFile(localpath string, remotepath string,
	status StatusCallback) error

func (*Allocation) RepairRequired

func (a *Allocation) RepairRequired(remotepath string) (zboxutil.Uint128, bool, *fileref.FileRef, error)

func (*Allocation) RevokeShare

func (a *Allocation) RevokeShare(path string, refereeClientID string) error

func (*Allocation) SaveRemoteSnapshot

func (a *Allocation) SaveRemoteSnapshot(pathToSave string, remoteExcludePath []string) error

SaveRemoteSnapshot saves the current remote information to the given file. This file can be passed to GetAllocationDiff to find the exact difference between the previous sync state and the current one.

func (*Allocation) StartChunkedUpload added in v1.3.0

func (a *Allocation) StartChunkedUpload(workdir, localPath string,
	remotePath string,
	status StatusCallback,
	isUpdate bool,
	isRepair bool,
	thumbnailPath string,
	encryption bool,
	attrs fileref.Attributes,
) error

func (*Allocation) StartRepair

func (a *Allocation) StartRepair(localRootPath, pathToRepair string, statusCB StatusCallback) error

func (*Allocation) UpdateFile

func (a *Allocation) UpdateFile(workdir, localpath string, remotepath string,
	attrs fileref.Attributes, status StatusCallback) error

UpdateFile is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) UpdateFileWithThumbnail

func (a *Allocation) UpdateFileWithThumbnail(workdir, localpath string, remotepath string,
	thumbnailpath string, attrs fileref.Attributes, status StatusCallback) error

UpdateFileWithThumbnail is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) UpdateObjectAttributes

func (a *Allocation) UpdateObjectAttributes(path string,
	attrs fileref.Attributes) (err error)

func (*Allocation) UploadAuthTicketToBlobber

func (a *Allocation) UploadAuthTicketToBlobber(authTicket string, clientEncPubKey string) error

func (*Allocation) UploadFile

func (a *Allocation) UploadFile(workdir, localpath string, remotepath string,
	attrs fileref.Attributes, status StatusCallback) error

UploadFile is deprecated; please use CreateChunkedUpload instead.

func (*Allocation) UploadFileWithThumbnail

func (a *Allocation) UploadFileWithThumbnail(workdir string, localpath string,
	remotepath string, thumbnailpath string, attrs fileref.Attributes,
	status StatusCallback) error

UploadFileWithThumbnail is deprecated; please use CreateChunkedUpload instead.

type AllocationPoolStat

type AllocationPoolStat struct {
	ID           string             `json:"id"`
	Balance      common.Balance     `json:"balance"`
	ExpireAt     common.Timestamp   `json:"expire_at"`
	AllocationID common.Key         `json:"allocation_id"`
	Blobbers     []*BlobberPoolStat `json:"blobbers"`
	Locked       bool               `json:"locked"`
}

type AllocationPoolStats

type AllocationPoolStats struct {
	Pools []*AllocationPoolStat `json:"pools"`
	Back  *BackPool             `json:"back,omitempty"`
}

AllocationPoolStats represents read or write pool statistics.

func GetReadPoolInfo

func GetReadPoolInfo(clientID string) (info *AllocationPoolStats, err error)

GetReadPoolInfo returns read pool info for the given client or, if the given clientID is empty, for the current client of the SDK.

func GetWritePoolInfo

func GetWritePoolInfo(clientID string) (info *AllocationPoolStats, err error)

GetWritePoolInfo returns write pool info for the given client or, if the given clientID is empty, for the current client of the SDK.

func (*AllocationPoolStats) AllocFilter

func (aps *AllocationPoolStats) AllocFilter(allocID string)

type AllocationStats

type AllocationStats struct {
	UsedSize                  int64  `json:"used_size"`
	NumWrites                 int64  `json:"num_of_writes"`
	NumReads                  int64  `json:"num_of_reads"`
	TotalChallenges           int64  `json:"total_challenges"`
	OpenChallenges            int64  `json:"num_open_challenges"`
	SuccessChallenges         int64  `json:"num_success_challenges"`
	FailedChallenges          int64  `json:"num_failed_challenges"`
	LastestClosedChallengeTxn string `json:"latest_closed_challenge"`
}

type AttributesRequest

type AttributesRequest struct {
	Attributes fileref.Attributes // new attributes

	Consensus //
	// contains filtered or unexported fields
}

func (*AttributesRequest) ProcessAttributes

func (ar *AttributesRequest) ProcessAttributes() (err error)

type AuthTicket

type AuthTicket struct {
	// contains filtered or unexported fields
}

func InitAuthTicket

func InitAuthTicket(authTicket string) *AuthTicket

func (*AuthTicket) GetFileName

func (at *AuthTicket) GetFileName() (string, error)

func (*AuthTicket) GetLookupHash

func (at *AuthTicket) GetLookupHash() (string, error)

func (*AuthTicket) IsDir

func (at *AuthTicket) IsDir() (bool, error)

func (*AuthTicket) Unmarshall

func (at *AuthTicket) Unmarshall() (*marker.AuthTicket, error)

type BackPool

type BackPool struct {
	ID      string         `json:"id"`
	Balance common.Balance `json:"balance"`
}

type Blobber

type Blobber struct {
	ID                common.Key        `json:"id"`
	BaseURL           string            `json:"url"`
	Terms             Terms             `json:"terms"`
	Capacity          common.Size       `json:"capacity"`
	Used              common.Size       `json:"used"`
	LastHealthCheck   common.Timestamp  `json:"last_health_check"`
	PublicKey         string            `json:"-"`
	StakePoolSettings StakePoolSettings `json:"stake_pool_settings"`
}

func GetBlobber

func GetBlobber(blobberID string) (blob *Blobber, err error)

GetBlobber returns the Blobber instance for the given blobber ID.

func GetBlobbers

func GetBlobbers() (bs []*Blobber, err error)

type BlobberAllocation

type BlobberAllocation struct {
	BlobberID       string         `json:"blobber_id"`
	Size            int64          `json:"size"`
	Terms           Terms          `json:"terms"`
	MinLockDemand   common.Balance `json:"min_lock_demand"`
	Spent           common.Balance `json:"spent"`
	Penalty         common.Balance `json:"penalty"`
	ReadReward      common.Balance `json:"read_reward"`
	Returned        common.Balance `json:"returned"`
	ChallengeReward common.Balance `json:"challenge_reward"`
	FinalReward     common.Balance `json:"final_reward"`
}

type BlobberAllocationStats

type BlobberAllocationStats struct {
	BlobberID        string
	BlobberURL       string
	ID               string `json:"ID"`
	Tx               string `json:"Tx"`
	TotalSize        int64  `json:"TotalSize"`
	UsedSize         int    `json:"UsedSize"`
	OwnerID          string `json:"OwnerID"`
	OwnerPublicKey   string `json:"OwnerPublicKey"`
	Expiration       int    `json:"Expiration"`
	AllocationRoot   string `json:"AllocationRoot"`
	BlobberSize      int    `json:"BlobberSize"`
	BlobberSizeUsed  int    `json:"BlobberSizeUsed"`
	LatestRedeemedWM string `json:"LatestRedeemedWM"`
	IsRedeemRequired bool   `json:"IsRedeemRequired"`
	CleanedUp        bool   `json:"CleanedUp"`
	Finalized        bool   `json:"Finalized"`
	Terms            []struct {
		ID           int    `json:"ID"`
		BlobberID    string `json:"BlobberID"`
		AllocationID string `json:"AllocationID"`
		ReadPrice    int    `json:"ReadPrice"`
		WritePrice   int    `json:"WritePrice"`
	} `json:"Terms"`
}

type BlobberPoolStat

type BlobberPoolStat struct {
	BlobberID common.Key     `json:"blobber_id"`
	Balance   common.Balance `json:"balance"`
}

type BlockDownloadRequest

type BlockDownloadRequest struct {
	// contains filtered or unexported fields
}

type ChallengePoolInfo

type ChallengePoolInfo struct {
	ID         string           `json:"id"`
	Balance    common.Balance   `json:"balance"`
	StartTime  common.Timestamp `json:"start_time"`
	Expiration common.Timestamp `json:"expiration"`
	Finalized  bool             `json:"finalized"`
}

ChallengePoolInfo represents a challenge pool stat.

func GetChallengePoolInfo

func GetChallengePoolInfo(allocID string) (info *ChallengePoolInfo, err error)

GetChallengePoolInfo returns challenge pool info for the given allocation.

type ChunkData added in v1.2.88

type ChunkData struct {
	// Index current index of chunks
	Index int
	// IsFinal last chunk or not
	IsFinal bool

	// ReadSize total size read from original reader (un-encoded, un-encrypted)
	ReadSize int64
	// FragmentSize fragment size for a blobber (un-encrypted)
	FragmentSize int64
	// Fragments data shards for the blobbers
	Fragments [][]byte
}

ChunkData data of a chunk

type ChunkedUpload added in v1.2.88

type ChunkedUpload struct {
	// contains filtered or unexported fields
}

ChunkedUpload upload manager with chunked upload feature

func CreateChunkedUpload added in v1.2.88

func CreateChunkedUpload(workdir string, allocationObj *Allocation, fileMeta FileMeta, fileReader io.Reader, isUpdate, isRepair bool, opts ...ChunkedUploadOption) (*ChunkedUpload, error)
    CreateChunkedUpload creates a ChunkedUpload instance.

	Callers should be careful with the fileReader parameter:
	io.ErrUnexpectedEOF might mean that the source has been completely exhausted, or that some error
	prevented the source from filling the buffer. Because of this ambiguity, it is the responsibility of the
	developer to provide an io.Reader that returns io.EOF once the source has been fully read.
	For example:
		func newReader(source io.Reader) *EReader {
			return &EReader{source}
		}

		type EReader struct {
			io.Reader
		}

		func (r *EReader) Read(p []byte) (n int, err error) {
			if n, err = io.ReadAtLeast(r.Reader, p, len(p)); err != nil {
				if errors.Is(err, io.ErrUnexpectedEOF) {
					return n, io.EOF
				}
			}
			return
		}
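
A minimal usage sketch of CreateChunkedUpload (assumes the package is imported as sdk, os is imported, and allocationObj was obtained via GetAllocation; paths, names and the MIME type are placeholders). An *os.File already returns io.EOF once the file is fully read, so no wrapper reader is needed in this case:

	fileReader, err := os.Open("/tmp/hello.txt")
	if err != nil {
		// handle error
	}
	defer fileReader.Close()
	fileInfo, err := fileReader.Stat()
	if err != nil {
		// handle error
	}

	fileMeta := sdk.FileMeta{
		Path:       "/tmp/hello.txt",
		ActualSize: fileInfo.Size(),
		MimeType:   "text/plain",
		RemoteName: "hello.txt",
		RemotePath: "/hello.txt",
	}

	chunkedUpload, err := sdk.CreateChunkedUpload(
		os.TempDir(),  // workdir
		allocationObj, // *Allocation
		fileMeta,
		fileReader,
		false, // isUpdate
		false, // isRepair
		sdk.WithChunkSize(sdk.DefaultChunkSize),
		sdk.WithEncrypt(false),
	)
	if err != nil {
		// handle error
	}

	if err := chunkedUpload.Start(); err != nil {
		// handle error
	}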

func (*ChunkedUpload) Start added in v1.2.88

func (su *ChunkedUpload) Start() error

Start starts or resumes the upload.

type ChunkedUploadBlobber added in v1.3.5

type ChunkedUploadBlobber struct {
	// contains filtered or unexported fields
}

ChunkedUploadBlobber client of blobber's upload

type ChunkedUploadChunkReader added in v1.2.88

type ChunkedUploadChunkReader interface {
	// Next reads, encodes and encrypts the next chunk
	Next() (*ChunkData, error)

	// Read reads, encodes and encrypts all bytes
	Read(buf []byte) ([][]byte, error)
}

type ChunkedUploadFormBuilder added in v1.2.88

type ChunkedUploadFormBuilder interface {
	// build form data
	Build(fileMeta *FileMeta, hasher Hasher, connectionID string, chunkSize int64, chunkIndex int, isFinal bool, encryptedKey string, fileBytes, thumbnailBytes []byte) (*bytes.Buffer, ChunkedUploadFormMetadata, error)
}

ChunkedUploadFormBuilder builds form data for uploading

func CreateChunkedUploadFormBuilder added in v1.2.88

func CreateChunkedUploadFormBuilder() ChunkedUploadFormBuilder

CreateChunkedUploadFormBuilder creates a ChunkedUploadFormBuilder instance

type ChunkedUploadFormMetadata added in v1.2.88

type ChunkedUploadFormMetadata struct {
	FileBytesLen         int
	ThumbnailBytesLen    int
	ContentType          string
	ChunkHash            string
	ChallengeHash        string
	ContentHash          string
	ThumbnailContentHash string
}

ChunkedUploadFormMetadata upload form metadata

type ChunkedUploadOption added in v1.2.88

type ChunkedUploadOption func(su *ChunkedUpload)

ChunkedUploadOption sets a chunked upload option

func WithChunkSize added in v1.2.88

func WithChunkSize(size int64) ChunkedUploadOption

WithChunkSize sets a custom chunk size. Ignored if size <= 0.

func WithEncrypt added in v1.2.88

func WithEncrypt(status bool) ChunkedUploadOption

WithEncrypt turns encryption on or off for the upload. It is off by default.

func WithProgressStorer added in v1.3.6

func WithProgressStorer(progressStorer ChunkedUploadProgressStorer) ChunkedUploadOption

func WithStatusCallback added in v1.2.88

func WithStatusCallback(callback StatusCallback) ChunkedUploadOption

WithStatusCallback registers a StatusCallback instance

func WithThumbnail added in v1.2.88

func WithThumbnail(buf []byte) ChunkedUploadOption

WithThumbnail adds a thumbnail. Stream mode is unnecessary for the thumbnail.

func WithThumbnailFile added in v1.2.88

func WithThumbnailFile(fileName string) ChunkedUploadOption

WithThumbnailFile adds a thumbnail from a file. Stream mode is unnecessary for the thumbnail.

type ChunkedUploadProgressStorer added in v1.2.88

type ChunkedUploadProgressStorer interface {
	// Load load upload progress by id
	Load(id string) *UploadProgress
	// Save save upload progress
	Save(up UploadProgress)
	// Remove remove upload progress by id
	Remove(id string) error
}

ChunkedUploadProgressStorer loads and saves upload progress

type CollaboratorRequest

type CollaboratorRequest struct {
	// contains filtered or unexported fields
}

func (*CollaboratorRequest) RemoveCollaboratorFromBlobbers

func (req *CollaboratorRequest) RemoveCollaboratorFromBlobbers() bool

func (*CollaboratorRequest) UpdateCollaboratorToBlobbers

func (req *CollaboratorRequest) UpdateCollaboratorToBlobbers() bool

type CommitFolderData

type CommitFolderData struct {
	OpType    string
	PreValue  string
	CurrValue string
}

type CommitFolderResponse

type CommitFolderResponse struct {
	TxnID string
	Data  *CommitFolderData
}

type CommitMetaData

type CommitMetaData struct {
	CrudType string
	MetaData *ConsolidatedFileMeta
}

type CommitMetaRequest

type CommitMetaRequest struct {
	CommitMetaData
	// contains filtered or unexported fields
}

type CommitMetaResponse

type CommitMetaResponse struct {
	TxnID    string
	MetaData *ConsolidatedFileMeta
}

type CommitRequest

type CommitRequest struct {
	// contains filtered or unexported fields
}

type CommitResult

type CommitResult struct {
	Success      bool   `json:"success"`
	ErrorMessage string `json:"error_msg,omitempty"`
}

func ErrorCommitResult

func ErrorCommitResult(errMsg string) *CommitResult

func SuccessCommitResult

func SuccessCommitResult() *CommitResult

type Consensus

type Consensus struct {
	sync.RWMutex
	// contains filtered or unexported fields
}

func (*Consensus) Done added in v1.2.88

func (req *Consensus) Done()

Done increases consensus by 1

func (*Consensus) Reset added in v1.2.88

func (req *Consensus) Reset()

Reset resets consensus to 0

type ConsolidatedFileMeta

type ConsolidatedFileMeta struct {
	Name            string
	Type            string
	Path            string
	LookupHash      string
	Hash            string
	MimeType        string
	Size            int64
	ActualFileSize  int64
	ActualNumBlocks int64
	EncryptedKey    string
	CommitMetaTxns  []fileref.CommitMetaTxn
	Collaborators   []fileref.Collaborator
	Attributes      fileref.Attributes
}

type CopyRequest

type CopyRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*CopyRequest) ProcessCopy

func (req *CopyRequest) ProcessCopy() error

type DeleteRequest

type DeleteRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*DeleteRequest) ProcessDelete

func (req *DeleteRequest) ProcessDelete() error

type DirRequest

type DirRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*DirRequest) ProcessDir

func (req *DirRequest) ProcessDir(a *Allocation) error

type DownloadOption added in v1.4.3

type DownloadOption func(do *DownloadOptions)

DownloadOption sets a download option

func WithAllocation added in v1.4.3

func WithAllocation(obj *Allocation) DownloadOption

func WithAuthticket added in v1.4.3

func WithAuthticket(authTicket, lookupHash string) DownloadOption

func WithBlocks added in v1.4.3

func WithBlocks(start, end int64, blocksPerMarker int) DownloadOption

func WithOnlyThumbnail added in v1.4.3

func WithOnlyThumbnail(thumbnail bool) DownloadOption

func WithRxPay added in v1.4.3

func WithRxPay(rxPay bool) DownloadOption

type DownloadOptions added in v1.4.3

type DownloadOptions struct {
	// contains filtered or unexported fields
}

DownloadOptions download options

type DownloadRequest

type DownloadRequest struct {
	Consensus
	// contains filtered or unexported fields
}

type DownloadRequestHeader added in v1.7.2

type DownloadRequestHeader struct {
	PathHash     string
	Path         string
	BlockNum     int64
	NumBlocks    int64
	ReadMarker   []byte
	AuthToken    []byte
	RxPay        bool
	DownloadMode string
}

DownloadRequestHeader download request header

func (*DownloadRequestHeader) ToHeader added in v1.7.2

func (h *DownloadRequestHeader) ToHeader(req *http.Request)

ToHeader updates the request header

type Downloader added in v1.4.3

type Downloader interface {
	GetAllocation() *Allocation
	Start(status StatusCallback) error
}

Downloader downloader for file, blocks and thumbnail

func CreateDownloader added in v1.4.3

func CreateDownloader(allocationID, localPath, remotePath string, opts ...DownloadOption) (Downloader, error)

CreateDownloader creates a Downloader.
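
A short sketch (assumes the package is imported as sdk, the SDK is initialized, and statusCB is a StatusCallback implementation such as the one sketched under StatusCallback below; the allocation ID and paths are placeholders):

	downloader, err := sdk.CreateDownloader(
		"<allocation-id>",
		"/tmp/hello.txt", // local destination
		"/hello.txt",     // remote path
		// optional: sdk.WithOnlyThumbnail(true), sdk.WithBlocks(...), sdk.WithAuthticket(...)
	)
	if err != nil {
		// handle error
	}

	if err := downloader.Start(statusCB); err != nil {
		// handle error
	}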

type FfmpegRecorder added in v1.2.88

type FfmpegRecorder struct {
	// contains filtered or unexported fields
}

FfmpegRecorder wraps the ffmpeg command to capture video and audio from the local camera and microphone

func CreateFfmpegRecorder added in v1.2.88

func CreateFfmpegRecorder(file string, delay int) (*FfmpegRecorder, error)

CreateFfmpegRecorder creates an ffmpeg recorder to capture video and audio from the local camera and microphone

func (*FfmpegRecorder) Close added in v1.2.88

func (r *FfmpegRecorder) Close() error

Close implements io.Closer

func (*FfmpegRecorder) GetClipsFile added in v1.2.88

func (r *FfmpegRecorder) GetClipsFile(clipsIndex int) string

GetClipsFile gets the clips file

func (*FfmpegRecorder) GetClipsFileName added in v1.2.88

func (r *FfmpegRecorder) GetClipsFileName(clipsIndex int) string

GetClipsFileName gets the clips file name

func (*FfmpegRecorder) GetFileContentType added in v1.2.88

func (r *FfmpegRecorder) GetFileContentType() (string, error)

GetFileContentType gets the MIME type

func (*FfmpegRecorder) Read added in v1.2.88

func (r *FfmpegRecorder) Read(p []byte) (int, error)

Read implements io.Reader

func (*FfmpegRecorder) Size added in v1.2.88

func (r *FfmpegRecorder) Size() int64

Size gets the current clips size

type FileDiff

type FileDiff struct {
	Op         string             `json:"operation"`
	Path       string             `json:"path"`
	Type       string             `json:"type"`
	Attributes fileref.Attributes `json:"attributes"`
}

type FileMeta added in v1.0.3

type FileMeta struct {
	// Mimetype mime type of source file
	MimeType string

	// Path local path of source file
	Path string
	// ThumbnailPath local path of source thumbnail
	ThumbnailPath string

	// ActualHash hash of original file (un-encoded, un-encrypted)
	ActualHash string
	// ActualSize total bytes of original file (un-encoded, un-encrypted). It is 0 if the input is a live stream.
	ActualSize int64
	// ActualThumbnailSize total bytes of original thumbnail (un-encoded, un-encrypted)
	ActualThumbnailSize int64
	// ActualThumbnailHash hash of original thumbnail (un-encoded, un-encrypted)
	ActualThumbnailHash string

	//RemoteName remote file name
	RemoteName string
	// RemotePath remote path
	RemotePath string
	// Attributes file attributes in blockchain
	Attributes fileref.Attributes
}

FileMeta metadata of stream input/local

func (*FileMeta) FileID added in v1.2.88

func (meta *FileMeta) FileID() string

FileID generates the id of the upload progress in the local cache

type FileNameBuilder added in v1.2.88

type FileNameBuilder interface {
	OutDir() string
	FileExt() string
	OutFile() string
	ClipsFile(index int) string
	ClipsFileName(index int) string
}

FileNameBuilder builds file names based on the output format

type FileStats

type FileStats struct {
	Name                     string    `json:"name"`
	Size                     int64     `json:"size"`
	PathHash                 string    `json:"path_hash"`
	Path                     string    `json:"path"`
	NumBlocks                int64     `json:"num_of_blocks"`
	NumUpdates               int64     `json:"num_of_updates"`
	NumBlockDownloads        int64     `json:"num_of_block_downloads"`
	SuccessChallenges        int64     `json:"num_of_challenges"`
	FailedChallenges         int64     `json:"num_of_failed_challenges"`
	LastChallengeResponseTxn string    `json:"last_challenge_txn"`
	WriteMarkerRedeemTxn     string    `json:"write_marker_txn"`
	BlobberID                string    `json:"blobber_id"`
	BlobberURL               string    `json:"blobber_url"`
	BlockchainAware          bool      `json:"blockchain_aware"`
	CreatedAt                time.Time `json:"CreatedAt"`
}

type Hasher added in v1.2.88

type Hasher interface {

	// GetFileHash get file hash
	GetFileHash() (string, error)
	// WriteToFile write bytes to file hasher
	WriteToFile(buf []byte, chunkIndex int) error

	// GetChallengeHash get challenge hash
	GetChallengeHash() (string, error)
	// WriteToChallenge write bytes to challenge hasher
	WriteToChallenge(buf []byte, chunkIndex int) error

	// GetContentHash get content hash
	GetContentHash() (string, error)
	// WriteHashToContent write hash leaf to content hasher
	WriteHashToContent(hash string, chunkIndex int) error
}

func CreateHasher added in v1.2.88

func CreateHasher(chunkSize int) Hasher

CreateHasher creates a Hasher instance

type InputMap added in v1.2.86

type InputMap struct {
	Fields map[string]interface{} `json:"fields"`
}

func GetStorageSCConfig

func GetStorageSCConfig() (conf *InputMap, err error)

type ListRequest

type ListRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*ListRequest) GetListFromBlobbers

func (req *ListRequest) GetListFromBlobbers() *ListResult

type ListResult

type ListResult struct {
	Name            string             `json:"name"`
	Path            string             `json:"path,omitempty"`
	Type            string             `json:"type"`
	Size            int64              `json:"size"`
	Hash            string             `json:"hash,omitempty"`
	MimeType        string             `json:"mimetype,omitempty"`
	NumBlocks       int64              `json:"num_blocks"`
	LookupHash      string             `json:"lookup_hash"`
	EncryptionKey   string             `json:"encryption_key"`
	Attributes      fileref.Attributes `json:"attributes"`
	ActualSize      int64              `json:"actual_size"`
	ActualNumBlocks int64              `json:"actual_num_blocks"`
	CreatedAt       string             `json:"created_at"`
	UpdatedAt       string             `json:"updated_at"`
	Children        []*ListResult      `json:"list"`
	Consensus       `json:"-"`
}

type LiveMeta added in v1.2.88

type LiveMeta struct {
	// Mimetype mime type of source file
	MimeType string

	//RemoteName remote file name
	RemoteName string
	// RemotePath remote path
	RemotePath string
	// Attributes file attributes in blockchain
	Attributes fileref.Attributes
}

LiveMeta metadata of live stream input

type LiveUpload added in v1.2.88

type LiveUpload struct {
	// contains filtered or unexported fields
}

LiveUpload live streaming video upload manager

func CreateLiveUpload added in v1.2.88

func CreateLiveUpload(homedir string, allocationObj *Allocation, liveMeta LiveMeta, liveReader LiveUploadReader, opts ...LiveUploadOption) *LiveUpload

CreateLiveUpload creates a LiveUpload instance.
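
A minimal sketch of a live upload driven by the local camera (assumes the package is imported as sdk, os is imported, allocationObj was obtained via GetAllocation, and ffmpeg is installed; output paths, names, the MIME type, and the 5-second delay are placeholders):

	recorder, err := sdk.CreateFfmpegRecorder("/tmp/live.m3u8", 5)
	if err != nil {
		// handle error
	}
	defer recorder.Close()

	liveMeta := sdk.LiveMeta{
		MimeType:   "video/mp4",
		RemoteName: "live.m3u8",
		RemotePath: "/videos/live.m3u8",
	}

	liveUpload := sdk.CreateLiveUpload(
		os.TempDir(),  // homedir (assumed: local working directory for clips)
		allocationObj, // *Allocation
		liveMeta,
		recorder, // *FfmpegRecorder satisfies LiveUploadReader
		sdk.WithLiveDelay(5),
	)

	if err := liveUpload.Start(); err != nil {
		// handle error
	}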

func (*LiveUpload) Start added in v1.2.88

func (lu *LiveUpload) Start() error

Start starts the live streaming upload

type LiveUploadOption added in v1.2.88

type LiveUploadOption func(lu *LiveUpload)

LiveUploadOption sets a live upload option

func WithLiveChunkSize added in v1.2.88

func WithLiveChunkSize(size int) LiveUploadOption

WithLiveChunkSize sets a custom chunk size. Ignored if size <= 0.

func WithLiveDelay added in v1.2.88

func WithLiveDelay(delaySeconds int) LiveUploadOption

WithLiveDelay sets the delay in seconds. Ignored if delaySeconds <= 0.

func WithLiveEncrypt added in v1.2.88

func WithLiveEncrypt(status bool) LiveUploadOption

WithLiveEncrypt turns encryption on or off for the upload. It is off by default.

func WithLiveStatusCallback added in v1.2.88

func WithLiveStatusCallback(callback func() StatusCallback) LiveUploadOption

WithLiveStatusCallback registers a StatusCallback factory function

type LiveUploadReader added in v1.2.88

type LiveUploadReader interface {
	io.Reader
	Size() int64
	GetClipsFile(clipsIndex int) string
	GetClipsFileName(cliipsIndex int) string
}

LiveUploadReader implements io.Reader and Size for live stream upload

type M3u8Writer added in v1.2.88

type M3u8Writer interface {
	io.WriteSeeker
	Truncate(size int64) error
	Sync() error
}

M3u8Writer m3u8 writer

type MediaPlaylist added in v1.2.88

type MediaPlaylist struct {
	// contains filtered or unexported fields
}

MediaPlaylist m3u8 encoder and decoder

func NewMediaPlaylist added in v1.2.88

func NewMediaPlaylist(delay int, dir string, writer M3u8Writer) *MediaPlaylist

NewMediaPlaylist creates a media playlist (.m3u8)

func (*MediaPlaylist) Append added in v1.2.88

func (m *MediaPlaylist) Append(item string)

Append appends a new item

func (*MediaPlaylist) Encode added in v1.2.88

func (m *MediaPlaylist) Encode() []byte

Encode encodes the m3u8 playlist

func (*MediaPlaylist) Play added in v1.2.88

func (m *MediaPlaylist) Play()

Play starts pushing items into the playlist

func (*MediaPlaylist) String added in v1.2.88

func (m *MediaPlaylist) String() string

String implements fmt.Stringer

type Network

type Network struct {
	Miners   []string `json:"miners"`
	Sharders []string `json:"sharders"`
}

func GetNetwork

func GetNetwork() *Network

func GetNetworkDetails

func GetNetworkDetails() (*Network, error)

type ORef added in v1.3.0

type ORef struct {
	SimilarField
	ID        int64     `json:"id"`
	CreatedAt time.Time `json:"created_at"` // It cannot be considered for consensus calculation as blobbers can have
	UpdatedAt time.Time `json:"updated_at"` // minor differences and will fail in consensus
}

Blobber responses will differ from each other, so we should only consider similar fields; i.e. we cannot calculate the hash of the response and reach consensus on it.

type ObjectTreeRequest added in v1.3.0

type ObjectTreeRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*ObjectTreeRequest) GetRefs added in v1.3.0

func (o *ObjectTreeRequest) GetRefs() (*ObjectTreeResult, error)

The paginated tree should not be collected all at once, as this would stall the client; it should rather be handled by the application that uses gosdk.

type ObjectTreeResult added in v1.3.0

type ObjectTreeResult struct {
	TotalPages int64               `json:"total_pages"`
	OffsetPath string              `json:"offset_path"`
	OffsetDate string              `json:"offset_date"`
	Refs       []ORef              `json:"refs"`
	LatestWM   *marker.WriteMarker `json:"latest_write_marker"`
}

type PriceRange

type PriceRange struct {
	Min int64 `json:"min"`
	Max int64 `json:"max"`
}

PriceRange represents a price range allowed by user to filter blobbers.

func (*PriceRange) IsValid

func (pr *PriceRange) IsValid() bool

IsValid reports whether the price range is valid.

type ProviderType added in v1.7.1

type ProviderType int
const (
	ProviderMiner ProviderType = iota
	ProviderSharder
	ProviderBlobber
	ProviderValidator
	ProviderAuthorizer
)

type ReferencePathResult

type ReferencePathResult struct {
	*fileref.ReferencePath
	LatestWM *marker.WriteMarker `json:"latest_write_marker"`
}

type RenameRequest

type RenameRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*RenameRequest) ProcessRename

func (req *RenameRequest) ProcessRename() error

type RepairRequest

type RepairRequest struct {
	// contains filtered or unexported fields
}

type RepairStatusCB

type RepairStatusCB struct {
	// contains filtered or unexported fields
}

func (*RepairStatusCB) CommitMetaCompleted

func (cb *RepairStatusCB) CommitMetaCompleted(request, response string, txn *transaction.Transaction, err error)

func (*RepairStatusCB) Completed

func (cb *RepairStatusCB) Completed(allocationId, filePath string, filename string, mimetype string, size int, op int)

func (*RepairStatusCB) Error

func (cb *RepairStatusCB) Error(allocationID string, filePath string, op int, err error)

func (*RepairStatusCB) InProgress

func (cb *RepairStatusCB) InProgress(allocationId, filePath string, op int, completedBytes int, data []byte)

func (*RepairStatusCB) RepairCompleted

func (cb *RepairStatusCB) RepairCompleted(filesRepaired int)

func (*RepairStatusCB) Started

func (cb *RepairStatusCB) Started(allocationId, filePath string, op int, totalBytes int)

type ShareRequest

type ShareRequest struct {
	// contains filtered or unexported fields
}

func (*ShareRequest) GetFileRef

func (req *ShareRequest) GetFileRef() (*fileref.FileRef, error)

type SignalContext added in v1.3.0

type SignalContext struct {
	context.Context
}

SignalContext listen syscall signal to cancel context

type SimilarField added in v1.3.0

type SimilarField struct {
	Type                string `json:"type"`
	AllocationID        string `json:"allocation_id"`
	LookupHash          string `json:"lookup_hash"`
	Name                string `json:"name"`
	Path                string `json:"path"`
	PathHash            string `json:"path_hash"`
	ParentPath          string `json:"parent_path"`
	PathLevel           int    `json:"level"`
	Size                int64  `json:"size"`
	ActualFileSize      int64  `json:"actual_file_size"`
	ActualFileHash      string `json:"actual_file_hash"`
	MimeType            string `json:"mimetype"`
	ActualThumbnailSize int64  `json:"actual_thumbnail_size"`
	ActualThumbnailHash string `json:"actual_thumbnail_hash"`
}

type StakePoolDelegatePoolInfo

type StakePoolDelegatePoolInfo struct {
	ID         common.Key     `json:"id"`          // blobber ID
	Balance    common.Balance `json:"balance"`     // current balance
	DelegateID common.Key     `json:"delegate_id"` // wallet
	Rewards    common.Balance `json:"rewards"`     // current
	UnStake    bool           `json:"unstake"`     // want to unstake

	TotalReward  common.Balance `json:"total_reward"`
	TotalPenalty common.Balance `json:"total_penalty"`
	Status       string         `json:"status"`
	RoundCreated int64          `json:"round_created"`
}

StakePoolDelegatePoolInfo represents delegate pool of a stake pool info.

type StakePoolInfo

type StakePoolInfo struct {
	ID      common.Key     `json:"pool_id"` // pool ID
	Balance common.Balance `json:"balance"` // total balance
	Unstake common.Balance `json:"unstake"` // total unstake amount

	Free       int64          `json:"free"`        // free staked space
	Capacity   int64          `json:"capacity"`    // blobber bid
	WritePrice common.Balance `json:"write_price"` // its write price

	OffersTotal  common.Balance `json:"offers_total"` //
	UnstakeTotal common.Balance `json:"unstake_total"`
	// delegate pools
	Delegate []StakePoolDelegatePoolInfo `json:"delegate"`
	Penalty  common.Balance              `json:"penalty"` // total for all
	// rewards
	Rewards common.Balance `json:"rewards"`

	// Settings of the stake pool
	Settings StakePoolSettings `json:"settings"`
}

StakePool full info.

func GetStakePoolInfo

func GetStakePoolInfo(blobberID string) (info *StakePoolInfo, err error)

GetStakePoolInfo returns stake pool info for the given blobber ID.

type StakePoolOfferInfo

type StakePoolOfferInfo struct {
	Lock         common.Balance   `json:"lock"`
	Expire       common.Timestamp `json:"expire"`
	AllocationID common.Key       `json:"allocation_id"`
	IsExpired    bool             `json:"is_expired"`
}

StakePoolOfferInfo represents stake pool offer information.

type StakePoolRewardsInfo

type StakePoolRewardsInfo struct {
	Charge    common.Balance `json:"charge"`    // total for all time
	Blobber   common.Balance `json:"blobber"`   // total for all time
	Validator common.Balance `json:"validator"` // total for all time
}

StakePoolRewardsInfo represents stake pool rewards.

type StakePoolSettings

type StakePoolSettings struct {
	// DelegateWallet for pool owner.
	DelegateWallet string `json:"delegate_wallet"`
	// MinStake allowed.
	MinStake common.Balance `json:"min_stake"`
	// MaxStake allowed.
	MaxStake common.Balance `json:"max_stake"`
	// NumDelegates maximum allowed.
	NumDelegates int `json:"num_delegates"`
	// ServiceCharge is blobber service charge.
	ServiceCharge float64 `json:"service_charge"`
}

StakePoolSettings information.

type StakePoolUnlockUnstake

type StakePoolUnlockUnstake struct {
	// One of the fields is set in a response: Unstake if the tokens can't be
	// unstaked for now, and the balance if a pool has been unlocked.
	Unstake bool  `json:"unstake"` // max time to wait to unstake
	Balance int64 `json:"balance"`
}

StakePoolUnlockUnstake is the stake pool unlock response for the case where tokens can't be unlocked due to open offers.

type StakePoolUserInfo

type StakePoolUserInfo struct {
	Pools map[common.Key][]*StakePoolDelegatePoolInfo `json:"pools"`
}

StakePoolUserInfo represents user stake pools statistic.

func GetStakePoolUserInfo

func GetStakePoolUserInfo(clientID string) (info *StakePoolUserInfo, err error)

GetStakePoolUserInfo obtains the blobbers'/validators' delegate pool statistics for a user. If the given clientID is an empty string, the current client is used.

type StatusCallback

type StatusCallback interface {
	Started(allocationId, filePath string, op int, totalBytes int)
	InProgress(allocationId, filePath string, op int, completedBytes int, data []byte)
	Error(allocationID string, filePath string, op int, err error)
	Completed(allocationId, filePath string, filename string, mimetype string, size int, op int)
	CommitMetaCompleted(request, response string, txn *transaction.Transaction, err error)
	RepairCompleted(filesRepaired int)
}
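
Applications typically pass their own StatusCallback to uploads, downloads and repairs. A minimal logging implementation might look like this (a sketch; the method bodies are illustrative, fmt is assumed to be imported, and *transaction.Transaction comes from the gosdk transaction package referenced in the interface above):

	type logStatusCB struct{}

	func (cb *logStatusCB) Started(allocationID, filePath string, op int, totalBytes int) {
		fmt.Printf("started %s (%d bytes)\n", filePath, totalBytes)
	}

	func (cb *logStatusCB) InProgress(allocationID, filePath string, op int, completedBytes int, data []byte) {
		fmt.Printf("in progress %s: %d bytes\n", filePath, completedBytes)
	}

	func (cb *logStatusCB) Error(allocationID, filePath string, op int, err error) {
		fmt.Printf("error on %s: %v\n", filePath, err)
	}

	func (cb *logStatusCB) Completed(allocationID, filePath, filename, mimetype string, size int, op int) {
		fmt.Printf("completed %s (%s, %d bytes)\n", filename, mimetype, size)
	}

	func (cb *logStatusCB) CommitMetaCompleted(request, response string, txn *transaction.Transaction, err error) {
	}

	func (cb *logStatusCB) RepairCompleted(filesRepaired int) {}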

type Terms

type Terms struct {
	ReadPrice               common.Balance `json:"read_price"`  // tokens / GB
	WritePrice              common.Balance `json:"write_price"` // tokens / GB
	MinLockDemand           float64        `json:"min_lock_demand"`
	MaxOfferDuration        time.Duration  `json:"max_offer_duration"`
	ChallengeCompletionTime time.Duration  `json:"challenge_completion_time"`
}

Terms represents Blobber terms. A Blobber can update its terms, but any existing offer will use terms of offer signing time.

type UploadBlobberStatus added in v1.2.88

type UploadBlobberStatus struct {
	Hasher Hasher

	// UploadLength total bytes that has been uploaded to blobbers
	UploadLength int64 `json:"upload_length,omitempty"`
}

UploadBlobberStatus the status of blobber's upload progress

func (*UploadBlobberStatus) UnmarshalJSON added in v1.5.2

func (s *UploadBlobberStatus) UnmarshalJSON(b []byte) error

type UploadFileMeta

type UploadFileMeta struct {
	// Name remote file name
	Name string
	// Path remote path
	Path string
	// Hash hash of entire source file
	Hash     string
	MimeType string
	// Size total bytes of entire source file
	Size int64

	// ThumbnailSize total bytes of entire thumbnail
	ThumbnailSize int64
	// ThumbnailHash hash code of entire thumbnail
	ThumbnailHash string

	// Attributes file attributes in blockchain
	Attributes fileref.Attributes
}

type UploadFormData added in v1.2.88

type UploadFormData struct {
	ConnectionID string `json:"connection_id,omitempty"`
	// Filename remote file name
	Filename string `json:"filename,omitempty"`
	// Path remote path
	Path string `json:"filepath,omitempty"`

	// ContentHash hash of the shard data (encoded, encrypted) when it is the last chunk. It is the ChunkHash if it is not the last chunk.
	ContentHash string `json:"content_hash,omitempty"`
	// Hash hash of shard thumbnail  (encoded,encrypted)
	ThumbnailContentHash string `json:"thumbnail_content_hash,omitempty"`

	// ChallengeHash challenge hash of shard data (encoded, encrypted)
	ChallengeHash string `json:"merkle_root,omitempty"`

	// ActualHash hash of original file (un-encoded, un-encrypted)
	ActualHash string `json:"actual_hash,omitempty"`
	// ActualSize total bytes of original file (un-encoded, un-encrypted)
	ActualSize int64 `json:"actual_size,omitempty"`
	// ActualThumbnailSize total bytes of original thumbnail (un-encoded, un-encrypted)
	ActualThumbSize int64 `json:"actual_thumb_size,omitempty"`
	// ActualThumbnailHash hash of original thumbnail (un-encoded, un-encrypted)
	ActualThumbHash string `json:"actual_thumb_hash,omitempty"`

	MimeType     string             `json:"mimetype,omitempty"`
	CustomMeta   string             `json:"custom_meta,omitempty"`
	EncryptedKey string             `json:"encrypted_key,omitempty"`
	Attributes   fileref.Attributes `json:"attributes,omitempty"`

	IsFinal      bool   `json:"is_final,omitempty"`      // current chunk is last or not
	ChunkHash    string `json:"chunk_hash"`              // hash of current chunk
	ChunkIndex   int    `json:"chunk_index,omitempty"`   // the seq of current chunk. all chunks MUST be uploaded one by one because of streaming merkle hash
	ChunkSize    int64  `json:"chunk_size,omitempty"`    // the size of a chunk. 64*1024 is default
	UploadOffset int64  `json:"upload_offset,omitempty"` // the next position that a new incoming chunk should be appended to

}

UploadFormData form data of upload

type UploadProgress added in v1.2.88

type UploadProgress struct {
	ID string `json:"id"`

	// ChunkSize size of chunk
	ChunkSize int64 `json:"chunk_size,omitempty"`
	// EncryptOnUpload encrypt data on upload or not
	EncryptOnUpload   bool   `json:"is_encrypted,omitempty"`
	EncryptPrivateKey string `json:"-"`

	// ConnectionID chunked upload connection_id
	ConnectionID string `json:"connection_id,omitempty"`
	// ChunkIndex index of last updated chunk
	ChunkIndex int `json:"chunk_index,omitempty"`
	// UploadLength total bytes that have been uploaded to blobbers
	UploadLength int64 `json:"-"`

	Blobbers []*UploadBlobberStatus `json:"merkle_hashers,omitempty"`
}

UploadProgress progress of upload

type UploadRequest

type UploadRequest struct {
	Consensus
	// contains filtered or unexported fields
}

func (*UploadRequest) GetMaxBlobbersSupported

func (req *UploadRequest) GetMaxBlobbersSupported() int

func (*UploadRequest) IsFullConsensusSupported

func (req *UploadRequest) IsFullConsensusSupported() bool

type UploadResult added in v1.2.88

type UploadResult struct {
	Filename   string `json:"filename"`
	ShardSize  int64  `json:"size"`
	Hash       string `json:"content_hash,omitempty"`
	MerkleRoot string `json:"merkle_root,omitempty"`
}

type WMLockResult added in v1.7.3

type WMLockResult struct {
	Status    WMLockStatus `json:"status,omitempty"`
	CreatedAt int64        `json:"created_at,omitempty"`
}

type WMLockStatus added in v1.7.3

type WMLockStatus int

WMLockStatus

const (
	WMLockStatusFailed WMLockStatus = iota
	WMLockStatusPending
	WMLockStatusOK
)

type WriteMarkerMutex added in v1.7.3

type WriteMarkerMutex struct {
	// contains filtered or unexported fields
}

WriteMarkerMutex is the client for the blobber's WriteMarkerMutex

func CreateWriteMarkerMutex added in v1.7.3

func CreateWriteMarkerMutex(client *client.Client, allocationObj *Allocation) (*WriteMarkerMutex, error)

CreateWriteMarkerMutex creates a WriteMarkerMutex for the allocation

func (*WriteMarkerMutex) GetRootHashnode added in v1.7.3

func (m *WriteMarkerMutex) GetRootHashnode(ctx context.Context, blobberBaseUrl string) (*fileref.Hashnode, error)

GetRootHashnode gets the root hash node from the blobber

func (*WriteMarkerMutex) Lock added in v1.7.3

func (m *WriteMarkerMutex) Lock(ctx context.Context, connectionID string) error

Lock acquires the WriteMarker lock from blobbers

func (*WriteMarkerMutex) Unlock added in v1.7.3

func (m *WriteMarkerMutex) Unlock(ctx context.Context, connectionID string) error

Unlock releases the WriteMarker lock on blobbers

type YoutubeDL added in v1.2.88

type YoutubeDL struct {
	// contains filtered or unexported fields
}

YoutubeDL wraps youtube-dl to download videos from YouTube

func CreateYoutubeDL added in v1.2.88

func CreateYoutubeDL(ctx context.Context, localPath string, feedURL string, downloadArgs []string, ffmpegArgs []string, delay int) (*YoutubeDL, error)

CreateYoutubeDL creates a youtube-dl instance to download a video file from YouTube

func (*YoutubeDL) Close added in v1.2.88

func (r *YoutubeDL) Close() error

Close implements io.Closer

func (*YoutubeDL) GetClipsFile added in v1.2.88

func (r *YoutubeDL) GetClipsFile(clipsIndex int) string

GetClipsFile gets the clips file

func (*YoutubeDL) GetClipsFileName added in v1.2.88

func (r *YoutubeDL) GetClipsFileName(clipsIndex int) string

GetClipsFileName gets the clips file name

func (*YoutubeDL) GetFileContentType added in v1.2.88

func (r *YoutubeDL) GetFileContentType() (string, error)

GetFileContentType gets the MIME type

func (*YoutubeDL) Read added in v1.2.88

func (r *YoutubeDL) Read(p []byte) (int, error)

Read implements io.Reader

func (*YoutubeDL) Size added in v1.2.88

func (r *YoutubeDL) Size() int64

Size gets the current clips size
