Documentation ¶
Overview ¶
Package accounting provides an accounting and limiting reader
Index ¶
- Variables
- func LimitTPS(ctx context.Context)
- func Start(ctx context.Context)
- func StartLimitTPS(ctx context.Context)
- func StatsGroupFromContext(ctx context.Context) (string, bool)
- func WithStatsGroup(parent context.Context, group string) context.Context
- type Account
- func (acc *Account) Abandon()
- func (acc *Account) AccountRead(n int) (err error)
- func (acc *Account) Close() error
- func (acc *Account) Done()
- func (acc *Account) DryRun(n int64)
- func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader
- func (acc *Account) GetReader() io.ReadCloser
- func (acc *Account) HasBuffer() bool
- func (acc *Account) OldStream() io.Reader
- func (acc *Account) Read(p []byte) (n int, err error)
- func (acc *Account) ServerSideCopyEnd(n int64)
- func (acc *Account) ServerSideMoveEnd(n int64)
- func (acc *Account) ServerSideTransferEnd(n int64)
- func (acc *Account) ServerSideTransferStart()
- func (acc *Account) SetStream(in io.Reader)
- func (acc *Account) StopBuffering()
- func (acc *Account) String() string
- func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser)
- func (acc *Account) WithBuffer() *Account
- func (acc *Account) WrapStream(in io.Reader) io.Reader
- func (acc *Account) WriteTo(w io.Writer) (n int64, err error)
- type Accounter
- type RcloneCollector
- type StatsInfo
- func (s *StatsInfo) AddServerSideCopy(n int64)
- func (s *StatsInfo) AddServerSideMove(n int64)
- func (s *StatsInfo) AddTransfer(transfer *Transfer)
- func (s *StatsInfo) Bytes(bytes int64)
- func (s *StatsInfo) BytesNoNetwork(bytes int64)
- func (s *StatsInfo) DeleteFile(ctx context.Context, size int64) error
- func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64
- func (s *StatsInfo) DoneChecking(remote string)
- func (s *StatsInfo) DoneTransferring(remote string, ok bool)
- func (s *StatsInfo) Error(err error) error
- func (s *StatsInfo) Errored() bool
- func (s *StatsInfo) Errors(errors int64)
- func (s *StatsInfo) FatalError()
- func (s *StatsInfo) GetBytes() int64
- func (s *StatsInfo) GetBytesWithPending() int64
- func (s *StatsInfo) GetChecks() int64
- func (s *StatsInfo) GetDeletes() int64
- func (s *StatsInfo) GetErrors() int64
- func (s *StatsInfo) GetLastError() error
- func (s *StatsInfo) GetTransfers() int64
- func (s *StatsInfo) HadFatalError() bool
- func (s *StatsInfo) HadRetryError() bool
- func (s *StatsInfo) Log()
- func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer
- func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer
- func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer
- func (s *StatsInfo) PruneTransfers()
- func (s *StatsInfo) RemoteStats() (out rc.Params, err error)
- func (s *StatsInfo) RemoveTransfer(transfer *Transfer)
- func (s *StatsInfo) Renames(renames int64) int64
- func (s *StatsInfo) ResetCounters()
- func (s *StatsInfo) ResetErrors()
- func (s *StatsInfo) RetryAfter() time.Time
- func (s *StatsInfo) RetryError()
- func (s *StatsInfo) SetCheckQueue(n int, size int64)
- func (s *StatsInfo) SetRenameQueue(n int, size int64)
- func (s *StatsInfo) SetTransferQueue(n int, size int64)
- func (s *StatsInfo) String() string
- func (s *StatsInfo) Transferred() []TransferSnapshot
- type TokenBucketSlot
- type Transfer
- func (tr *Transfer) Account(ctx context.Context, in io.ReadCloser) *Account
- func (tr *Transfer) Done(ctx context.Context, err error)
- func (tr *Transfer) IsDone() bool
- func (tr *Transfer) Reset(ctx context.Context)
- func (tr *Transfer) Snapshot() TransferSnapshot
- func (tr *Transfer) TimeRange() (time.Time, time.Time)
- type TransferSnapshot
- type WrapFn
Constants ¶
This section is empty.
Variables ¶
var ErrorMaxTransferLimitReached = errors.New("max transfer limit reached as set by --max-transfer")
ErrorMaxTransferLimitReached defines error when transfer limit is reached. Used for checking on exit and matching to correct exit code.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)
ErrorMaxTransferLimitReachedFatal is returned from Read when the max transfer limit is reached.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max transfer limit is reached and a graceful stop is required.
var MaxCompletedTransfers = 100
MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
var TokenBucket tokenBucket
TokenBucket holds the global token bucket limiter
Functions ¶
func LimitTPS ¶ added in v1.54.0
LimitTPS limits the number of transactions per second if enabled. It should be called once per transaction.
func StartLimitTPS ¶ added in v1.54.0
StartLimitTPS starts the token bucket for transactions per second limiting if necessary
func StatsGroupFromContext ¶
StatsGroupFromContext returns group from the context if it's available. Returns false if group is empty.
Types ¶
type Account ¶
type Account struct {
// contains filtered or unexported fields
}
Account limits and accounts for one transfer
func UnWrapAccounting ¶ added in v1.64.0
UnWrapAccounting unwraps a reader returning unwrapped and acc a pointer to the accounting.
The caller is expected to manage the accounting at this point.
func (*Account) Abandon ¶ added in v1.53.0
func (acc *Account) Abandon()
Abandon stops the async buffer doing any more buffering
func (*Account) AccountRead ¶
AccountRead account having read n bytes
func (*Account) Done ¶ added in v1.49.4
func (acc *Account) Done()
Done with accounting - must be called to free accounting goroutine
func (*Account) DryRun ¶ added in v1.54.0
DryRun accounts for statistics without running the operation
func (*Account) GetAsyncReader ¶
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader
GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
func (*Account) GetReader ¶
func (acc *Account) GetReader() io.ReadCloser
GetReader returns the underlying io.ReadCloser under any Buffer
func (*Account) HasBuffer ¶ added in v1.53.0
HasBuffer - returns true if this Account has an AsyncReader with a buffer
func (*Account) ServerSideCopyEnd ¶ added in v1.50.0
ServerSideCopyEnd accounts for a read of n bytes in a server side copy
func (*Account) ServerSideMoveEnd ¶ added in v1.64.0
ServerSideMoveEnd accounts for a read of n bytes in a server side move
func (*Account) ServerSideTransferEnd ¶ added in v1.64.0
ServerSideTransferEnd accounts for a read of n bytes in a server side transfer to be treated as a normal transfer.
func (*Account) ServerSideTransferStart ¶ added in v1.64.0
func (acc *Account) ServerSideTransferStart()
ServerSideTransferStart should be called at the start of a server-side transfer
This pretends a transfer has started
func (*Account) StopBuffering ¶
func (acc *Account) StopBuffering()
StopBuffering stops the async buffer doing any more buffering
func (*Account) UpdateReader ¶
func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser)
UpdateReader updates the underlying io.ReadCloser stopping the async buffer (if any) and re-adding it
func (*Account) WithBuffer ¶
WithBuffer - If the file is above a certain size it adds an Async reader
func (*Account) WrapStream ¶
WrapStream wraps an io Reader so it will be accounted in the same way as account
type Accounter ¶
type Accounter interface { io.Reader OldStream() io.Reader SetStream(io.Reader) WrapStream(io.Reader) io.Reader }
Accounter accounts a stream allowing the accounting to be removed and re-added
type RcloneCollector ¶ added in v1.52.0
type RcloneCollector struct {
// contains filtered or unexported fields
}
RcloneCollector is a Prometheus collector for Rclone
func NewRcloneCollector ¶ added in v1.52.0
func NewRcloneCollector(ctx context.Context) *RcloneCollector
NewRcloneCollector make a new RcloneCollector
func (*RcloneCollector) Collect ¶ added in v1.52.0
func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric)
Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
func (*RcloneCollector) Describe ¶ added in v1.52.0
func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc)
Describe is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
type StatsInfo ¶
type StatsInfo struct {
// contains filtered or unexported fields
}
StatsInfo accounts all transfers N.B.: if this struct is modified, please remember to also update sum() function in stats_groups to correctly count the updated fields
func GlobalStats ¶
func GlobalStats() *StatsInfo
GlobalStats returns special stats used for global accounting.
func NewStatsGroup ¶
NewStatsGroup creates new stats under named group.
func StatsGroup ¶
StatsGroup gets stats by group name.
func (*StatsInfo) AddServerSideCopy ¶ added in v1.64.0
AddServerSideCopy counts a server side copy
func (*StatsInfo) AddServerSideMove ¶ added in v1.64.0
AddServerSideMove counts a server side move
func (*StatsInfo) AddTransfer ¶
AddTransfer adds reference to the started transfer.
func (*StatsInfo) BytesNoNetwork ¶ added in v1.65.1
BytesNoNetwork updates the stats for bytes bytes but doesn't include the transfer stats
func (*StatsInfo) DeleteFile ¶ added in v1.62.0
DeleteFile updates the stats for deleting a file
It may return fatal errors if the thresholds for --max-delete or --max-delete-size have been reached.
func (*StatsInfo) DeletedDirs ¶ added in v1.54.0
DeletedDirs updates the stats for deletedDirs
func (*StatsInfo) DoneChecking ¶
DoneChecking removes a check from the stats
func (*StatsInfo) DoneTransferring ¶
DoneTransferring removes a transfer from the stats
if ok is true and it was in the transfermap (to avoid incrementing in case of nested calls, #6213) then it increments the transfers count
func (*StatsInfo) Error ¶
Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
func (*StatsInfo) GetBytesWithPending ¶ added in v1.52.0
GetBytesWithPending returns the number of bytes transferred and remaining transfers
func (*StatsInfo) GetDeletes ¶ added in v1.62.0
GetDeletes returns the number of deletes
func (*StatsInfo) GetLastError ¶
GetLastError returns the lastError
func (*StatsInfo) GetTransfers ¶
GetTransfers reads the number of transfers
func (*StatsInfo) HadFatalError ¶
HadFatalError returns whether there has been at least one FatalError
func (*StatsInfo) HadRetryError ¶
HadRetryError returns whether there has been at least one non-NoRetryError
func (*StatsInfo) NewCheckingTransfer ¶
NewCheckingTransfer adds a checking transfer to the stats, from the object.
func (*StatsInfo) NewTransfer ¶
NewTransfer adds a transfer to the stats from the object.
The obj is used as the srcFs, the dstFs must be supplied
func (*StatsInfo) NewTransferRemoteSize ¶
NewTransferRemoteSize adds a transfer to the stats based on remote and size.
func (*StatsInfo) PruneTransfers ¶ added in v1.50.0
func (s *StatsInfo) PruneTransfers()
PruneTransfers makes sure there aren't too many old transfers by removing a single finished transfer.
func (*StatsInfo) RemoteStats ¶
RemoteStats returns stats for rc
func (*StatsInfo) RemoveTransfer ¶ added in v1.50.0
RemoveTransfer removes a reference to the started transfer.
func (*StatsInfo) ResetCounters ¶
func (s *StatsInfo) ResetCounters()
ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames) to 0 and resets lastError, fatalError and retryError
func (*StatsInfo) ResetErrors ¶
func (s *StatsInfo) ResetErrors()
ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
func (*StatsInfo) RetryAfter ¶
RetryAfter returns the time to retry after if it is set. It will be Zero if it isn't set.
func (*StatsInfo) SetCheckQueue ¶
SetCheckQueue sets the number of queued checks
func (*StatsInfo) SetRenameQueue ¶
SetRenameQueue sets the number of queued renames
func (*StatsInfo) SetTransferQueue ¶
SetTransferQueue sets the number of queued transfers
func (*StatsInfo) Transferred ¶
func (s *StatsInfo) Transferred() []TransferSnapshot
Transferred returns list of all completed transfers including checked and failed ones.
type TokenBucketSlot ¶ added in v1.54.0
type TokenBucketSlot int
TokenBucketSlot is the type to select which token bucket to use
const ( TokenBucketSlotAccounting TokenBucketSlot = iota TokenBucketSlotTransportRx TokenBucketSlotTransportTx TokenBucketSlots )
Slots for the token bucket
type Transfer ¶
type Transfer struct {
// contains filtered or unexported fields
}
Transfer keeps track of initiated transfers and provides access to accounting functions. Transfer needs to be closed on completion.
func (*Transfer) Account ¶
Account returns reader that knows how to keep track of transfer progress.
func (*Transfer) Done ¶
Done ends the transfer. Must be called after transfer is finished to run proper cleanups.
func (*Transfer) Reset ¶ added in v1.50.0
Reset allows switching the Account to another transfer method.
func (*Transfer) Snapshot ¶
func (tr *Transfer) Snapshot() TransferSnapshot
Snapshot produces stats for this account at point in time.
type TransferSnapshot ¶
type TransferSnapshot struct { Name string `json:"name"` Size int64 `json:"size"` Bytes int64 `json:"bytes"` Checked bool `json:"checked"` StartedAt time.Time `json:"started_at"` CompletedAt time.Time `json:"completed_at,omitempty"` Error error `json:"-"` Group string `json:"group"` SrcFs string `json:"srcFs,omitempty"` DstFs string `json:"dstFs,omitempty"` }
TransferSnapshot represents state of an account at point in time.
func (TransferSnapshot) MarshalJSON ¶
func (as TransferSnapshot) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler interface.
type WrapFn ¶
WrapFn wraps an io.Reader (for accounting purposes usually)
func UnWrap ¶
UnWrap unwraps a reader returning unwrapped and wrap, a function to wrap it back up again. If `in` is an Accounter then this function will take the accounting unwrapped and wrap will put it back on again the new Reader passed in.
This allows functions which wrap io.Readers to move the accounting to the end of the wrapped chain of readers. This is very important if buffering is being introduced and if the Reader might be wrapped again.