Documentation ¶
Index ¶
- Variables
- type Config
- type Containment
- type Cursor
- type PathCollector
- func (collector *PathCollector) InlineSegment(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
- func (collector *PathCollector) RemoteObject(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
- func (collector *PathCollector) RemoteSegment(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
- type PendingAudit
- type Report
- type Reporter
- type Reservoir
- type ReservoirService
- type Service
- type Share
- type Stripe
- type Verifier
- func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, ...) (shares map[int]Share, err error)
- func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, ...) (share Share, err error)
- func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report *Report, err error)
- func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[storj.NodeID]bool) (report *Report, err error)
Constants ¶
This section is empty.
Variables ¶
var (
	// ContainError is the containment errs class
	ContainError = errs.Class("containment error")
	// ErrContainedNotFound is the errs class for when a pending audit isn't found
	ErrContainedNotFound = errs.Class("pending audit not found")
	// ErrContainDelete is the errs class for when a pending audit can't be deleted
	ErrContainDelete = errs.Class("unable to delete pending audit")
	// ErrAlreadyExists is the errs class for when a pending audit with the same nodeID but different share data already exists
	ErrAlreadyExists = errs.Class("pending audit already exists for nodeID")
)
var (
	// ErrNotEnoughShares is the errs class for when not enough shares are available to complete an audit
	ErrNotEnoughShares = errs.Class("not enough shares for successful audit")
	// ErrSegmentDeleted is the errs class when the audited segment was deleted during the audit
	ErrSegmentDeleted = errs.Class("segment deleted during audit")
)
var Error = errs.Class("audit error")
Error is the default audit errs class
Functions ¶
This section is empty.
Types ¶
type Config ¶
type Config struct {
	MaxRetriesStatDB   int           `help:"max number of times to attempt updating a statdb batch" default:"3"`
	Interval           time.Duration `help:"how frequently segments are audited" default:"30s"`
	MinBytesPerSecond  memory.Size   `help:"the minimum acceptable bytes that storage nodes can transfer per second to the satellite" default:"128B"`
	MinDownloadTimeout time.Duration `help:"the minimum duration for downloading a share from storage nodes before timing out" default:"25s"`
	MaxReverifyCount   int           `help:"limit above which we consider an audit is failed" default:"3"`
	Slots              int           `help:"number of reservoir slots allotted for nodes, currently capped at 2" default:"1"`
}
Config contains configurable values for the audit service
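As a quick illustration, the documented defaults correspond to a literal like the one below. This is only a sketch: the values are copied from the `default` struct tags above, and the package is assumed to be imported as audit.

cfg := audit.Config{
	MaxRetriesStatDB:   3,                // default:"3"
	Interval:           30 * time.Second, // default:"30s"
	MinBytesPerSecond:  128 * memory.B,   // default:"128B"
	MinDownloadTimeout: 25 * time.Second, // default:"25s"
	MaxReverifyCount:   3,                // default:"3"
	Slots:              1,                // default:"1"
}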
type Containment ¶
type Containment interface {
	Get(ctx context.Context, nodeID pb.NodeID) (*PendingAudit, error)
	IncrementPending(ctx context.Context, pendingAudit *PendingAudit) error
	Delete(ctx context.Context, nodeID pb.NodeID) (bool, error)
}
Containment holds information about pending audits for contained nodes
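A minimal sketch of driving a Containment implementation. It assumes ctx, a concrete implementation db, and the node/piece identifiers and path are already in scope.

// Record the pending audit for this node.
pending := &audit.PendingAudit{
	NodeID:      nodeID,
	PieceID:     pieceID,
	StripeIndex: stripeIndex,
	Path:        path,
}
if err := db.IncrementPending(ctx, pending); err != nil {
	return err
}

// Look the pending audit up again before reverifying the node.
got, err := db.Get(ctx, nodeID)
if err != nil {
	return err
}
_ = got

// Remove it once the node has answered the audit.
if _, err := db.Delete(ctx, nodeID); err != nil {
	return err
}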
type Cursor ¶
type Cursor struct {
// contains filtered or unexported fields
}
Cursor keeps track of audit location in pointer db
func (*Cursor) NextStripe ¶
NextStripe returns a random stripe to be audited. "more" is true except when we have completed iterating over metainfo; it can be disregarded if an error or a stripe is returned
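The full signature of NextStripe is not reproduced above. The sketch below assumes, based on the description, that it returns (stripe *Stripe, more bool, err error), and that cursor and ctx are already in scope.

for {
	stripe, more, err := cursor.NextStripe(ctx)
	if err != nil {
		return err
	}
	if stripe != nil {
		// audit the stripe, e.g. via Verifier.Verify
		continue
	}
	if !more {
		break // finished iterating over metainfo
	}
}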
type PathCollector ¶ added in v0.19.0
type PathCollector struct {
	Reservoirs map[storj.NodeID]*Reservoir
	// contains filtered or unexported fields
}
PathCollector uses the metainfo loop to add paths to node reservoirs
func NewPathCollector ¶ added in v0.19.0
func NewPathCollector(reservoirSlots int, r *rand.Rand) *PathCollector
NewPathCollector instantiates a path collector
func (*PathCollector) InlineSegment ¶ added in v0.19.0
func (collector *PathCollector) InlineSegment(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
InlineSegment returns nil because we're only auditing for storage nodes for now
func (*PathCollector) RemoteObject ¶ added in v0.19.0
func (collector *PathCollector) RemoteObject(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
RemoteObject returns nil because the audit service does not interact with remote objects
func (*PathCollector) RemoteSegment ¶ added in v0.19.0
func (collector *PathCollector) RemoteSegment(ctx context.Context, path storj.Path, pointer *pb.Pointer) (err error)
RemoteSegment takes a remote segment found in metainfo and creates a reservoir for it if it doesn't exist already
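A hedged sketch of how the collector is fed by the metainfo loop. The loop.Join call is an assumption about the metainfo.Loop API rather than something documented here.

collector := audit.NewPathCollector(reservoirSlots, rand.New(rand.NewSource(time.Now().UnixNano())))
if err := loop.Join(ctx, collector); err != nil {
	return err
}
// After the pass, each node's reservoir holds a random sample of paths
// to its remote segments.
for nodeID, reservoir := range collector.Reservoirs {
	_ = nodeID
	_ = reservoir.Paths
}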
type PendingAudit ¶
type PendingAudit struct {
	NodeID        storj.NodeID
	PieceID       storj.PieceID
	StripeIndex   int64
	ReverifyCount int32
	Path          storj.Path
}
PendingAudit contains info needed for retrying an audit for a contained node
type Report ¶
type Report struct {
	Successes     storj.NodeIDList
	Fails         storj.NodeIDList
	Offlines      storj.NodeIDList
	PendingAudits []*PendingAudit
}
Report contains audit result lists for nodes that succeeded, failed, were offline, or have pending audits
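For example, a caller holding a Report (such as one returned by Verifier.Verify) might fan it out as below. This is only a sketch; the logging is purely illustrative.

for _, id := range report.Successes {
	log.Info("audit success", zap.Stringer("node", id))
}
for _, id := range report.Fails {
	log.Warn("audit failed", zap.Stringer("node", id))
}
for _, id := range report.Offlines {
	log.Warn("node offline during audit", zap.Stringer("node", id))
}
for _, pending := range report.PendingAudits {
	log.Info("node contained, audit pending", zap.Stringer("node", pending.NodeID))
}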
type Reporter ¶
type Reporter struct {
// contains filtered or unexported fields
}
Reporter records audit reports in overlay and implements the reporter interface
func NewReporter ¶
func NewReporter(log *zap.Logger, overlay *overlay.Service, containment Containment, maxRetries int, maxReverifyCount int32) *Reporter
NewReporter instantiates a reporter
func (*Reporter) RecordAudits ¶
RecordAudits saves audit results to overlay. If there is no error, it returns nil for both return values; otherwise it returns the error along with a report whose fields are set to the values that have been saved.
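A sketch of recording a report. The exact RecordAudits signature is not reproduced above, so the (ctx, report) -> (*Report, error) shape below is inferred from the description; all other variables are assumed to be in scope.

reporter := audit.NewReporter(log, overlayService, containmentDB, maxRetries, maxReverifyCount)
partial, err := reporter.RecordAudits(ctx, report)
if err != nil {
	// partial is the report described in the doc comment above,
	// returned alongside the error.
	log.Error("failed to record some audit results", zap.Error(err))
	_ = partial
}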
type Reservoir ¶ added in v0.19.0
type Reservoir struct {
	Paths [maxReservoirSize]storj.Path
	// contains filtered or unexported fields
}
Reservoir holds a certain number of segments to reflect a random sample
func NewReservoir ¶ added in v0.19.0
NewReservoir instantiates a Reservoir
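The reservoir keeps a fixed number of slots. As a standalone illustration (not this package's API), the classic reservoir-sampling update the type is named after looks like this:

// sampleInto keeps a uniform random sample of at most `slots` paths
// from a stream: every path seen so far remains in the sample with
// equal probability.
func sampleInto(paths []storj.Path, slots int, r *rand.Rand) []storj.Path {
	sample := make([]storj.Path, 0, slots)
	for i, p := range paths {
		if len(sample) < slots {
			sample = append(sample, p)
			continue
		}
		// Keep the new path with probability slots/(i+1) by overwriting
		// a uniformly chosen slot.
		if j := r.Intn(i + 1); j < slots {
			sample[j] = p
		}
	}
	return sample
}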
type ReservoirService ¶ added in v0.19.0
type ReservoirService struct {
	Reservoirs   map[storj.NodeID]*Reservoir
	MetainfoLoop *metainfo.Loop
	Loop         sync2.Cycle
	// contains filtered or unexported fields
}
ReservoirService is a temp name for the service struct during the audit 2.0 refactor. Once V3-2363 and V3-2364 are implemented, ReservoirService will replace the existing Service struct.
func NewReservoirService ¶ added in v0.19.0
NewReservoirService instantiates ReservoirService
func (*ReservoirService) Close ¶ added in v0.19.1
func (service *ReservoirService) Close() error
Close halts the reservoir service loop
type Service ¶
type Service struct {
	Cursor   *Cursor
	Verifier *Verifier
	Reporter reporter
	Loop     sync2.Cycle
	// contains filtered or unexported fields
}
Service helps coordinate Cursor and Verifier to run the audit process continuously
func NewService ¶
func NewService(log *zap.Logger, config Config, metainfo *metainfo.Service, orders *orders.Service, transport transport.Client, overlay *overlay.Service, containment Containment, identity *identity.FullIdentity) (*Service, error)
NewService instantiates a Service with access to a Cursor and Verifier
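A construction sketch; all of the dependencies below (logger, config, metainfo, orders, transport, overlay, containment, identity) are assumed to come from the satellite peer setup.

service, err := audit.NewService(log, cfg, metainfoService, ordersService, transportClient, overlayService, containmentDB, fullIdentity)
if err != nil {
	return err
}
_ = service // the satellite typically drives service.Loop as part of its run loop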
type Verifier ¶
type Verifier struct {
// contains filtered or unexported fields
}
Verifier helps verify the correctness of a given stripe
func NewVerifier ¶
func NewVerifier(log *zap.Logger, metainfo *metainfo.Service, transport transport.Client, overlay *overlay.Service, containment Containment, orders *orders.Service, id *identity.FullIdentity, minBytesPerSecond memory.Size, minDownloadTimeout time.Duration) *Verifier
NewVerifier creates a Verifier
func (*Verifier) DownloadShares ¶
func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32) (shares map[int]Share, err error)
DownloadShares downloads shares from the nodes where remote pieces are located
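A usage sketch; the order limits, piece private key, stripe index, and share size are assumed to have been prepared for the segment under audit.

shares, err := verifier.DownloadShares(ctx, limits, piecePrivateKey, stripeIndex, shareSize)
if err != nil {
	return err
}
for pieceNum, share := range shares {
	_ = pieceNum
	_ = share // one erasure share per responding node
}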
func (*Verifier) GetShare ¶
func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error)
GetShare uses the piece store client to download shares from nodes