metainfo

package
v0.34.1
Published: Feb 25, 2020 License: AGPL-3.0 Imports: 43 Imported by: 1

Documentation

Index

Constants

const (
	// BoltPointerBucket is the string representing the bucket used for `PointerEntries` in BoltDB
	BoltPointerBucket = "pointers"
)

Variables

var (
	// LoopError is a standard error class for this component.
	LoopError = errs.Class("metainfo loop error")
	// LoopClosedError is a loop closed error.
	LoopClosedError = LoopError.New("loop closed")
)
var (

	// Error is the general metainfo error class.
	Error = errs.Class("metainfo error")
	// ErrNodeAlreadyExists is returned when a pointer already has a piece for a node.
	ErrNodeAlreadyExists = errs.Class("metainfo error: node already exists")
)
var (
	// ErrBucketNotEmpty is returned when a bucket is required to be empty for an operation.
	ErrBucketNotEmpty = errs.Class("bucket not empty")
)
var ErrDeletePieces = errs.Class("metainfo storage node service")

ErrDeletePieces is the general error class for the DeletePiecesService.

Functions

func CreatePath

func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex int64, bucket, path []byte) (_ storj.Path, err error)

CreatePath creates a Segment path.
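
As a usage illustration, here is a minimal sketch of a CreatePath call. The helper name, the use of -1 as the index of an object's last segment, and the import paths below are assumptions made for the example, not guarantees of this package.

import (
	"context"

	"storj.io/common/storj" // these import paths are assumptions for this module version
	"storj.io/common/uuid"  // the uuid package used by this module may differ
	"storj.io/storj/satellite/metainfo"
)

// lastSegmentPath is a hypothetical helper that builds the pointer path for an
// object's last segment; using -1 as the last-segment index is an assumption
// about this package's path scheme.
func lastSegmentPath(ctx context.Context, projectID uuid.UUID, bucket, encryptedObjectPath []byte) (storj.Path, error) {
	return metainfo.CreatePath(ctx, projectID, -1, bucket, encryptedObjectPath)
}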

func IterateDatabase added in v0.26.0

func IterateDatabase(ctx context.Context, rateLimit float64, db PointerDB, observers ...Observer) error

IterateDatabase iterates over the PointerDB and notifies the specified observers about each pointer it visits.
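
For illustration, a sketch that runs a single pass with an ad-hoc counting observer. The segmentCounter type, the rate-limit value of 100 segments per second, and the import paths are assumptions made for this example.

import (
	"context"

	"storj.io/common/pb" // import paths are assumptions for this module version
	"storj.io/storj/satellite/metainfo"
)

// segmentCounter is a hypothetical Observer that tallies what the iteration visits.
type segmentCounter struct {
	objects, remote, inline int
}

func (c *segmentCounter) Object(ctx context.Context, path metainfo.ScopedPath, ptr *pb.Pointer) error {
	c.objects++
	return nil
}

func (c *segmentCounter) RemoteSegment(ctx context.Context, path metainfo.ScopedPath, ptr *pb.Pointer) error {
	c.remote++
	return nil
}

func (c *segmentCounter) InlineSegment(ctx context.Context, path metainfo.ScopedPath, ptr *pb.Pointer) error {
	c.inline++
	return nil
}

// countEntries runs one pass over the pointer database at an assumed rate limit
// of 100 segments per second (0 would mean unlimited).
func countEntries(ctx context.Context, db metainfo.PointerDB) (*segmentCounter, error) {
	counter := &segmentCounter{}
	if err := metainfo.IterateDatabase(ctx, 100, db, counter); err != nil {
		return nil, err
	}
	return counter, nil
}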

Types

type APIKeys

type APIKeys interface {
	GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error)
}

APIKeys defines the API key store methods used by the endpoint.

architecture: Database

type BucketsDB added in v0.15.0

type BucketsDB interface {
	// CreateBucket creates a new bucket
	CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)
	// GetBucket returns an existing bucket
	GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error)
	// UpdateBucket updates an existing bucket
	UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)
	// DeleteBucket deletes a bucket
	DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
	// ListBuckets returns all buckets for a project
	ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error)
}

BucketsDB is the interface for interacting with buckets in the database.

architecture: Database

type Config added in v0.11.0

type Config struct {
	DatabaseURL          string                    `help:"the database connection string to use" default:"postgres://"`
	MinRemoteSegmentSize memory.Size               `default:"1240" help:"minimum remote segment size"`
	MaxInlineSegmentSize memory.Size               `default:"8000" help:"maximum inline segment size"`
	MaxCommitInterval    time.Duration             `default:"48h" help:"maximum time allowed to pass between creating and committing a segment"`
	Overlay              bool                      `default:"true" help:"toggle flag if overlay is enabled"`
	RS                   RSConfig                  `help:"redundancy scheme configuration"`
	Loop                 LoopConfig                `help:"metainfo loop configuration"`
	RateLimiter          RateLimiterConfig         `help:"metainfo rate limiter configuration"`
	DeletePiecesService  DeletePiecesServiceConfig `help:"metainfo delete pieces service configuration"`
}

Config is a configuration struct containing everything you need to start the metainfo service.
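
For orientation, a hand-written configuration mirroring the defaults above. In practice the satellite's config loader fills these values from the struct tags; the nested configs are omitted here only for brevity, and the import paths are assumptions.

import (
	"time"

	"storj.io/common/memory" // import paths are assumptions for this module version
	"storj.io/storj/satellite/metainfo"
)

// exampleConfig mirrors the tag defaults shown above; RS, Loop, RateLimiter,
// and DeletePiecesService are left at their zero values to keep the sketch short.
var exampleConfig = metainfo.Config{
	DatabaseURL:          "postgres://",
	MinRemoteSegmentSize: 1240 * memory.B,
	MaxInlineSegmentSize: 8000 * memory.B,
	MaxCommitInterval:    48 * time.Hour,
	Overlay:              true,
}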

type DeletePiecesService added in v0.30.0

type DeletePiecesService struct {
	// contains filtered or unexported fields
}

DeletePiecesService is the metainfo service in charge of deleting pieces from storage nodes.

architecture: Service

func NewDeletePiecesService added in v0.30.0

func NewDeletePiecesService(log *zap.Logger, dialer rpc.Dialer, config DeletePiecesServiceConfig) (*DeletePiecesService, error)

NewDeletePiecesService creates a new DeletePiecesService. The maximum number of concurrent connections (config.MaxConcurrentConnection) is the maximum number of connections that each single method call uses.

It returns an error if the maximum number of concurrent connections is less than or equal to 0, the dialer is a zero value, or the log is nil.

func (*DeletePiecesService) Close added in v0.30.0

func (service *DeletePiecesService) Close() error

Close waits until all the resources used by the service are closed before returning.

func (*DeletePiecesService) DeletePieces added in v0.30.0

func (service *DeletePiecesService) DeletePieces(
	ctx context.Context, nodes NodesPieces, successThreshold float64,
) (err error)

DeletePieces deletes all the indicated pieces from the nodes that are online. It stops 300 milliseconds after the successThreshold fraction of the total number of pieces has been reached, or otherwise when it has finished trying to delete all the pieces.

It only returns an error if sync2.NewSuccessThreshold returns an error.
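
A sketch of the typical lifecycle follows, assuming the caller already has an rpc.Dialer and a NodesPieces value. The helper name, the 0.75 threshold, and the import paths are illustrative assumptions.

import (
	"context"
	"time"

	"go.uber.org/zap"

	"storj.io/common/rpc" // import paths are assumptions for this module version
	"storj.io/storj/satellite/metainfo"
)

// deletePiecesFromNodes is a hypothetical helper wrapping the service lifecycle.
func deletePiecesFromNodes(ctx context.Context, log *zap.Logger, dialer rpc.Dialer, nodes metainfo.NodesPieces) error {
	service, err := metainfo.NewDeletePiecesService(log, dialer, metainfo.DeletePiecesServiceConfig{
		MaxConcurrentConnection: 100,
		NodeOperationTimeout:    5 * time.Minute,
	})
	if err != nil {
		return err
	}
	// Close blocks until the service's resources are released; the close error
	// is dropped here only to keep the sketch short.
	defer func() { _ = service.Close() }()

	// Treat the operation as done once 75% of nodes.NumPieces() are deleted,
	// or when all deletion attempts have finished.
	return service.DeletePieces(ctx, nodes, 0.75)
}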

type DeletePiecesServiceConfig added in v0.34.1

type DeletePiecesServiceConfig struct {
	MaxConcurrentConnection int           `help:"maximum number of concurrent connection for the entire service." default:"100"`
	NodeOperationTimeout    time.Duration `help:"how long to wait for a node to complete a delete request." releaseDefault:"5m" devDefault:"2s"`
}

DeletePiecesServiceConfig is a configuration struct for the delete pieces service.

type Endpoint

type Endpoint struct {
	// contains filtered or unexported fields
}

Endpoint is the metainfo endpoint.

architecture: Endpoint

func NewEndpoint

func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *DeletePiecesService,
	orders *orders.Service, cache *overlay.Service, attributions attribution.DB,
	partners *rewards.PartnersService, peerIdentities overlay.PeerIdentities,
	apiKeys APIKeys, projectUsage *accounting.Service, projects console.Projects,
	rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration,
	limiterConfig RateLimiterConfig) *Endpoint

NewEndpoint creates a new metainfo endpoint instance.

func (*Endpoint) Batch added in v0.17.0

func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp *pb.BatchResponse, err error)

Batch handles requests sent in a batch.

func (*Endpoint) BeginDeleteObject added in v0.16.0

func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectBeginDeleteRequest) (resp *pb.ObjectBeginDeleteResponse, err error)

BeginDeleteObject begins object deletion process.

func (*Endpoint) BeginDeleteSegment added in v0.16.0

func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.SegmentBeginDeleteRequest) (resp *pb.SegmentBeginDeleteResponse, err error)

BeginDeleteSegment begins segment deletion process

func (*Endpoint) BeginObject added in v0.16.0

func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error)

BeginObject begins the object creation process.

func (*Endpoint) BeginSegment added in v0.16.0

func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBeginRequest) (resp *pb.SegmentBeginResponse, err error)

BeginSegment begins a segment upload.

func (*Endpoint) Close

func (endpoint *Endpoint) Close() error

Close closes the endpoint's resources.

func (*Endpoint) CommitObject added in v0.16.0

func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommitRequest) (resp *pb.ObjectCommitResponse, err error)

CommitObject commits an object when all its segments have already been committed.

func (*Endpoint) CommitSegment

func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentCommitRequest) (resp *pb.SegmentCommitResponse, err error)

CommitSegment commits a segment after it has been uploaded.

func (*Endpoint) CommitSegmentOld added in v0.15.0

func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentCommitRequestOld) (resp *pb.SegmentCommitResponseOld, err error)

CommitSegmentOld commits segment metadata

func (*Endpoint) CreateBucket added in v0.15.0

func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error)

CreateBucket creates a new bucket

func (*Endpoint) CreateSegmentOld added in v0.15.0

func (endpoint *Endpoint) CreateSegmentOld(ctx context.Context, req *pb.SegmentWriteRequestOld) (resp *pb.SegmentWriteResponseOld, err error)

CreateSegmentOld generates the requested number of OrderLimits with corresponding node addresses for them.

func (*Endpoint) DeleteBucket added in v0.15.0

func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error)

DeleteBucket deletes a bucket

func (*Endpoint) DeleteObjectPieces added in v0.29.0

func (endpoint *Endpoint) DeleteObjectPieces(
	ctx context.Context, projectID uuid.UUID, bucket, encryptedPath []byte,
) (err error)

DeleteObjectPieces deletes all the pieces stored on storage nodes that belong to the specified object.

NOTE: this method is exported so it can be tested individually without creating import cycles.

func (*Endpoint) DeleteSegmentOld added in v0.15.0

func (endpoint *Endpoint) DeleteSegmentOld(ctx context.Context, req *pb.SegmentDeleteRequestOld) (resp *pb.SegmentDeleteResponseOld, err error)

DeleteSegmentOld deletes segment metadata from the satellite and returns an array of OrderLimits so the pieces can be removed from the storage nodes.

func (*Endpoint) DownloadSegment

func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error)

DownloadSegment returns the data necessary to download a segment.

func (*Endpoint) DownloadSegmentOld added in v0.15.0

func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.SegmentDownloadRequestOld) (resp *pb.SegmentDownloadResponseOld, err error)

DownloadSegmentOld returns the Pointer in the case of INLINE data, or the list of OrderLimits necessary to download remote data.

func (*Endpoint) FinishDeleteObject added in v0.16.0

func (endpoint *Endpoint) FinishDeleteObject(ctx context.Context, req *pb.ObjectFinishDeleteRequest) (resp *pb.ObjectFinishDeleteResponse, err error)

FinishDeleteObject finishes object deletion

func (*Endpoint) FinishDeleteSegment added in v0.16.0

func (endpoint *Endpoint) FinishDeleteSegment(ctx context.Context, req *pb.SegmentFinishDeleteRequest) (resp *pb.SegmentFinishDeleteResponse, err error)

FinishDeleteSegment finishes segment deletion process

func (*Endpoint) GetBucket added in v0.15.0

func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error)

GetBucket returns a bucket

func (*Endpoint) GetObject added in v0.16.0

func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error)

GetObject gets a single object.

func (*Endpoint) ListBuckets added in v0.15.0

func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (resp *pb.BucketListResponse, err error)

ListBuckets returns buckets in a project where the bucket name matches the request cursor

func (*Endpoint) ListObjects added in v0.16.0

func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListRequest) (resp *pb.ObjectListResponse, err error)

ListObjects lists objects according to the request parameters.

func (*Endpoint) ListSegments

func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListRequest) (resp *pb.SegmentListResponse, err error)

ListSegments lists an object's segments.

func (*Endpoint) ListSegmentsOld added in v0.15.0

func (endpoint *Endpoint) ListSegmentsOld(ctx context.Context, req *pb.ListSegmentsRequestOld) (resp *pb.ListSegmentsResponseOld, err error)

ListSegmentsOld returns all Path keys in the Pointers bucket

func (*Endpoint) MakeInlineSegment added in v0.16.0

func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.SegmentMakeInlineRequest) (resp *pb.SegmentMakeInlineResponse, err error)

MakeInlineSegment makes an inline segment on the satellite.

func (*Endpoint) ProjectInfo added in v0.14.0

func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error)

ProjectInfo returns allowed ProjectInfo for the provided API key

func (*Endpoint) SegmentInfoOld added in v0.15.0

func (endpoint *Endpoint) SegmentInfoOld(ctx context.Context, req *pb.SegmentInfoRequestOld) (resp *pb.SegmentInfoResponseOld, err error)

SegmentInfoOld returns segment metadata info

func (*Endpoint) SetAttributionOld added in v0.15.0

func (endpoint *Endpoint) SetAttributionOld(ctx context.Context, req *pb.SetAttributionRequestOld) (_ *pb.SetAttributionResponseOld, err error)

SetAttributionOld tries to add attribution to the bucket.

func (*Endpoint) SetBucketAttribution added in v0.15.0

func (endpoint *Endpoint) SetBucketAttribution(ctx context.Context, req *pb.BucketSetAttributionRequest) (resp *pb.BucketSetAttributionResponse, err error)

SetBucketAttribution sets the bucket attribution.

type Loop added in v0.16.0

type Loop struct {
	// contains filtered or unexported fields
}

Loop is a metainfo loop service.

architecture: Service

func NewLoop added in v0.16.0

func NewLoop(config LoopConfig, db PointerDB) *Loop

NewLoop creates a new metainfo loop service.

func (*Loop) Close added in v0.22.0

func (loop *Loop) Close() (err error)

Close closes the looping services.

func (*Loop) Join added in v0.16.0

func (loop *Loop) Join(ctx context.Context, observer Observer) (err error)

Join joins the loop for one full cycle and returns once that cycle completes. If ctx is canceled, it returns before the cycle finishes. It returns nil only after a complete iteration. Safe to call concurrently.

func (*Loop) Run added in v0.16.0

func (loop *Loop) Run(ctx context.Context) (err error)

Run starts the looping service. It can only be called once, otherwise a panic will occur.

func (*Loop) Wait added in v0.16.0

func (loop *Loop) Wait()

Wait waits for Run to finish. Safe to call concurrently.
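
Putting Run, Join, Close, and Wait together, here is a minimal driver sketch. It assumes an existing PointerDB and Observer, assumes that Close is what makes Run return, and uses illustrative names and import paths.

import (
	"context"
	"time"

	"github.com/zeebo/errs"
	"golang.org/x/sync/errgroup"

	"storj.io/storj/satellite/metainfo" // import path is an assumption for this module version
)

// runOneCycle is a hypothetical driver: it runs the loop, lets one observer
// ride a single full cycle, and then shuts the loop down.
func runOneCycle(ctx context.Context, db metainfo.PointerDB, obs metainfo.Observer) error {
	loop := metainfo.NewLoop(metainfo.LoopConfig{CoalesceDuration: 5 * time.Second}, db)

	var group errgroup.Group
	group.Go(func() error {
		// Run blocks until the loop is closed or ctx is canceled.
		return loop.Run(ctx)
	})

	// Join blocks until one full iteration over the pointer database completes.
	joinErr := loop.Join(ctx, obs)

	// Close stops the loop; Run is then expected to return (possibly with
	// LoopClosedError, which a real caller might choose to ignore).
	closeErr := loop.Close()
	runErr := group.Wait()
	loop.Wait()

	return errs.Combine(joinErr, closeErr, runErr)
}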

type LoopConfig added in v0.16.0

type LoopConfig struct {
	CoalesceDuration time.Duration `help:"how long to wait for new observers before starting iteration" releaseDefault:"5s" devDefault:"5s"`
	RateLimit        float64       `help:"metainfo loop rate limit (default is 0 which is unlimited segments per second)" default:"0"`
}

LoopConfig contains configurable values for the metainfo loop.

type NodePieces added in v0.30.0

type NodePieces struct {
	Node   *pb.Node
	Pieces []storj.PieceID
}

NodePieces associates a storage node with the list of pieces that belong to it.

type NodesPieces added in v0.30.0

type NodesPieces []NodePieces

NodesPieces is a slice of NodePieces

func (NodesPieces) NumPieces added in v0.30.0

func (nodes NodesPieces) NumPieces() int

NumPieces returns the total number of pieces across all the storage nodes in the slice.

type NullObserver added in v0.33.3

type NullObserver struct{}

NullObserver is an observer that does nothing. This is useful for joining and ensuring the metainfo loop runs once before you use a real observer.
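
A short sketch of that pattern, assuming a loop that is already being run elsewhere (the helper name and import path are illustrative):

import (
	"context"

	"storj.io/storj/satellite/metainfo" // import path is an assumption for this module version
)

// waitForOneCycle is a hypothetical helper: NullObserver discards everything it
// is shown, so Join is used purely to block until the running loop completes
// one full pass over the pointer database.
func waitForOneCycle(ctx context.Context, loop *metainfo.Loop) error {
	return loop.Join(ctx, metainfo.NullObserver{})
}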

func (NullObserver) InlineSegment added in v0.33.3

func (NullObserver) InlineSegment(context.Context, ScopedPath, *pb.Pointer) error

InlineSegment implements the Observer interface.

func (NullObserver) Object added in v0.33.3

func (NullObserver) Object(context.Context, ScopedPath, *pb.Pointer) error

Object implements the Observer interface.

func (NullObserver) RemoteSegment added in v0.33.3

func (NullObserver) RemoteSegment(context.Context, ScopedPath, *pb.Pointer) error

RemoteSegment implements the Observer interface.

type Observer added in v0.16.0

type Observer interface {
	Object(context.Context, ScopedPath, *pb.Pointer) error
	RemoteSegment(context.Context, ScopedPath, *pb.Pointer) error
	InlineSegment(context.Context, ScopedPath, *pb.Pointer) error
}

Observer is an interface defining an observer that can subscribe to the metainfo loop.

architecture: Observer

type PointerDB added in v0.21.0

type PointerDB interface {
	storage.KeyValueStore
}

PointerDB stores pointers.

architecture: Database

func NewStore added in v0.11.0

func NewStore(logger *zap.Logger, dbURLString string) (db PointerDB, err error)

NewStore returns a database for storing pointer data.

type RSConfig added in v0.14.0

type RSConfig struct {
	MaxSegmentSize   memory.Size `help:"maximum segment size" default:"64MiB"`
	MaxBufferMem     memory.Size `help:"maximum buffer memory to be allocated for read buffers" default:"4MiB"`
	ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B"`
	MinThreshold     int         `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
	RepairThreshold  int         `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6"`
	SuccessThreshold int         `help:"the desired total pieces for a segment. o." releaseDefault:"80" devDefault:"8"`
	TotalThreshold   int         `help:"the largest amount of pieces to encode to. n." releaseDefault:"110" devDefault:"10"`

	// TODO left for validation until we will remove CreateSegmentOld
	MinTotalThreshold int  `help:"the largest amount of pieces to encode to. n (lower bound for validation)." releaseDefault:"95" devDefault:"10"`
	MaxTotalThreshold int  `help:"the largest amount of pieces to encode to. n (upper bound for validation)." releaseDefault:"130" devDefault:"10"`
	Validate          bool `help:"validate redundancy scheme configuration" default:"true"`
}

RSConfig is a configuration struct that holds details about the default redundancy strategy.
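
The thresholds are expected to satisfy k <= m <= o <= n. The check below is a hypothetical illustration of that relationship, not the package's own validation (which sits behind the Validate flag); the function name and import path are assumptions.

import "storj.io/storj/satellite/metainfo" // import path is an assumption for this module version

// plausibleRS is a hypothetical sanity check on the threshold ordering
// (MinThreshold <= RepairThreshold <= SuccessThreshold <= TotalThreshold).
func plausibleRS(c metainfo.RSConfig) bool {
	return 0 < c.MinThreshold &&
		c.MinThreshold <= c.RepairThreshold &&
		c.RepairThreshold <= c.SuccessThreshold &&
		c.SuccessThreshold <= c.TotalThreshold
}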

type RateLimiterConfig added in v0.31.0

type RateLimiterConfig struct {
	Enabled         bool          `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`
	Rate            float64       `help:"request rate per project per second." releaseDefault:"1000" devDefault:"100"`
	CacheCapacity   int           `help:"number of projects to cache." releaseDefault:"10000" devDefault:"10"`
	CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
}

RateLimiterConfig is a configuration struct for endpoint rate limiting

type Revocations added in v0.12.0

type Revocations interface {
	GetByProjectID(ctx context.Context, projectID uuid.UUID) ([][]byte, error)
}

Revocations defines the revocation store methods used by the endpoint.

architecture: Database

type ScopedPath added in v0.21.0

type ScopedPath struct {
	ProjectID           uuid.UUID
	ProjectIDString     string
	Segment             string
	BucketName          string
	EncryptedObjectPath string

	// Raw is the same path as pointerDB is using.
	Raw storj.Path
}

ScopedPath contains full expanded information about the path.

type Service added in v0.11.0

type Service struct {
	// contains filtered or unexported fields
}

Service provides the metainfo operations for pointers and buckets.

architecture: Service

func NewService added in v0.11.0

func NewService(logger *zap.Logger, db PointerDB, bucketsDB BucketsDB) *Service

NewService creates a new metainfo service.
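
A wiring sketch, assuming a pointer database connection string and an existing BucketsDB implementation (normally supplied by the satellite database); the names and import paths are illustrative assumptions.

import (
	"go.uber.org/zap"

	"storj.io/storj/satellite/metainfo" // import path is an assumption for this module version
)

// newMetainfoService is a hypothetical constructor chaining NewStore and NewService.
func newMetainfoService(log *zap.Logger, pointerDBURL string, bucketsDB metainfo.BucketsDB) (*metainfo.Service, error) {
	pointerDB, err := metainfo.NewStore(log.Named("pointerdb"), pointerDBURL)
	if err != nil {
		return nil, err
	}
	return metainfo.NewService(log.Named("metainfo"), pointerDB, bucketsDB), nil
}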

func (*Service) CreateBucket added in v0.15.0

func (s *Service) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)

CreateBucket creates a new bucket in the buckets db

func (*Service) Delete added in v0.11.0

func (s *Service) Delete(ctx context.Context, path string, oldPointerBytes []byte) (err error)

Delete deletes the pointer at path only when its stored bytes match oldPointerBytes; otherwise it fails.
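
A sketch of the compare-and-delete pattern this enables, reading the raw bytes with GetWithBytes (documented below) and deleting only if they still match; the helper name and import paths are assumptions.

import (
	"context"

	"storj.io/storj/satellite/metainfo" // import path is an assumption for this module version
)

// deleteIfUnchanged is a hypothetical helper: it deletes the pointer at path
// only if no other writer has replaced it since the bytes were read.
func deleteIfUnchanged(ctx context.Context, s *metainfo.Service, path string) error {
	pointerBytes, _, err := s.GetWithBytes(ctx, path)
	if err != nil {
		return err
	}
	return s.Delete(ctx, path, pointerBytes)
}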

func (*Service) DeleteBucket added in v0.15.0

func (s *Service) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)

DeleteBucket deletes a bucket from the buckets db.

func (*Service) Get added in v0.11.0

func (s *Service) Get(ctx context.Context, path string) (_ *pb.Pointer, err error)

Get gets decoded pointer from DB.

func (*Service) GetBucket added in v0.15.0

func (s *Service) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (_ storj.Bucket, err error)

GetBucket returns an existing bucket in the buckets db

func (*Service) GetWithBytes added in v0.25.1

func (s *Service) GetWithBytes(ctx context.Context, path string) (pointerBytes []byte, pointer *pb.Pointer, err error)

GetWithBytes gets both the protocol-buffers-encoded bytes and the decoded pointer from the DB.

func (*Service) IsBucketEmpty added in v0.34.1

func (s *Service) IsBucketEmpty(ctx context.Context, projectID uuid.UUID, bucketName []byte) (bool, error)

IsBucketEmpty returns whether bucket is empty.

func (*Service) List added in v0.11.0

func (s *Service) List(ctx context.Context, prefix string, startAfter string, recursive bool, limit int32,
	metaFlags uint32) (items []*pb.ListResponse_Item, more bool, err error)

List returns all Path keys in the pointers bucket

func (*Service) ListBuckets added in v0.15.0

func (s *Service) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error)

ListBuckets returns a list of buckets for a project

func (*Service) Put added in v0.11.0

func (s *Service) Put(ctx context.Context, path string, pointer *pb.Pointer) (err error)

Put puts a pointer into the db under a specific path.

func (*Service) UnsynchronizedDelete added in v0.25.1

func (s *Service) UnsynchronizedDelete(ctx context.Context, path string) (err error)

UnsynchronizedDelete deletes an item from the db without verifying whether the pointer has changed in the database.

func (*Service) UnsynchronizedPut added in v0.33.3

func (s *Service) UnsynchronizedPut(ctx context.Context, path string, pointer *pb.Pointer) (err error)

UnsynchronizedPut puts a pointer into the db under a specific path without checking for an existing pointer under the same path.

func (*Service) UpdateBucket added in v0.16.0

func (s *Service) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error)

UpdateBucket updates a bucket in the buckets db and returns the updated bucket.

func (*Service) UpdatePieces added in v0.16.0

func (s *Service) UpdatePieces(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error)

UpdatePieces calls UpdatePiecesCheckDuplicates with checkDuplicates equal to false.

func (*Service) UpdatePiecesCheckDuplicates added in v0.26.0

func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error)

UpdatePiecesCheckDuplicates atomically adds toAdd pieces and removes toRemove pieces from the pointer under path. ref is the pointer that caller received via Get prior to calling this method.

It first checks whether the pointer has been deleted or replaced. If checkDuplicates is true, it returns an error if any of the nodes to be added are already present in the pointer. It then removes the toRemove pieces and adds the toAdd pieces. A piece's node ID and hash can be replaced by including the piece in both toAdd and toRemove.
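
A sketch of the replacement pattern described above, assuming the caller holds the path, the pointer previously obtained via Get, and the old and new copies of the piece (same piece number, new node ID and hash); the helper name and import paths are assumptions.

import (
	"context"

	"storj.io/common/pb" // import paths are assumptions for this module version
	"storj.io/storj/satellite/metainfo"
)

// replacePiece is a hypothetical helper: removing the old copy and adding the
// new one in a single call swaps the piece's node ID and hash atomically.
// checkDuplicates is false here; true would additionally reject nodes that are
// already present in the pointer.
func replacePiece(ctx context.Context, s *metainfo.Service, path string, ref *pb.Pointer, oldPiece, newPiece *pb.RemotePiece) (*pb.Pointer, error) {
	return s.UpdatePiecesCheckDuplicates(ctx, path, ref,
		[]*pb.RemotePiece{newPiece}, // toAdd
		[]*pb.RemotePiece{oldPiece}, // toRemove
		false,
	)
}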

type TTLItem added in v0.13.0

type TTLItem struct {
	// contains filtered or unexported fields
}

TTLItem keeps the association between a serial number and its TTL.
