Documentation ¶
Overview ¶
Package uplink provides a variety of functions for accessing objects with Storj's uplink library.
Example (Interface) ¶
package main

import (
    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "log"

    "storj.io/storj/lib/uplink"
    "storj.io/storj/pkg/storj"
)

func logClose(fn func() error) {
    err := fn()
    if err != nil {
        fmt.Println(err)
    }
}

// WorkWithLibUplink uploads the specified data to the specified path in the
// specified bucket, using the specified Satellite, encryption key, and API key.
func WorkWithLibUplink(satelliteAddress string, encryptionKey *storj.Key, apiKey uplink.APIKey,
    bucketName, uploadPath string, dataToUpload []byte) error {
    ctx := context.Background()

    // Create an Uplink object with a default config
    upl, err := uplink.NewUplink(ctx, nil)
    if err != nil {
        return fmt.Errorf("could not create new Uplink object: %v", err)
    }
    defer logClose(upl.Close)

    // Open up the Project we will be working with
    proj, err := upl.OpenProject(ctx, satelliteAddress, apiKey)
    if err != nil {
        return fmt.Errorf("could not open project: %v", err)
    }
    defer logClose(proj.Close)

    // Create the desired Bucket within the Project
    _, err = proj.CreateBucket(ctx, bucketName, nil)
    if err != nil {
        return fmt.Errorf("could not create bucket: %v", err)
    }

    // Open up the desired Bucket within the Project
    bucket, err := proj.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *encryptionKey})
    if err != nil {
        return fmt.Errorf("could not open bucket %q: %v", bucketName, err)
    }
    defer logClose(bucket.Close)

    // Upload our Object to the specified path
    buf := bytes.NewBuffer(dataToUpload)
    err = bucket.UploadObject(ctx, uploadPath, buf, nil)
    if err != nil {
        return fmt.Errorf("could not upload: %v", err)
    }

    // Initiate a download of the same object again
    readBack, err := bucket.OpenObject(ctx, uploadPath)
    if err != nil {
        return fmt.Errorf("could not open object at %q: %v", uploadPath, err)
    }
    defer logClose(readBack.Close)

    // We want the whole thing, so range from 0 to -1
    strm, err := readBack.DownloadRange(ctx, 0, -1)
    if err != nil {
        return fmt.Errorf("could not initiate download: %v", err)
    }
    defer logClose(strm.Close)

    // Read everything from the stream
    receivedContents, err := ioutil.ReadAll(strm)
    if err != nil {
        return fmt.Errorf("could not read object: %v", err)
    }

    if !bytes.Equal(receivedContents, dataToUpload) {
        return fmt.Errorf("got different object back: %q != %q", dataToUpload, receivedContents)
    }
    return nil
}

func main() {
    const (
        myAPIKey = "change-me-to-the-api-key-created-in-satellite-gui"

        satellite       = "mars.tardigrade.io:7777"
        myBucket        = "my-first-bucket"
        myUploadPath    = "foo/bar/baz"
        myData          = "one fish two fish red fish blue fish"
        myEncryptionKey = "you'll never guess this"
    )

    var encryptionKey storj.Key
    copy(encryptionKey[:], []byte(myEncryptionKey))

    apiKey, err := uplink.ParseAPIKey(myAPIKey)
    if err != nil {
        log.Fatal("could not parse api key:", err)
    }

    err = WorkWithLibUplink(satellite, &encryptionKey, apiKey, myBucket, myUploadPath, []byte(myData))
    if err != nil {
        log.Fatal("error:", err)
    }
    fmt.Println("success!")
}
Output:
Index ¶
- Variables
- type APIKey
- type Bucket
- func (b *Bucket) Close() error
- func (b *Bucket) DeleteObject(ctx context.Context, path storj.Path) (err error)
- func (b *Bucket) ListObjects(ctx context.Context, cfg *ListOptions) (list storj.ObjectList, err error)
- func (b *Bucket) NewReader(ctx context.Context, path storj.Path) (_ ReadSeekCloser, err error)
- func (b *Bucket) NewWriter(ctx context.Context, path storj.Path, opts *UploadOptions) (_ io.WriteCloser, err error)
- func (b *Bucket) OpenObject(ctx context.Context, path storj.Path) (o *Object, err error)
- func (b *Bucket) UploadObject(ctx context.Context, path storj.Path, data io.Reader, opts *UploadOptions) (err error)
- type BucketConfig
- type BucketListOptions
- type Config
- type EncryptionAccess
- type ListOptions
- type Object
- type ObjectMeta
- type Project
- func (p *Project) Close() error
- func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConfig) (bucket storj.Bucket, err error)
- func (p *Project) DeleteBucket(ctx context.Context, bucket string) (err error)
- func (p *Project) GetBucketInfo(ctx context.Context, bucket string) (b storj.Bucket, bi *BucketConfig, err error)
- func (p *Project) ListBuckets(ctx context.Context, opts *BucketListOptions) (bl storj.BucketList, err error)
- func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *EncryptionAccess) (b *Bucket, err error)
- type ReadSeekCloser
- type Uplink
- type UploadOptions
Examples ¶
Constants ¶
This section is empty.
Variables ¶
var (
    // Error is the toplevel class of errors for the uplink library.
    Error = errs.Class("libuplink")
)
Functions ¶
This section is empty.
Types ¶
type APIKey ¶
type APIKey struct {
// contains filtered or unexported fields
}
APIKey represents an access credential to certain resources
type Bucket ¶
type Bucket struct {
    BucketConfig
    Name    string
    Created time.Time
    // contains filtered or unexported fields
}
Bucket represents operations you can perform on a bucket
func (*Bucket) DeleteObject ¶
func (b *Bucket) DeleteObject(ctx context.Context, path storj.Path) (err error)
DeleteObject removes an object, if authorized.
func (*Bucket) ListObjects ¶
func (b *Bucket) ListObjects(ctx context.Context, cfg *ListOptions) (list storj.ObjectList, err error)
ListObjects lists objects a user is authorized to see.
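For illustration, a minimal sketch of a single listing call, assuming a bucket already opened with OpenBucket and the same imports as the package example above. The storj.ListOptions fields used here (Prefix, Cursor, Direction, Recursive, Limit), the storj.After constant, and the Items/More fields of storj.ObjectList come from the underlying storj package and are assumptions of this sketch.

// listPrefix prints the paths of objects under a prefix in an open bucket.
func listPrefix(ctx context.Context, bucket *uplink.Bucket, prefix storj.Path) error {
    list, err := bucket.ListObjects(ctx, &uplink.ListOptions{
        Prefix:    prefix,      // restrict the listing to this path prefix
        Direction: storj.After, // iterate forward from the (empty) cursor
        Recursive: true,        // include objects in nested "directories"
        Limit:     100,         // page size
    })
    if err != nil {
        return fmt.Errorf("could not list objects: %v", err)
    }
    for _, item := range list.Items {
        fmt.Println(item.Path)
    }
    if list.More {
        fmt.Println("(more results available; repeat the call with an updated Cursor)")
    }
    return nil
}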
func (*Bucket) NewReader ¶ added in v0.12.0
func (b *Bucket) NewReader(ctx context.Context, path storj.Path) (_ ReadSeekCloser, err error)
NewReader creates a new reader that downloads the object data.
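A sketch of streaming an object's contents with NewReader, assuming an already-open *uplink.Bucket and the standard io package in addition to the imports from the package example.

// readObject copies the object's contents into w without buffering the whole object.
func readObject(ctx context.Context, bucket *uplink.Bucket, path storj.Path, w io.Writer) error {
    r, err := bucket.NewReader(ctx, path)
    if err != nil {
        return fmt.Errorf("could not create reader for %q: %v", path, err)
    }
    defer func() { _ = r.Close() }()

    // ReadSeekCloser is an io.Reader, so io.Copy streams the download.
    if _, err := io.Copy(w, r); err != nil {
        return fmt.Errorf("could not read %q: %v", path, err)
    }
    return nil
}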
func (*Bucket) NewWriter ¶ added in v0.12.0
func (b *Bucket) NewWriter(ctx context.Context, path storj.Path, opts *UploadOptions) (_ io.WriteCloser, err error)
NewWriter creates a writer which uploads the object.
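A corresponding upload sketch using NewWriter, under the same assumptions (open bucket, io import); passing nil options accepts the bucket defaults.

// writeObject streams data from r into a new object at path.
func writeObject(ctx context.Context, bucket *uplink.Bucket, path storj.Path, r io.Reader) error {
    w, err := bucket.NewWriter(ctx, path, nil) // nil UploadOptions = bucket defaults
    if err != nil {
        return fmt.Errorf("could not create writer for %q: %v", path, err)
    }
    if _, err := io.Copy(w, r); err != nil {
        _ = w.Close()
        return fmt.Errorf("could not write %q: %v", path, err)
    }
    // Close flushes any buffered data and finalizes the upload.
    return w.Close()
}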
func (*Bucket) OpenObject ¶
func (b *Bucket) OpenObject(ctx context.Context, path storj.Path) (o *Object, err error)
OpenObject returns an Object handle, if authorized.
type BucketConfig ¶
type BucketConfig struct {
    // PathCipher indicates which cipher suite is to be used for path
    // encryption within the new Bucket. If not set, AES-GCM encryption
    // will be used.
    PathCipher storj.CipherSuite

    // EncryptionParameters specifies the default encryption parameters to
    // be used for data encryption of new Objects in this bucket.
    EncryptionParameters storj.EncryptionParameters

    // Volatile groups config values that are likely to change semantics
    // or go away entirely between releases. Be careful when using them!
    Volatile struct {
        // RedundancyScheme defines the default Reed-Solomon and/or
        // Forward Error Correction encoding parameters to be used by
        // objects in this Bucket.
        RedundancyScheme storj.RedundancyScheme

        // SegmentsSize is the default segment size to use for new
        // objects in this Bucket.
        SegmentsSize memory.Size
    }
}
BucketConfig holds information about a bucket's configuration. This is filled in by the caller for use with CreateBucket(), or filled in by the library as Bucket.Config when a bucket is returned from OpenBucket().
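A minimal sketch of passing a BucketConfig to CreateBucket, assuming an open *uplink.Project; the storj.EncAESGCM cipher-suite constant comes from the underlying storj package and is an assumption of this sketch. Fields left at their zero values fall back to the library defaults.

// createConfiguredBucket creates a bucket with an explicit path-encryption cipher.
func createConfiguredBucket(ctx context.Context, proj *uplink.Project, name string) (storj.Bucket, error) {
    cfg := &uplink.BucketConfig{
        // AES-GCM for path encryption (also the default when PathCipher is unset).
        PathCipher: storj.EncAESGCM,
    }
    // EncryptionParameters and the Volatile fields are left as zero values,
    // so the library defaults apply.
    return proj.CreateBucket(ctx, name, cfg)
}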
type BucketListOptions ¶
type BucketListOptions = storj.BucketListOptions
BucketListOptions controls options to the ListBuckets() call.
type Config ¶
type Config struct {
    // Volatile groups config values that are likely to change semantics
    // or go away entirely between releases. Be careful when using them!
    Volatile struct {
        // TLS defines options that affect TLS negotiation for outbound
        // connections initiated by this uplink.
        TLS struct {
            // SkipPeerCAWhitelist determines whether to require all
            // remote hosts to have identity certificates signed by
            // Certificate Authorities in the default whitelist. If
            // set to true, the whitelist will be ignored.
            SkipPeerCAWhitelist bool

            // PeerCAWhitelistPath gives the path to a CA cert
            // whitelist file. It is ignored if SkipPeerCAWhitelist
            // is set. If empty, the internal default peer whitelist
            // is used.
            PeerCAWhitelistPath string
        }

        // PeerIDVersion specifies which identity versions of remote
        // peers this node will support.
        PeerIDVersion string

        // MaxInlineSize determines whether the uplink will attempt to
        // store a new object in the satellite's metainfo. Objects at
        // or below this size will be marked for inline storage, and
        // objects above this size will not. (The satellite may reject
        // the inline storage and require remote storage, still.)
        MaxInlineSize memory.Size

        // MaxMemory is the default maximum amount of memory to be
        // allocated for read buffers while performing decodes of
        // objects. (This option is overrideable per Bucket if the user
        // so desires.) If set to zero, the library default (4 MiB) will
        // be used. If set to a negative value, the system will use the
        // smallest amount of memory it can.
        MaxMemory memory.Size
    }
}
Config represents configuration options for an Uplink
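A sketch of constructing a non-default Config and handing it to NewUplink, for instance to skip the peer CA whitelist when talking to a private test network; only fields documented above are set.

// newTestUplink creates an Uplink that does not enforce the peer CA whitelist.
// Use this only against test networks you control.
func newTestUplink(ctx context.Context) (*uplink.Uplink, error) {
    var cfg uplink.Config
    cfg.Volatile.TLS.SkipPeerCAWhitelist = true
    return uplink.NewUplink(ctx, &cfg)
}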
type EncryptionAccess ¶
type EncryptionAccess struct {
    // Key is the base encryption key to be used for decrypting objects.
    Key storj.Key

    // EncryptedPathPrefix is the (possibly empty) encrypted version of the
    // path from the top of the storage Bucket to this point. This is
    // necessary to have in order to derive further encryption keys.
    EncryptedPathPrefix storj.Path
}
EncryptionAccess specifies the encryption details needed to encrypt or decrypt objects.
type ListOptions ¶
type ListOptions = storj.ListOptions
ListOptions controls options for the ListObjects() call.
type Object ¶
type Object struct {
    // Meta holds the metainfo associated with the Object.
    Meta ObjectMeta
    // contains filtered or unexported fields
}
An Object is a sequence of bytes with associated metadata, stored in the Storj network (or being prepared for such storage). It belongs to a specific bucket, and has a path and a size. It is comparable to a "file" in a conventional filesystem.
func (*Object) DownloadRange ¶
func (o *Object) DownloadRange(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error)
DownloadRange returns an Object's data. A length of -1 will mean (Object.Size - offset).
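A sketch of reading a byte range from an Object returned by OpenObject, assuming the ioutil import from the package example.

// downloadRange reads length bytes starting at offset from an open Object.
// Passing length = -1 requests everything from offset to the end of the Object.
func downloadRange(ctx context.Context, obj *uplink.Object, offset, length int64) ([]byte, error) {
    rc, err := obj.DownloadRange(ctx, offset, length)
    if err != nil {
        return nil, fmt.Errorf("could not initiate download: %v", err)
    }
    defer func() { _ = rc.Close() }()
    return ioutil.ReadAll(rc)
}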
type ObjectMeta ¶
type ObjectMeta struct {
    // Bucket gives the name of the bucket in which an Object is placed.
    Bucket string

    // Path is the path of the Object within the Bucket. Path components are
    // forward-slash-separated, like Unix file paths ("one/two/three").
    Path storj.Path

    // IsPrefix is true if this ObjectMeta does not refer to a specific
    // Object, but to some arbitrary point in the path hierarchy. This would
    // be called a "folder" or "directory" in a typical filesystem.
    IsPrefix bool

    // ContentType, if set, gives a MIME content-type for the Object, as
    // set when the object was created.
    ContentType string

    // Metadata contains the additional information about an Object that was
    // set when the object was created. See UploadOptions.Metadata for more
    // information.
    Metadata map[string]string

    // Created is the time at which the Object was created.
    Created time.Time

    // Modified is the time at which the Object was last modified.
    Modified time.Time

    // Expires is the time at which the Object expires (after which it will
    // be automatically deleted from storage nodes).
    Expires time.Time

    // Size gives the size of the Object in bytes.
    Size int64

    // Checksum gives a checksum of the contents of the Object.
    Checksum []byte

    // Volatile groups config values that are likely to change semantics
    // or go away entirely between releases. Be careful when using them!
    Volatile struct {
        // EncryptionParameters gives the encryption parameters being
        // used for the Object's data encryption.
        EncryptionParameters storj.EncryptionParameters

        // RedundancyScheme determines the Reed-Solomon and/or Forward
        // Error Correction encoding parameters to be used for this
        // Object.
        RedundancyScheme storj.RedundancyScheme

        // SegmentsSize gives the segment size being used for the
        // Object's data storage.
        SegmentsSize int64
    }
}
ObjectMeta contains metadata about a specific Object.
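A sketch of inspecting ObjectMeta via OpenObject, assuming an open *uplink.Bucket; only fields documented above are read.

// printObjectMeta opens an object and prints a few of its metadata fields.
func printObjectMeta(ctx context.Context, bucket *uplink.Bucket, path storj.Path) error {
    obj, err := bucket.OpenObject(ctx, path)
    if err != nil {
        return fmt.Errorf("could not open object at %q: %v", path, err)
    }
    defer func() { _ = obj.Close() }()

    meta := obj.Meta
    fmt.Println("bucket:      ", meta.Bucket)
    fmt.Println("path:        ", meta.Path)
    fmt.Println("size (bytes):", meta.Size)
    fmt.Println("content type:", meta.ContentType)
    fmt.Println("created:     ", meta.Created)
    return nil
}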
type Project ¶
type Project struct {
// contains filtered or unexported fields
}
Project represents a specific project access session.
func (*Project) CreateBucket ¶
func (p *Project) CreateBucket(ctx context.Context, name string, cfg *BucketConfig) (bucket storj.Bucket, err error)
CreateBucket creates a new bucket if authorized.
func (*Project) DeleteBucket ¶
func (p *Project) DeleteBucket(ctx context.Context, bucket string) (err error)
DeleteBucket deletes a bucket if authorized. If the bucket contains any Objects at the time of deletion, they may be lost permanently.
func (*Project) GetBucketInfo ¶
func (p *Project) GetBucketInfo(ctx context.Context, bucket string) (b storj.Bucket, bi *BucketConfig, err error)
GetBucketInfo returns info about the requested bucket if authorized.
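A small sketch of GetBucketInfo, assuming an open *uplink.Project; the Name and Created fields on the returned storj.Bucket come from the underlying storj package and are assumptions of this sketch.

// describeBucket prints a bucket's creation time and default path cipher.
func describeBucket(ctx context.Context, proj *uplink.Project, name string) error {
    b, cfg, err := proj.GetBucketInfo(ctx, name)
    if err != nil {
        return fmt.Errorf("could not get bucket info for %q: %v", name, err)
    }
    fmt.Println("name:       ", b.Name)
    fmt.Println("created:    ", b.Created)
    fmt.Println("path cipher:", cfg.PathCipher)
    return nil
}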
func (*Project) ListBuckets ¶
func (p *Project) ListBuckets(ctx context.Context, opts *BucketListOptions) (bl storj.BucketList, err error)
ListBuckets will list authorized buckets.
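A sketch of fetching a single page of buckets, assuming an open *uplink.Project; the BucketListOptions fields used here (Cursor, Direction, Limit), the storj.After constant, and the Items/More fields of storj.BucketList are assumptions based on the underlying storj package.

// listBuckets prints one page of buckets in the project.
func listBuckets(ctx context.Context, proj *uplink.Project) error {
    list, err := proj.ListBuckets(ctx, &uplink.BucketListOptions{
        Direction: storj.After, // iterate forward from the (empty) cursor
        Limit:     100,         // page size
    })
    if err != nil {
        return fmt.Errorf("could not list buckets: %v", err)
    }
    for _, b := range list.Items {
        fmt.Println(b.Name, b.Created)
    }
    if list.More {
        fmt.Println("(more buckets remain; repeat the call with an updated Cursor)")
    }
    return nil
}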
func (*Project) OpenBucket ¶
func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *EncryptionAccess) (b *Bucket, err error)
OpenBucket returns a Bucket handle with the given EncryptionAccess information.
type ReadSeekCloser ¶ added in v0.12.0
type ReadSeekCloser interface {
    io.Reader
    io.Seeker
    io.Closer
}
ReadSeekCloser combines the interfaces io.Reader, io.Seeker, and io.Closer.
type Uplink ¶
type Uplink struct {
// contains filtered or unexported fields
}
Uplink represents the main entrypoint to Storj V3. An Uplink connects to a specific Satellite and caches connections and resources, allowing one to create sessions delineated by specific access controls.
func NewUplink ¶
func NewUplink(ctx context.Context, cfg *Config) (*Uplink, error)
NewUplink creates a new Uplink. This is the first step of an uplink session; pass a user-specified config, or nil to use the default config.
type UploadOptions ¶
type UploadOptions struct {
    // ContentType, if set, gives a MIME content-type for the Object.
    ContentType string

    // Metadata contains additional information about an Object. It can
    // hold arbitrary textual fields and can be retrieved together with the
    // Object. Field names can be at most 1024 bytes long. Field values are
    // not individually limited in size, but the total of all metadata
    // (fields and values) can not exceed 4 kiB.
    Metadata map[string]string

    // Expires is the time at which the new Object can expire (be deleted
    // automatically from storage nodes).
    Expires time.Time

    // Volatile groups config values that are likely to change semantics
    // or go away entirely between releases. Be careful when using them!
    Volatile struct {
        // EncryptionParameters determines the cipher suite to use for
        // the Object's data encryption. If not set, the Bucket's
        // defaults will be used.
        EncryptionParameters storj.EncryptionParameters

        // RedundancyScheme determines the Reed-Solomon and/or Forward
        // Error Correction encoding parameters to be used for this
        // Object.
        RedundancyScheme storj.RedundancyScheme
    }
}
UploadOptions controls options about uploading a new Object, if authorized.
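A sketch of attaching UploadOptions to an upload via Bucket.UploadObject, assuming an open *uplink.Bucket plus the bytes and time packages; the content type, metadata key, and expiration shown are arbitrary illustrative values.

// uploadWithOptions uploads data and attaches a content type, custom metadata,
// and an expiration time to the new Object.
func uploadWithOptions(ctx context.Context, bucket *uplink.Bucket, path storj.Path, data []byte) error {
    opts := &uplink.UploadOptions{
        ContentType: "text/plain",
        Metadata: map[string]string{
            "owner": "docs-example", // arbitrary application-defined field
        },
        Expires: time.Now().Add(24 * time.Hour), // delete automatically after one day
    }
    return bucket.UploadObject(ctx, path, bytes.NewReader(data), opts)
}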