xy3

package module
v0.0.0-...-a6dfa66 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 24, 2024 License: MIT Imports: 13 Imported by: 0

README

xy3

Go Reference

xy3 is born out of my need to create S3 backups while using XYplorer. Here are the XYplorer's file associations that I use:

|"Download from S3" s3>"xy3.exe" "download"
|"Delete from S3" s3>"xy3.exe" "remove"
|"Compress and upload to S3" \>"xy3.exe" "upload" -b "bucket-name" -k "<curfolder>/"
|"Upload to S3" *>"xy3.exe" "upload" -b "bucket-name" -k "<curfolder>/"
|"Extract files" 7z;rar;zip>"xy3.exe" extract

CLI

# Uploading a file will generate a local .s3 (JSON) file that stores metadata about how to retrieve the file.
# For example, this command will create doc.txt.s3 and log.zip.s3.
xy3 up -b "bucket-name" -k "key-prefix/" --expected-bucket-owner "1234" doc.txt log.zip

# Downloading from the JSON .s3 files will create unique names to prevent duplicates.
# For example, since doc.txt and log.zip still exist, this command will create doc-1.txt and log-1.zip.
xy3 down doc.txt.s3 log.zip.s3

# To remove both local and remote files, use this command.
xy3 remove doc.txt.s3 log.zip.s3

Go Package

package main

import (
	"context"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/nguyengg/xy3"
	"log"
	"os"
	"os/signal"
)

// main uploads a single file to S3 with xy3.Upload, cancelling the upload
// when the process is interrupted.
func main() {
	// NOTE: os.Kill (SIGKILL) can never be caught or handled, so registering
	// it with signal.NotifyContext is a no-op; os.Interrupt is sufficient.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Panicf("create SDK default config error: %v", err)
	}

	client := s3.NewFromConfig(cfg)

	if _, err = xy3.Upload(ctx, client, "path/to/file.zip", &s3.CreateMultipartUploadInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
	}); err != nil {
		log.Panicf("upload error: %v", err)
	}
}

If you want to use github.com/aws/aws-sdk-go-v2/feature/s3/manager that comes with the SDK and adds logging:

package main

import (
	"bytes"
	"context"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/nguyengg/xy3/managerlogging"
	"log"
	"os"
	"os/signal"
)

// main demonstrates attaching managerlogging progress hooks to the SDK's
// manager.Uploader and manager.Downloader.
func main() {
	// NOTE: os.Kill (SIGKILL) can never be caught or handled, so registering
	// it with signal.NotifyContext is a no-op; os.Interrupt is sufficient.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Panicf("create SDK default config error: %v", err)
	}

	client := s3.NewFromConfig(cfg)

	// use a logger for all UploadPart.
	uploader := manager.NewUploader(client, managerlogging.LogSuccessfulUploadPart(log.Default()))

	// or specify them on a specific upload call.
	_, _ = uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
		Body:   bytes.NewReader([]byte("hello, world!")),
	}, managerlogging.LogSuccessfulUploadPartWithExpectedPartCount(log.Default(), 100))

	// same for download. Downloader.Download requires a non-nil io.WriterAt;
	// passing nil would panic on the first WriteAt call, so buffer in memory.
	downloader := manager.NewDownloader(client, managerlogging.LogSuccessfulDownloadPart(log.Default()))
	buf := manager.NewWriteAtBuffer(nil)
	_, _ = downloader.Download(ctx, buf, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
	}, managerlogging.LogSuccessfulDownloadPartWithExpectedPartCount(log.Default(), 100))
}

Documentation

Index

Constants

View Source
// Amazon S3 multipart upload limits; see
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html.
const (
	// MaxFileSize is the maximum S3 object size: 5 TiB in bytes.
	MaxFileSize        = int64(5_497_558_138_880)
	// MaxPartCount is the maximum number of parts in a multipart upload.
	MaxPartCount       = 10_000
	// MinPartSize is the minimum size of a part (except the last): 5 MiB in bytes.
	MinPartSize        = int64(5_242_880)
	// MaxPartSize is the maximum size of a single part: 5 GiB in bytes.
	MaxPartSize        = int64(5_368_709_120)
	// DefaultConcurrency is the default number of goroutines uploading or
	// downloading parts in parallel.
	DefaultConcurrency = 3
)

Amazon S3 multipart upload limits https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html

Variables

This section is empty.

Functions

func AddExpectedBucketOwnerToGetObject

func AddExpectedBucketOwnerToGetObject(expectedBucketOwner string) func(*s3.GetObjectInput)

AddExpectedBucketOwnerToGetObject modifies the s3.GetObjectInput by adding the expected bucket owner.

func AddExpectedBucketOwnerToHeadObject

func AddExpectedBucketOwnerToHeadObject(expectedBucketOwner string) func(*s3.HeadObjectInput)

AddExpectedBucketOwnerToHeadObject modifies the s3.HeadObjectInput by adding the expected bucket owner.

func Download

func Download(ctx context.Context, client DownloadAPIClient, bucket, key string, w io.Writer, optFns ...func(*Downloader)) error

Download downloads the S3 object specified by bucket and key and writes to the given io.Writer.

func Upload

Upload uploads the named file to S3 using multipart upload with progress report.

Unlike manager.Uploader which receives an io.Reader which in turn can upload objects of unknown size (good for streaming object on the fly), this method requires the object to be entirely contained in a file with known definite size. If you would like to use manager.Uploader, check out WrapUploadAPIClient which provides progress logging by decorating the manager.UploadAPIClient instance; a similar decorator exists for manager.DownloadAPIClient.

Unlike manager.Uploader which knows to use a single S3 PutObject if the file is small enough, this method always uses S3 Multipart Upload.

Types

type AbortAttempt

// AbortAttempt describes the outcome of the abort attempt that follows a
// failed multipart upload (see MultipartUploadError.Abort).
type AbortAttempt int
const (
	// AbortNotAttempted indicates no abort call was made.
	AbortNotAttempted AbortAttempt = iota
	// AbortSuccess indicates the abort call succeeded.
	AbortSuccess
	// AbortFailure indicates the abort call itself failed; see
	// MultipartUploadError.AbortErr for the error.
	AbortFailure
)

type DownloadAPIClient

// DownloadAPIClient declares a subset of S3 methods that is required by
// Download: HeadObject to determine the object's size, and GetObject for the
// ranged per-part downloads.
type DownloadAPIClient interface {
	HeadObject(context.Context, *s3.HeadObjectInput, ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
	GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error)
}

DownloadAPIClient declares a subset of S3 methods that is required by Download.

type Downloader

// Downloader is used to download files from S3 using ranged get with progress
// report.
type Downloader struct {
	// PartSize is the size of each part.
	//
	// Defaults to MinPartSize. Cannot be non-positive.
	PartSize int64

	// Concurrency is the number of goroutines responsible for uploading the parts in parallel.
	//
	// Defaults to DefaultConcurrency. Cannot be non-positive.
	Concurrency int

	// PostGetPart is called after every successful ranged [s3.Client.GetObject].
	//
	// By default, `log.Printf` will be used to print messages in format `downloaded %d/%d parts`. This hook will only
	// be called from the main goroutine that calls Download; the hook will be called right after the data slice has
	// been written to file. It can be used to hash the file as there is a guaranteed ordering (ascending part number
	// starting at 1, ending at partCount inclusive) to these callbacks, though it would be preferable to wrap the
	// io.Writer passed into Download as an io.MultiWriter instead.
	//
	// Implementations must not retain the data slice. Size is the file's Content-Length determined from the S3
	// HeadObject request.
	PostGetPart func(data []byte, size int64, partNumber, partCount int)

	// ModifyHeadObjectInput provides ways to customise the initial S3 HeadObject call to retrieve the size.
	//
	// See AddExpectedBucketOwnerToHeadObject for an example.
	ModifyHeadObjectInput func(*s3.HeadObjectInput)

	// ModifyGetObjectInput provides ways to customise the S3 GetObject calls to download each part.
	//
	// See AddExpectedBucketOwnerToGetObject for an example.
	ModifyGetObjectInput func(*s3.GetObjectInput)
	// contains filtered or unexported fields
}

Downloader is used to download files from S3 using ranged get with progress report.

type MultipartUploadError

// MultipartUploadError is returned only if an error occurs after
// s3.Client.CreateMultipartUpload has been called successfully, to provide
// the upload Id as well as the result of the abort attempt.
type MultipartUploadError struct {
	// Err is the underlying error that caused the upload to fail.
	Err      error
	// UploadID is the Id of the multipart upload that failed.
	UploadID string
	// Abort records whether an abort was attempted and its outcome.
	Abort    AbortAttempt
	// AbortErr is the error from the abort attempt, if any.
	AbortErr error
}

MultipartUploadError is returned only if an error occurs after s3.Client.CreateMultipartUpload has been called successfully to provide an upload Id as well as the result of the abort attempt.

func (MultipartUploadError) Error

func (e MultipartUploadError) Error() string

func (MultipartUploadError) Unwrap

func (e MultipartUploadError) Unwrap() error

type MultipartUploader

// MultipartUploader is used to upload files to S3 using multipart upload with
// progress report.
type MultipartUploader struct {
	// PartSize is the size of each part.
	//
	// Defaults to MinPartSize which is also the minimum. Cannot exceed MaxPartSize.
	PartSize int64

	// Concurrency is the number of goroutines responsible for uploading the parts in parallel.
	//
	// Defaults to DefaultConcurrency. Cannot be non-positive.
	Concurrency int

	// DisableAbortOnFailure controls whether upload failure will result in an attempt to call
	// [s3.Client.AbortMultipartUpload].
	//
	// By default, an abort attempt will be made.
	DisableAbortOnFailure bool

	// PreUploadPart is called before a [s3.Client.UploadPart] attempt.
	//
	// The data slice should not be modified nor retained lest it impacts the actual data uploaded to S3. This hook will
	// only be called from the main goroutine that calls Upload. It can be used to hash the file as there is a
	// guaranteed ordering (ascending part number starting at 1, ending at partCount inclusive) to these calls.
	PreUploadPart func(partNumber int32, data []byte)

	// PostUploadPart is called after every successful [s3.Client.UploadPart].
	//
	// By default, `log.Printf` will be used to print messages in format `uploaded %d/%d parts`. This hook will only be
	// called from the main goroutine that calls Upload. Unlike PreUploadPart, there is no guarantee to the ordering of
	// the parts being completed.
	PostUploadPart func(part s3types.CompletedPart, partCount int32)
	// contains filtered or unexported fields
}

MultipartUploader is used to upload files to S3 using multipart upload with progress report.

type UploadAPIClient

// UploadAPIClient declares a subset of S3 methods that is required by Upload:
// the create/upload-part/complete calls of the multipart upload flow, plus
// abort for cleanup on failure.
type UploadAPIClient interface {
	UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
}

UploadAPIClient declares a subset of S3 methods that is required by Upload.

Directories

Path Synopsis
cmd
xy3

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL