Documentation ¶
Index ¶
- func DefineFlags(flags *pflag.FlagSet)
- func ExtractQueryParameters(u *url.URL, options interface{})
- func FormatBackendURL(backend *backup.StorageBackend) (u url.URL)
- func ParseBackend(rawURL string, options *BackendOptions) (*backup.StorageBackend, error)
- type BackendOptions
- type BufferWriter
- type CompressType
- type ExternalStorage
- type ExternalStorageOptions
- type GCSBackendOptions
- type LocalStorage
- func (l *LocalStorage) CreateUploader(ctx context.Context, name string) (Uploader, error)
- func (l *LocalStorage) FileExists(ctx context.Context, name string) (bool, error)
- func (l *LocalStorage) Open(ctx context.Context, path string) (ReadSeekCloser, error)
- func (l *LocalStorage) Read(ctx context.Context, name string) ([]byte, error)
- func (l *LocalStorage) URI() string
- func (l *LocalStorage) WalkDir(ctx context.Context, opt *WalkOption, fn func(string, int64) error) error
- func (l *LocalStorage) Write(ctx context.Context, name string, data []byte) error
- type RangeInfo
- type ReadSeekCloser
- type S3BackendOptions
- type S3Storage
- func (rs *S3Storage) CreateUploader(ctx context.Context, name string) (Uploader, error)
- func (rs *S3Storage) FileExists(ctx context.Context, file string) (bool, error)
- func (rs *S3Storage) Open(ctx context.Context, path string) (ReadSeekCloser, error)
- func (rs *S3Storage) Read(ctx context.Context, file string) ([]byte, error)
- func (rs *S3Storage) URI() string
- func (rs *S3Storage) WalkDir(ctx context.Context, opt *WalkOption, fn func(string, int64) error) error
- func (rs *S3Storage) Write(ctx context.Context, file string, data []byte) error
- type S3Uploader
- type Uploader
- type WalkOption
- type Writer
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func DefineFlags ¶
DefineFlags adds flags to the flag set corresponding to all backend options.
func ExtractQueryParameters ¶
ExtractQueryParameters moves the query parameters of the URL into the options using reflection.
The options must be a pointer to a struct which contains only string or bool fields (more types will be supported in the future), and tagged for JSON serialization.
All of the URL's query parameters will be removed after calling this method.
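A minimal sketch of how this might be used. The import path and the `demoOptions` struct are assumptions for illustration; the package only requires a pointer to a struct of string/bool fields tagged for JSON serialization.

package main

import (
	"fmt"
	"net/url"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

// demoOptions is a hypothetical options struct, defined only for this sketch:
// string/bool fields whose JSON tags match the query parameter names.
type demoOptions struct {
	Endpoint       string `json:"endpoint"`
	ForcePathStyle bool   `json:"force-path-style"`
}

func main() {
	u, err := url.Parse("s3://bucket/prefix?endpoint=http://127.0.0.1:9000&force-path-style=true")
	if err != nil {
		panic(err)
	}

	opts := &demoOptions{}
	storage.ExtractQueryParameters(u, opts)

	// The matched parameters are copied into opts, and u's query string is cleared.
	fmt.Println(opts.Endpoint, opts.ForcePathStyle)
	fmt.Println(u.RawQuery) // expected to be empty afterwards
}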
func FormatBackendURL ¶
func FormatBackendURL(backend *backup.StorageBackend) (u url.URL)
FormatBackendURL obtains the raw URL which can be used to reconstruct the backend. The returned URL does not contain options for further configuring the backend. This is to avoid exposing secret tokens.
func ParseBackend ¶
func ParseBackend(rawURL string, options *BackendOptions) (*backup.StorageBackend, error)
ParseBackend constructs a structured backend description from the storage URL.
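A sketch of the round trip between ParseBackend and FormatBackendURL, assuming the import path below and that the `s3://` scheme is accepted by ParseBackend.

package main

import (
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	// Parse a storage URL into a structured backend description.
	backend, err := storage.ParseBackend("s3://my-bucket/backup-prefix", &storage.BackendOptions{})
	if err != nil {
		panic(err)
	}

	// Recover a plain URL for the backend; secret options are not included.
	u := storage.FormatBackendURL(backend)
	fmt.Println(u.String()) // e.g. s3://my-bucket/backup-prefix
}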
Types ¶
type BackendOptions ¶
type BackendOptions struct {
	S3  S3BackendOptions  `json:"s3" toml:"s3"`
	GCS GCSBackendOptions `json:"gcs" toml:"gcs"`
}
BackendOptions further configures the storage backend not expressed by the storage URL.
func (*BackendOptions) ParseFromFlags ¶
func (options *BackendOptions) ParseFromFlags(flags *pflag.FlagSet) error
ParseFromFlags obtains the backend options from the flag set.
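The usual flow is to register the flags with DefineFlags and read them back with ParseFromFlags. A minimal sketch, assuming the import path below; the flag name `s3.endpoint` is an assumption and should be checked against the flags actually registered.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// Register all backend-related flags on the flag set.
	storage.DefineFlags(flags)

	// The flag name here is illustrative only.
	if err := flags.Parse([]string{"--s3.endpoint=http://127.0.0.1:9000"}); err != nil {
		panic(err)
	}

	// Read the parsed flags back into structured backend options.
	opts := &storage.BackendOptions{}
	if err := opts.ParseFromFlags(flags); err != nil {
		panic(err)
	}
	fmt.Println(opts.S3.Endpoint)
}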
type BufferWriter ¶
type BufferWriter struct {
// contains filtered or unexported fields
}
BufferWriter is a Writer implementation on top of bytes.Buffer that is useful for testing.
func NewBufferWriter ¶
func NewBufferWriter() *BufferWriter
NewBufferWriter creates a Writer that simply writes to a buffer (useful for testing).
func (*BufferWriter) Close ¶
func (u *BufferWriter) Close(ctx context.Context) error
Close delegates to bytes.Buffer.
func (*BufferWriter) String ¶
func (u *BufferWriter) String() string
String delegates to bytes.Buffer.
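Since BufferWriter is described as a Writer implementation, it should also expose the context-aware Write method of that interface. A small testing-style sketch under that assumption, with the same assumed import path as above.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	ctx := context.Background()

	w := storage.NewBufferWriter()
	// Write is assumed from the Writer interface; it appends to the in-memory buffer.
	if _, err := w.Write(ctx, []byte("hello world")); err != nil {
		panic(err)
	}
	if err := w.Close(ctx); err != nil {
		panic(err)
	}

	fmt.Println(w.String()) // hello world
}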
type CompressType ¶
type CompressType uint8
CompressType represents the type of compression.
const (
	// NoCompression won't compress given bytes.
	NoCompression CompressType = iota
	// Gzip will compress given bytes in gzip format.
	Gzip
)
type ExternalStorage ¶
type ExternalStorage interface {
	// Write writes a file to storage.
	Write(ctx context.Context, name string, data []byte) error
	// Read reads a storage file.
	Read(ctx context.Context, name string) ([]byte, error)
	// FileExists returns true if the file exists.
	FileExists(ctx context.Context, name string) (bool, error)
	// Open opens a Reader by file path. path is a relative path to the storage base path.
	Open(ctx context.Context, path string) (ReadSeekCloser, error)
	// WalkDir traverses all the files in a dir.
	//
	// fn is the function called for each regular file visited by WalkDir.
	// The argument `path` is the file path that can be used in the `Open`
	// function; the argument `size` is the size in bytes of the file
	// determined by path.
	WalkDir(ctx context.Context, opt *WalkOption, fn func(path string, size int64) error) error
	// URI returns the base path as a URI.
	URI() string
	// CreateUploader creates an uploader that uploads chunks of data to storage.
	// It is currently designed for S3 multipart upload; e.g. CDC log backup uses
	// this to do multipart uploads and avoid generating small fragment files.
	CreateUploader(ctx context.Context, name string) (Uploader, error)
}
ExternalStorage represents a kind of file system storage.
func Create ¶
func Create(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) (ExternalStorage, error)
Create creates an ExternalStorage.
Please consider using `New` in the future.
func New ¶
func New(ctx context.Context, backend *backup.StorageBackend, opts *ExternalStorageOptions) (ExternalStorage, error)
New creates an ExternalStorage with options.
type ExternalStorageOptions ¶
type ExternalStorageOptions struct {
	// SendCredentials marks whether to send credentials downstream.
	//
	// This field should be set to false if the credentials are provided to
	// downstream via external key managers, e.g. on K8s or cloud provider.
	SendCredentials bool

	// SkipCheckPath marks whether to skip checking the path's existence.
	//
	// This should only be set to true in testing, to avoid interacting with the
	// real world.
	// When this field is false (i.e. path checking is enabled), the New()
	// function will ensure the path referred to by the backend exists by
	// recursively creating the folders. It will also return an error if such an
	// operation is impossible (e.g. when the bucket storing the path is missing).
	SkipCheckPath bool

	// HTTPClient to use. The created storage may ignore this field if it is not
	// directly using HTTP (e.g. the local storage).
	HTTPClient *http.Client
}
ExternalStorageOptions are backend-independent options provided to New.
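A sketch of opening a storage with New and exercising the basic ExternalStorage methods, assuming the import path below and that a `local://` URL maps to a local directory.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	ctx := context.Background()

	// Build a backend description from a URL, then open it with New.
	backend, err := storage.ParseBackend("local:///tmp/backup", &storage.BackendOptions{})
	if err != nil {
		panic(err)
	}

	store, err := storage.New(ctx, backend, &storage.ExternalStorageOptions{
		SendCredentials: false,
		SkipCheckPath:   false,
	})
	if err != nil {
		panic(err)
	}

	// A basic round trip through the ExternalStorage interface.
	if err := store.Write(ctx, "hello.txt", []byte("hello")); err != nil {
		panic(err)
	}
	data, err := store.Read(ctx, "hello.txt")
	if err != nil {
		panic(err)
	}
	ok, err := store.FileExists(ctx, "hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data), ok)
}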
type GCSBackendOptions ¶
type GCSBackendOptions struct {
	Endpoint        string `json:"endpoint" toml:"endpoint"`
	StorageClass    string `json:"storage-class" toml:"storage-class"`
	PredefinedACL   string `json:"predefined-acl" toml:"predefined-acl"`
	CredentialsFile string `json:"credentials-file" toml:"credentials-file"`
}
GCSBackendOptions are options for configuring the GCS storage.
type LocalStorage ¶
type LocalStorage struct {
// contains filtered or unexported fields
}
LocalStorage represents local file system storage. It is exported for use in tests.
func NewLocalStorage ¶
func NewLocalStorage(base string) (*LocalStorage, error)
NewLocalStorage returns a LocalStorage rooted at directory `base`. It is exported for tests.
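A minimal sketch, assuming `/tmp/backup` already exists and the same assumed import path as above.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	ctx := context.Background()

	// The base directory is assumed to exist already.
	ls, err := storage.NewLocalStorage("/tmp/backup")
	if err != nil {
		panic(err)
	}

	if err := ls.Write(ctx, "meta.txt", []byte("backup meta")); err != nil {
		panic(err)
	}
	data, err := ls.Read(ctx, "meta.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	fmt.Println(ls.URI()) // a file:// URI for the base path
}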
func (*LocalStorage) CreateUploader ¶
CreateUploader implements the ExternalStorage interface.
func (*LocalStorage) FileExists ¶
FileExists implements ExternalStorage.FileExists.
func (*LocalStorage) Open ¶
func (l *LocalStorage) Open(ctx context.Context, path string) (ReadSeekCloser, error)
Open opens a Reader by file path; the path is relative to the base path.
func (*LocalStorage) URI ¶
func (l *LocalStorage) URI() string
URI returns the base path as a URI with a file:/// prefix.
func (*LocalStorage) WalkDir ¶
func (l *LocalStorage) WalkDir(ctx context.Context, opt *WalkOption, fn func(string, int64) error) error
WalkDir traverses all the files in a directory.
fn is the function called for each regular file visited by WalkDir. The first argument is the file path that can be used in the `Open` function; the second argument is the size in bytes of the file determined by path.
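A sketch of walking a local directory; a zero-valued WalkOption uses the defaults described under WalkOption below. The import path is assumed as in the earlier sketches.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	ctx := context.Background()

	ls, err := storage.NewLocalStorage("/tmp/backup")
	if err != nil {
		panic(err)
	}

	// Visit every regular file below the base path; the path passed to fn can
	// be fed back into Open or Read.
	err = ls.WalkDir(ctx, &storage.WalkOption{}, func(path string, size int64) error {
		fmt.Printf("%s (%d bytes)\n", path, size)
		return nil
	})
	if err != nil {
		panic(err)
	}
}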
type RangeInfo ¶
type RangeInfo struct {
	// Start is the absolute position of the first byte of the byte range,
	// starting from 0.
	Start int64
	// End is the absolute position of the last byte of the byte range. This end
	// offset is inclusive, e.g. if the Size is 1000, the maximum value of End
	// would be 999.
	End int64
	// Size is the total size of the original file.
	Size int64
}
RangeInfo represents an HTTP Content-Range header value of the form `bytes [Start]-[End]/[Size]`.
func ParseRangeInfo ¶
ParseRangeInfo parses the Content-Range header and returns the offsets.
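For illustration, the header `Content-Range: bytes 0-999/5000` corresponds to the RangeInfo below; since the signature of ParseRangeInfo is not shown here, the value is constructed by hand.

package main

import (
	"fmt"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	// Equivalent to the header "Content-Range: bytes 0-999/5000".
	ri := storage.RangeInfo{Start: 0, End: 999, Size: 5000}

	// End is inclusive, so the number of bytes covered by the range is:
	length := ri.End - ri.Start + 1
	fmt.Println(length) // 1000
}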
type ReadSeekCloser ¶
ReadSeekCloser is the interface that groups the basic Read, Seek and Close methods.
type S3BackendOptions ¶
type S3BackendOptions struct {
	Endpoint              string `json:"endpoint" toml:"endpoint"`
	Region                string `json:"region" toml:"region"`
	StorageClass          string `json:"storage-class" toml:"storage-class"`
	Sse                   string `json:"sse" toml:"sse"`
	SseKmsKeyID           string `json:"sse-kms-key-id" toml:"sse-kms-key-id"`
	ACL                   string `json:"acl" toml:"acl"`
	AccessKey             string `json:"access-key" toml:"access-key"`
	SecretAccessKey       string `json:"secret-access-key" toml:"secret-access-key"`
	Provider              string `json:"provider" toml:"provider"`
	ForcePathStyle        bool   `json:"force-path-style" toml:"force-path-style"`
	UseAccelerateEndpoint bool   `json:"use-accelerate-endpoint" toml:"use-accelerate-endpoint"`
}
S3BackendOptions contains options for S3 storage.
type S3Storage ¶
type S3Storage struct {
// contains filtered or unexported fields
}
S3Storage is the ExternalStorage implementation backed by S3.
func NewS3StorageForTest ¶
NewS3StorageForTest creates a new S3Storage for testing only.
func (*S3Storage) CreateUploader ¶
CreateUploader creates a multipart upload request.
func (*S3Storage) FileExists ¶
FileExists checks if the file exists on S3 storage.
func (*S3Storage) WalkDir ¶
func (rs *S3Storage) WalkDir(ctx context.Context, opt *WalkOption, fn func(string, int64) error) error
WalkDir traverses all the files in a directory.
fn is the function called for each regular file visited by WalkDir. The first argument is the file path that can be used in the `Open` function; the second argument is the size in bytes of the file determined by path.
type S3Uploader ¶
type S3Uploader struct {
// contains filtered or unexported fields
}
S3Uploader does multipart uploads to S3.
func (*S3Uploader) CompleteUpload ¶
func (u *S3Uploader) CompleteUpload(ctx context.Context) error
CompleteUpload completes the multipart upload request.
func (*S3Uploader) UploadPart ¶
func (u *S3Uploader) UploadPart(ctx context.Context, data []byte) error
UploadPart uploads a part of the data to S3. CreateMultipartUpload should be called to start the upload, and CompleteMultipartUpload to finish it.
type Uploader ¶
type Uploader interface {
	// UploadPart uploads part of the file data to storage.
	UploadPart(ctx context.Context, data []byte) error
	// CompleteUpload turns the uploaded parts into a complete file.
	CompleteUpload(ctx context.Context) error
}
Uploader uploads a file in chunks.
type WalkOption ¶
type WalkOption struct {
	// SubDir restricts the walk to the given subdirectory of the base directory.
	SubDir string

	// ListCount is the number of entries per page.
	//
	// In cloud storages such as S3 and GCS, the listed files are sent in pages.
	// Typically a page contains 1000 files, and if a folder has 3000 descendant
	// files, one would need 3 requests to retrieve all of them. This parameter
	// controls this size. Note that both S3 and GCS limit the maximum to 1000.
	//
	// Typically you want to leave this field unassigned (zero) to use the
	// default value (1000) to minimize the number of requests, unless you want
	// to reduce the possibility of timeouts on an extremely slow connection, or
	// perform testing.
	ListCount int64
}
WalkOption is the option of storage.WalkDir.
type Writer ¶
type Writer interface {
	// Write writes to the buffer, and uploads the chunk once it is filled.
	Write(ctx context.Context, p []byte) (int, error)
	// Close writes the final chunk and completes the upload.
	Close(ctx context.Context) error
}
Writer is like io.Writer, but with a Context. Create a new Writer on top of an Uploader with NewUploaderWriter.
func NewUploaderWriter ¶
func NewUploaderWriter(uploader Uploader, chunkSize int, compressType CompressType) Writer
NewUploaderWriter wraps the Writer interface over an uploader.
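A sketch tying Uploader, CompressType and NewUploaderWriter together, assuming the import path below and a `local://` backend; the 8 MiB chunk size is an arbitrary illustration value.

package main

import (
	"context"

	"github.com/pingcap/br/pkg/storage" // assumed import path
)

func main() {
	ctx := context.Background()

	// Open any ExternalStorage; a local directory keeps the example simple.
	backend, err := storage.ParseBackend("local:///tmp/backup", &storage.BackendOptions{})
	if err != nil {
		panic(err)
	}
	store, err := storage.New(ctx, backend, &storage.ExternalStorageOptions{})
	if err != nil {
		panic(err)
	}

	// Ask the storage for an Uploader, then wrap it in a context-aware Writer.
	uploader, err := store.CreateUploader(ctx, "big-file.log")
	if err != nil {
		panic(err)
	}

	w := storage.NewUploaderWriter(uploader, 8*1024*1024, storage.NoCompression)
	if _, err := w.Write(ctx, []byte("some log data")); err != nil {
		panic(err)
	}
	// Close flushes the final chunk and completes the upload.
	if err := w.Close(ctx); err != nil {
		panic(err)
	}
}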