Documentation ¶
Index ¶
- Constants
- Variables
- func ApplyFileIncrement(fileName string, increment io.Reader) error
- func CheckType(path string) string
- func Configure() (*TarUploader, *Prefix, error)
- func Connect() (*pgx.Conn, error)
- func CreateUploader(svc s3iface.S3API, partsize, concurrency int) s3manageriface.UploaderAPI
- func DecompressLz4(d io.Writer, s io.Reader) (int64, error)
- func DecompressLzo(d io.Writer, s io.Reader) error
- func DownloadWALFile(pre *Prefix, walFileName string, location string)
- func ExtractAll(ti TarInterpreter, files []ReaderMaker) error
- func FormatName(s string) (string, error)
- func GetBackupPath(prefix *Prefix) *string
- func GetKeyRingId() string
- func GetSentinelUserData() interface{}
- func HandleBackupFetch(backupName string, pre *Prefix, dirArc string, mem bool) (lsn *uint64)
- func HandleBackupList(pre *Prefix)
- func HandleBackupPush(dirArc string, tu *TarUploader, pre *Prefix)
- func HandleDelete(pre *Prefix, args []string)
- func HandleTar(bundle TarBundle, path string, info os.FileInfo, crypter Crypter) error
- func HandleWALFetch(pre *Prefix, walFileName string, location string, triggerPrefetch bool)
- func HandleWALPrefetch(pre *Prefix, walFileName string, location string)
- func HandleWALPush(tu *TarUploader, dirArc string, pre *Prefix, verify bool)
- func IsPagedFile(info os.FileInfo, fileName string) bool
- func MoveFileAndCreateDirs(incrementalPath string, targetPath string, fileName string) (err error)
- func NextWALFileName(name string) (nextname string, err error)
- func ParseLsn(lsnStr string) (lsn uint64, err error)
- func ParsePageHeader(data []byte) (lsn uint64, valid bool)
- func ParseWALFileName(name string) (timelineId uint32, logSegNo uint64, err error)
- func ReadDatabaseFile(fileName string, lsn *uint64, isNew bool) (io.ReadCloser, bool, int64, error)
- func ResolveSymlink(path string) string
- func UploadWALFile(tu *TarUploader, dirArc string, pre *Prefix, verify bool)
- func WALFileName(lsn uint64, conn *pgx.Conn) (string, uint32, error)
- type Archive
- type Backup
- type BackupFileDescription
- type BackupFileList
- type BackupTime
- type BgUploader
- type Bundle
- func (b *Bundle) CheckSizeAndEnqueueBack(tb TarBall) error
- func (b *Bundle) CheckTimelineChanged(conn *pgx.Conn) bool
- func (b *Bundle) Deque() TarBall
- func (b *Bundle) EnqueueBack(tb TarBall, parallelOpInProgress *bool)
- func (b *Bundle) FinishQueue() error
- func (b *Bundle) GetFiles() *sync.Map
- func (b *Bundle) GetIncrementBaseFiles() BackupFileList
- func (b *Bundle) GetIncrementBaseLsn() *uint64
- func (bundle *Bundle) HandleLabelFiles(conn *pgx.Conn) (uint64, error)
- func (bundle *Bundle) HandleSentinel() error
- func (b *Bundle) NewTarBall(dedicatedUploader bool)
- func (b *Bundle) StartBackup(conn *pgx.Conn, backup string) (backupName string, lsn uint64, version int, err error)
- func (b *Bundle) StartQueue()
- func (bundle *Bundle) TarWalker(path string, info os.FileInfo, err error) error
- type CachedKey
- type Cleaner
- type Crypter
- type DelayWriteCloser
- type DeleteCommandArguments
- type Empty
- type EmptyWriteIgnorer
- type ExponentialTicker
- type FileSystemCleaner
- type FileTarInterpreter
- type IncrementalPageReader
- type Lz4CascadeClose
- type Lz4CascadeClose2
- type Lz4Error
- type LzPipeWriter
- type NilWriter
- type NoMatchAvailableError
- type OpenPGPCrypter
- type PgQueryRunner
- func (queryRunner *PgQueryRunner) BuildGetVersion() string
- func (queryRunner *PgQueryRunner) BuildStartBackup() (string, error)
- func (queryRunner *PgQueryRunner) BuildStopBackup() (string, error)
- func (queryRunner *PgQueryRunner) StartBackup(backup string) (backupName string, lsnString string, inRecovery bool, err error)
- func (queryRunner *PgQueryRunner) StopBackup() (label string, offsetMap string, lsnStr string, err error)
- type Prefix
- type QueryRunner
- type RaskyReader
- type ReadCascadeClose
- type ReaderMaker
- type S3ReaderMaker
- type S3TarBall
- func (s *S3TarBall) AddSize(i int64)
- func (b *S3TarBall) AwaitUploads()
- func (s *S3TarBall) BaseDir() string
- func (s *S3TarBall) CloseTar() error
- func (s *S3TarBall) Finish(sentinel *S3TarBallSentinelDto) error
- func (s *S3TarBall) Nop() bool
- func (s *S3TarBall) Number() int
- func (s *S3TarBall) SetUp(crypter Crypter, names ...string)
- func (s *S3TarBall) Size() int64
- func (s *S3TarBall) StartUpload(name string, crypter Crypter) io.WriteCloser
- func (s *S3TarBall) Trim() string
- func (s *S3TarBall) Tw() *tar.Writer
- type S3TarBallMaker
- type S3TarBallSentinelDto
- type Sentinel
- type TarBall
- type TarBallMaker
- type TarBundle
- type TarInterpreter
- type TarUploader
- type TimeSlice
- type UnsetEnvVarError
- type UnsupportedFileTypeError
- type WalFiles
- type ZeroReader
Constants ¶
const (
    // BlockSize is the PostgreSQL page size
    BlockSize uint16 = 8192
)
const SentinelSuffix = "_backup_stop_sentinel.json"
SentinelSuffix is the suffix of the backup-finish sentinel file
const (
    // WalSegmentSize is the size of one WAL file
    WalSegmentSize = uint64(16 * 1024 * 1024) // xlog.c line 113
)
Variables ¶
var Compressed uint32
Compressed is used to log compression ratio.
var DeleteUsage = "delete requires at least 2 parameters" + `
retain 5 keep 5 backups
retain FULL 5 keep 5 full backups and all deltas of them
retain FIND_FULL 5 find necessary full for 5th and keep everything after it
before base_0123 keep everything after base_0123 including itself
before FIND_FULL base_0123 keep everything after the base of base_0123`
DeleteUsage is a text message explaining how to use delete
var EXCLUDE = make(map[string]Empty)
EXCLUDE is the set of members excluded from the bundled backup.
var ErrCrypterUseMischief = errors.New("Crypter is not checked before use")
ErrCrypterUseMischief is returned when a crypter is used before initialization
var ErrInvalidBlock = errors.New("Block is not valid")
ErrInvalidBlock indicates that a file contains an invalid page and cannot be archived incrementally
var ErrLatestNotFound = errors.New("No backups found")
ErrLatestNotFound is returned when the user asks backup-fetch for LATEST but there are no backups
var ErrSentinelNotUploaded = errors.New("Sentinel was not uploaded due to timeline change during backup")
ErrSentinelNotUploaded is returned when the upload of the JSON sentinel fails
var MAXRETRIES = 7
MAXRETRIES is the maximum number of retries for upload.
var Uncompressed uint32
Uncompressed is used to log compression ratio.
Functions ¶
func ApplyFileIncrement ¶ added in v0.1.3
ApplyFileIncrement changes pages according to the supplied change map file
func Configure ¶
func Configure() (*TarUploader, *Prefix, error)
Configure connects to S3 and creates an uploader. It makes sure that a valid session has started; if invalid, returns AWS error and `<nil>` values.
Requires the WALE_S3_PREFIX environment variable to be set.
The upload part size of the S3 uploader can also be configured.
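For illustration, a backup-push entry point might call Configure roughly as follows; this is a minimal sketch, and the import path github.com/wal-g/wal-g, the walg alias, and the data directory path are assumptions, not taken from this documentation.

    package main

    import (
        "log"

        walg "github.com/wal-g/wal-g"
    )

    func main() {
        // Configure reads WALE_S3_PREFIX (and related settings) from the
        // environment and returns the uploader and bucket prefix.
        tu, pre, err := walg.Configure()
        if err != nil {
            log.Fatalf("configure: %v", err)
        }

        // The uploader and prefix are then handed to a command handler.
        walg.HandleBackupPush("/var/lib/postgresql/9.6/main", tu, pre)
    }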
func Connect ¶
Connect establishes a connection to postgres using a UNIX socket. Must export PGHOST and run with `sudo -E -u postgres`. If PGHOST is not set or if the connection fails, an error is returned and the connection is `<nil>`.
Example: PGHOST=/var/run/postgresql or PGHOST=10.0.0.1
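A fragment showing the intended call pattern, assuming PGHOST is exported and the process runs as the postgres user; the error handling is illustrative.

    // Run as: sudo -E -u postgres ... with PGHOST exported.
    conn, err := walg.Connect()
    if err != nil {
        log.Fatalf("connect: %v", err)
    }
    defer conn.Close()
    // conn can now be passed to Bundle.StartBackup or NewPgQueryRunner.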
func CreateUploader ¶
func CreateUploader(svc s3iface.S3API, partsize, concurrency int) s3manageriface.UploaderAPI
CreateUploader returns an uploader with customizable concurrency and partsize.
func DecompressLz4 ¶
DecompressLz4 decompresses a .lz4 file. Returns an error upon failure.
func DecompressLzo ¶
DecompressLzo decompresses an .lzo file. Returns the first error encountered.
func DownloadWALFile ¶ added in v0.1.7
DownloadWALFile downloads a WAL file and writes it to a local file
func ExtractAll ¶
func ExtractAll(ti TarInterpreter, files []ReaderMaker) error
ExtractAll handles all files passed in. Supported formats are `.lzo`, `.lz4`, and `.tar`. File type `.nop` is used for testing purposes. Each file is extracted in its own goroutine, and ExtractAll waits for all goroutines to finish. Returns the first error encountered.
func FormatName ¶
FormatName grabs the name of the WAL file and returns it in the form of `base_...`. If no match is found, returns an empty string and a `NoMatchAvailableError`.
func GetBackupPath ¶ added in v0.1.4
GetBackupPath returns the path for base backups in the bucket
func GetKeyRingId ¶ added in v0.1.3
func GetKeyRingId() string
GetKeyRingId extracts the name of the key to use from an environment variable
func GetSentinelUserData ¶ added in v0.1.8
func GetSentinelUserData() interface{}
GetSentinelUserData tries to parse the WALG_SENTINEL_USER_DATA environment variable
func HandleBackupFetch ¶ added in v0.1.3
HandleBackupFetch is invoked to perform wal-g backup-fetch
func HandleBackupList ¶ added in v0.1.3
func HandleBackupList(pre *Prefix)
HandleBackupList is invoked to perform wal-g backup-list
func HandleBackupPush ¶ added in v0.1.3
func HandleBackupPush(dirArc string, tu *TarUploader, pre *Prefix)
HandleBackupPush is invoked to perform wal-g backup-push
func HandleDelete ¶ added in v0.1.3
HandleDelete is invoked to perform wal-g delete
func HandleTar ¶
HandleTar creates the underlying tar writer and handles one given file. It does not follow symlinks. If a file is in EXCLUDE, it is not included in the final tarball. Excluded directories are created, but their contents are not written to local disk.
func HandleWALFetch ¶ added in v0.1.3
HandleWALFetch is invoked to perform wal-g wal-fetch
func HandleWALPrefetch ¶ added in v0.1.3
HandleWALPrefetch is invoked by the wal-fetch command to speed up database restoration
func HandleWALPush ¶ added in v0.1.3
func HandleWALPush(tu *TarUploader, dirArc string, pre *Prefix, verify bool)
HandleWALPush is invoked to perform wal-g wal-push
func IsPagedFile ¶ added in v0.1.3
IsPagedFile checks basic expectations for a paged file
func MoveFileAndCreateDirs ¶ added in v0.1.3
MoveFileAndCreateDirs moves a file from the incremental folder to the target folder, creating the necessary folder structure
func NextWALFileName ¶ added in v0.1.3
NextWALFileName computes the name of the next WAL segment
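For example, given a standard timeline/log/segment name, the next segment name should be the current one plus one; the concrete output shown is an assumption about the implementation.

    next, err := walg.NextWALFileName("000000010000000000000051")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(next) // expected: 000000010000000000000052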
func ParsePageHeader ¶ added in v0.1.3
ParsePageHeader reads information from PostgreSQL page header. Exported for test reasons.
func ParseWALFileName ¶ added in v0.1.3
ParseWALFileName extracts the numeric parts from a WAL file name
func ReadDatabaseFile ¶ added in v0.1.3
ReadDatabaseFile tries to read a file as an incremental data file if possible; otherwise it just opens the file
func ResolveSymlink ¶ added in v0.1.4
ResolveSymlink converts a path to its physical target if it is a symlink
func UploadWALFile ¶ added in v0.1.5
func UploadWALFile(tu *TarUploader, dirArc string, pre *Prefix, verify bool)
UploadWALFile uploads a WAL file from the local filesystem to the cloud
Types ¶
type Archive ¶
Archive contains information associated with a WAL archive.
func (*Archive) CheckExistence ¶
CheckExistence checks that the specified WAL file exists.
func (*Archive) GetArchive ¶
func (a *Archive) GetArchive() (io.ReadCloser, error)
GetArchive downloads the specified archive from S3.
type Backup ¶
Backup contains information about a valid backup generated and uploaded by WAL-G.
func (*Backup) CheckExistence ¶
CheckExistence checks that the specified backup exists.
func (*Backup) GetBackups ¶ added in v0.1.3
func (b *Backup) GetBackups() ([]BackupTime, error)
GetBackups receives backup descriptions and sorts them by time
type BackupFileDescription ¶ added in v0.1.3
type BackupFileDescription struct {
    IsIncremented bool // should never be both incremented and Skipped
    IsSkipped     bool
    MTime         time.Time
}
BackupFileDescription contains properties of one backup file
type BackupFileList ¶ added in v0.1.3
type BackupFileList map[string]BackupFileDescription
BackupFileList is a map of file properties in a backup
type BackupTime ¶
BackupTime is used to sort backups by latest modified time.
func GetBackupTimeSlices ¶ added in v0.1.3
func GetBackupTimeSlices(backups []*s3.Object) []BackupTime
GetBackupTimeSlices converts S3 objects to backup descriptions
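A sketch of feeding S3 list results into GetBackupTimeSlices; the aws-sdk-go calls are standard, svc is an *s3.S3 client created elsewhere (see the TarUploader example further down), and the bucket name and basebackups_005/ prefix are assumptions.

    resp, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
        Bucket: aws.String("my-bucket"),
        Prefix: aws.String("basebackups_005/"),
    })
    if err != nil {
        log.Fatal(err)
    }
    backups := walg.GetBackupTimeSlices(resp.Contents) // resp.Contents is []*s3.Object
    for _, b := range backups {
        fmt.Println(b)
    }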
type BgUploader ¶ added in v0.1.5
type BgUploader struct {
// contains filtered or unexported fields
}
BgUploader represents the state of concurrent WAL upload
func (*BgUploader) Start ¶ added in v0.1.5
func (u *BgUploader) Start(walFilePath string, maxParallelWorkers int32, tu *TarUploader, pre *Prefix, verify bool)
Start starts the uploader, checking what's inside archive_status
func (*BgUploader) Upload ¶ added in v0.1.5
func (u *BgUploader) Upload(info os.FileInfo)
Upload one WAL file
type Bundle ¶
type Bundle struct {
    MinSize            int64
    Sen                *Sentinel
    Tb                 TarBall
    Tbm                TarBallMaker
    Crypter            OpenPGPCrypter
    Timeline           uint32
    Replica            bool
    IncrementFromLsn   *uint64
    IncrementFromFiles BackupFileList
    Files              *sync.Map
    // contains filtered or unexported fields
}
A Bundle represents the directory to be walked. Contains at least one TarBall if walk has started. Each TarBall will be at least MinSize bytes. The Sentinel is used to ensure complete uploaded backups; in this case, pg_control is used as the sentinel.
func (*Bundle) CheckSizeAndEnqueueBack ¶ added in v0.1.8
func (*Bundle) CheckTimelineChanged ¶ added in v0.1.3
CheckTimelineChanged compares timelines of pg_backup_start() and pg_backup_stop()
func (*Bundle) EnqueueBack ¶ added in v0.1.8
func (*Bundle) FinishQueue ¶ added in v0.1.8
func (*Bundle) GetIncrementBaseFiles ¶ added in v0.1.3
func (b *Bundle) GetIncrementBaseFiles() BackupFileList
GetIncrementBaseFiles returns the list of files from the previous backup
func (*Bundle) GetIncrementBaseLsn ¶ added in v0.1.3
GetIncrementBaseLsn returns the LSN of the previous backup
func (*Bundle) HandleLabelFiles ¶
HandleLabelFiles creates the `backup_label` and `tablespace_map` files and uploads them to S3 by stopping the backup. Returns an error upon failure.
func (*Bundle) HandleSentinel ¶
HandleSentinel uploads the compressed tar file of `pg_control`. Will only be called after the rest of the backup is successfully uploaded to S3. Returns an error upon failure.
func (*Bundle) NewTarBall ¶
NewTarBall starts writing a new tarball
func (*Bundle) StartBackup ¶ added in v0.1.3
func (b *Bundle) StartBackup(conn *pgx.Conn, backup string) (backupName string, lsn uint64, version int, err error)
StartBackup starts a non-exclusive base backup immediately. When finishing the backup, `backup_label` and `tablespace_map` contents are not immediately written to a file but returned instead. Returns empty string and an error if backup fails.
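A rough sketch of starting a backup by hand (HandleBackupPush normally drives this); the MinSize value and the use of the current time as the backup label are assumptions.

    conn, err := walg.Connect()
    if err != nil {
        log.Fatal(err)
    }
    bundle := &walg.Bundle{MinSize: int64(1000000000)} // ~1 GB tar parts, illustrative
    name, lsn, version, err := bundle.StartBackup(conn, time.Now().String())
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("started %s at lsn %X on server version %d", name, lsn, version)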
func (*Bundle) StartQueue ¶ added in v0.1.8
func (b *Bundle) StartQueue()
func (*Bundle) TarWalker ¶
TarWalker walks the files in the provided directory and creates compressed tar members labeled as `part_00i.tar.lzo`.
To see which files and directories are Skipped, please consult 'structs.go'. Excluded directories will be created but their contents will not be included in the tar bundle.
type CachedKey ¶ added in v0.1.3
CachedKey is the data transfer object describing the format of the key ring cache
type Cleaner ¶ added in v0.1.3
The Cleaner interface separates file system logic from prefetch-cleaning logic so that it can be tested
type Crypter ¶ added in v0.1.3
type Crypter interface {
    IsUsed() bool
    Encrypt(writer io.WriteCloser) (io.WriteCloser, error)
    Decrypt(reader io.ReadCloser) (io.Reader, error)
}
Crypter is responsible for making cryptographic pipeline parts when needed
type DelayWriteCloser ¶ added in v0.1.3
type DelayWriteCloser struct {
// contains filtered or unexported fields
}
DelayWriteCloser delays the first write. Encryption starts writing a header immediately, but in many places the writer is instantiated long before the pipe is ready. This special writer therefore delays encryption initialization until the first actual write. If no write occurs, initialization is still performed, so zero-byte files are handled correctly.
func (*DelayWriteCloser) Close ¶ added in v0.1.3
func (d *DelayWriteCloser) Close() error
Close DelayWriteCloser
type DeleteCommandArguments ¶ added in v0.1.3
type DeleteCommandArguments struct {
// contains filtered or unexported fields
}
DeleteCommandArguments encapsulates the arguments for the delete command
func ParseDeleteArguments ¶ added in v0.1.3
func ParseDeleteArguments(args []string, fallBackFunc func()) (result DeleteCommandArguments)
ParseDeleteArguments interprets arguments for delete command. TODO: use flags or cobra
type EmptyWriteIgnorer ¶
type EmptyWriteIgnorer struct {
io.WriteCloser
}
EmptyWriteIgnorer handles 0 byte write in LZ4 package to stop pipe reader/writer from blocking.
type ExponentialTicker ¶
type ExponentialTicker struct {
    MaxRetries int
    MaxWait    float64
    // contains filtered or unexported fields
}
ExponentialTicker is used for exponential backoff for uploading to S3. If the max wait time is reached, retries will occur after max wait time intervals up to max retries.
func NewExpTicker ¶
func NewExpTicker(retries int, wait float64) *ExponentialTicker
NewExpTicker creates a new ExponentialTicker with configurable max number of retries and max wait time.
func (*ExponentialTicker) Update ¶
func (et *ExponentialTicker) Update()
Update increases running count of retries by 1 and exponentially increases the wait time until the max wait time is reached.
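A sketch of the retry loop this ticker supports; tryUpload is a hypothetical upload attempt, and the fixed one-second sleep stands in for the internal wait value, which is unexported.

    et := walg.NewExpTicker(walg.MAXRETRIES, 300.0)
    for i := 0; i < et.MaxRetries; i++ {
        if err := tryUpload(); err == nil { // tryUpload is hypothetical
            break
        }
        et.Update() // exponentially grows the wait, capped at MaxWait
        time.Sleep(time.Second)
    }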
type FileSystemCleaner ¶ added in v0.1.3
type FileSystemCleaner struct{}
FileSystemCleaner performs its functions on the actual file system
func (FileSystemCleaner) GetFiles ¶ added in v0.1.3
func (c FileSystemCleaner) GetFiles(directory string) (files []string, err error)
GetFiles of a directory
func (FileSystemCleaner) Remove ¶ added in v0.1.3
func (c FileSystemCleaner) Remove(file string)
Remove file
type FileTarInterpreter ¶
type FileTarInterpreter struct {
    NewDir             string
    Sentinel           S3TarBallSentinelDto
    IncrementalBaseDir string
}
FileTarInterpreter extracts input to disk.
type IncrementalPageReader ¶ added in v0.1.3
type IncrementalPageReader struct {
// contains filtered or unexported fields
}
IncrementalPageReader constructs a difference map during initialization and then re-reads the file. The diff map can be up to 1Gb/PostgresBlockSize elements == 512Kb.
func (*IncrementalPageReader) Close ¶ added in v0.1.3
func (pr *IncrementalPageReader) Close() error
Close IncrementalPageReader
type Lz4CascadeClose ¶
type Lz4CascadeClose struct {
    *lz4.Writer
    Underlying io.WriteCloser
}
Lz4CascadeClose bundles multiple closers into one. Calling Close() closes both the lz4 writer and the underlying writer.
func (*Lz4CascadeClose) Close ¶
func (lcc *Lz4CascadeClose) Close() error
Close returns the first encountered error from closing the lz4 writer or the underlying writer.
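A sketch of composing an lz4 writer with its destination file; it assumes the github.com/pierrec/lz4 package implied by the embedded *lz4.Writer, and the file name is illustrative.

    f, err := os.Create("example.tar.lz4")
    if err != nil {
        log.Fatal(err)
    }
    wc := &walg.Lz4CascadeClose{Writer: lz4.NewWriter(f), Underlying: f}
    if _, err := wc.Write([]byte("payload")); err != nil {
        log.Fatal(err)
    }
    // Close flushes and closes the lz4 writer first, then the underlying file.
    if err := wc.Close(); err != nil {
        log.Fatal(err)
    }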
type Lz4CascadeClose2 ¶ added in v0.1.3
type Lz4CascadeClose2 struct {
    *lz4.Writer
    Underlying  io.WriteCloser
    Underlying2 io.WriteCloser
}
Lz4CascadeClose2 cascades closes through two independent closers. This peculiar behavior is required to handle the OpenPGP writer's behavior.
func (*Lz4CascadeClose2) Close ¶ added in v0.1.3
func (lcc *Lz4CascadeClose2) Close() error
Close returns the first encountered error from closing the lz4 writer or the underlying writer.
type Lz4Error ¶
type Lz4Error struct {
// contains filtered or unexported fields
}
Lz4Error is used to catch specific errors from Lz4PipeWriter when uploading to S3. Will not retry upload if this error occurs.
type LzPipeWriter ¶
LzPipeWriter allows for flexibility of using compressed output. Input is read and compressed to a pipe reader.
func (*LzPipeWriter) Compress ¶
func (p *LzPipeWriter) Compress(crypter Crypter)
Compress compresses input to a pipe reader. Output must be used or pipe will block.
type NoMatchAvailableError ¶
type NoMatchAvailableError struct {
// contains filtered or unexported fields
}
NoMatchAvailableError is used to signal no match found in string.
func (NoMatchAvailableError) Error ¶
func (e NoMatchAvailableError) Error() string
type OpenPGPCrypter ¶ added in v0.1.3
type OpenPGPCrypter struct {
// contains filtered or unexported fields
}
OpenPGPCrypter encapsulates the specifics of the cipher method: keys, infrastructure information, etc. If more encryption methods are ever used, it is worth extracting an interface.
func (*OpenPGPCrypter) ConfigureGPGCrypter ¶ added in v0.1.3
func (crypter *OpenPGPCrypter) ConfigureGPGCrypter()
ConfigureGPGCrypter performs OpenPGPCrypter's internal initialization
func (*OpenPGPCrypter) Decrypt ¶ added in v0.1.3
func (crypter *OpenPGPCrypter) Decrypt(reader io.ReadCloser) (io.Reader, error)
Decrypt creates a decrypted reader from an ordinary reader
func (*OpenPGPCrypter) Encrypt ¶ added in v0.1.3
func (crypter *OpenPGPCrypter) Encrypt(writer io.WriteCloser) (io.WriteCloser, error)
Encrypt creates an encryption writer from an ordinary writer
func (*OpenPGPCrypter) IsUsed ¶ added in v0.1.3
func (crypter *OpenPGPCrypter) IsUsed() bool
IsUsed checks whether the Crypter needs to be used. Must be called prior to any other crypter call.
type PgQueryRunner ¶ added in v0.1.8
type PgQueryRunner struct {
    Version int
    // contains filtered or unexported fields
}
PgQueryRunner is the implementation for controlling PostgreSQL 9.0+
func NewPgQueryRunner ¶ added in v0.1.8
func NewPgQueryRunner(conn *pgx.Conn) (*PgQueryRunner, error)
NewPgQueryRunner builds a QueryRunner from an available connection
func (*PgQueryRunner) BuildGetVersion ¶ added in v0.1.8
func (queryRunner *PgQueryRunner) BuildGetVersion() string
BuildGetVersion formats a query to retrieve the PostgreSQL numeric version
func (*PgQueryRunner) BuildStartBackup ¶ added in v0.1.8
func (queryRunner *PgQueryRunner) BuildStartBackup() (string, error)
BuildStartBackup formats a query that starts backup according to server features and version
func (*PgQueryRunner) BuildStopBackup ¶ added in v0.1.8
func (queryRunner *PgQueryRunner) BuildStopBackup() (string, error)
BuildStopBackup formats a query that stops backup according to server features and version
func (*PgQueryRunner) StartBackup ¶ added in v0.1.8
func (queryRunner *PgQueryRunner) StartBackup(backup string) (backupName string, lsnString string, inRecovery bool, err error)
StartBackup informs the database that we are starting a copy of the cluster's contents
func (*PgQueryRunner) StopBackup ¶ added in v0.1.8
func (queryRunner *PgQueryRunner) StopBackup() (label string, offsetMap string, lsnStr string, err error)
StopBackup informs the database that the copy is over
type QueryRunner ¶ added in v0.1.8
type QueryRunner interface {
    // This call should inform the database that we are going to copy cluster's contents
    // Should fail if backup is currently impossible
    StartBackup(backup string) (string, string, bool, error)
    // Inform database that contents are copied, get information on backup
    StopBackup() (string, string, string, error)
}
The QueryRunner interface for controlling database during backup
type RaskyReader ¶
RaskyReader handles cases when the Rasky lzo package crashes. Occurs if byte size is too small (1-5).
type ReadCascadeClose ¶ added in v0.1.3
ReadCascadeClose composes io.ReadCloser from two parts
type ReaderMaker ¶
type ReaderMaker interface {
    Reader() (io.ReadCloser, error)
    Format() string
    Path() string
}
ReaderMaker is the generic interface used by extract. It allows for ease of handling different file formats.
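A minimal sketch of a custom ReaderMaker driving ExtractAll; localReaderMaker is hypothetical, the target directory and archive name are illustrative, and CheckType is assumed to derive the format from the file extension. Needed imports: io, os, and the wal-g package.

    // localReaderMaker is a hypothetical ReaderMaker over a local archive file.
    type localReaderMaker struct{ path string }

    func (m *localReaderMaker) Reader() (io.ReadCloser, error) { return os.Open(m.path) }
    func (m *localReaderMaker) Format() string                 { return walg.CheckType(m.path) }
    func (m *localReaderMaker) Path() string                   { return m.path }

    func restore() error {
        ti := &walg.FileTarInterpreter{NewDir: "/tmp/restore"}
        files := []walg.ReaderMaker{&localReaderMaker{path: "part_001.tar.lz4"}}
        return walg.ExtractAll(ti, files)
    }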
type S3ReaderMaker ¶
S3ReaderMaker creates readers for backup files stored in S3.
func (*S3ReaderMaker) Reader ¶
func (s *S3ReaderMaker) Reader() (io.ReadCloser, error)
Reader creates a new S3 reader for each S3 object.
type S3TarBall ¶
type S3TarBall struct {
    Lsn              *uint64
    IncrementFromLsn *uint64
    IncrementFrom    string
    Files            BackupFileList
    // contains filtered or unexported fields
}
S3TarBall represents a tar file that is going to be uploaded to S3.
func (*S3TarBall) AwaitUploads ¶ added in v0.1.8
func (b *S3TarBall) AwaitUploads()
func (*S3TarBall) CloseTar ¶
CloseTar closes the tar writer, flushing any unwritten data to the underlying writer before also closing the underlying writer.
func (*S3TarBall) Finish ¶
func (s *S3TarBall) Finish(sentinel *S3TarBallSentinelDto) error
Finish writes an empty .json file and uploads it with the backup name. Finish waits until all tar file parts have been uploaded. The JSON file is only uploaded if all other parts of the backup are present in S3; if they are not, an alert is given with the corresponding error.
func (*S3TarBall) SetUp ¶
SetUp creates a new tar writer and starts upload to S3. Upload will block until the tar file is finished writing. If a name for the file is not given, default name is of the form `part_....tar.lz4`.
func (*S3TarBall) StartUpload ¶
func (s *S3TarBall) StartUpload(name string, crypter Crypter) io.WriteCloser
StartUpload creates a lz4 writer and runs upload in the background once a compressed tar member is finished writing.
type S3TarBallMaker ¶
type S3TarBallMaker struct {
    BaseDir          string
    Trim             string
    BkupName         string
    Tu               *TarUploader
    Lsn              *uint64
    IncrementFromLsn *uint64
    IncrementFrom    string
    // contains filtered or unexported fields
}
S3TarBallMaker creates tarballs that are uploaded to S3.
func (*S3TarBallMaker) Make ¶
func (s *S3TarBallMaker) Make(dedicatedUploader bool) TarBall
Make returns a tarball with required S3 fields.
type S3TarBallSentinelDto ¶ added in v0.1.3
type S3TarBallSentinelDto struct {
    LSN               *uint64
    IncrementFromLSN  *uint64 `json:"DeltaFromLSN,omitempty"`
    IncrementFrom     *string `json:"DeltaFrom,omitempty"`
    IncrementFullName *string `json:"DeltaFullName,omitempty"`
    IncrementCount    *int    `json:"DeltaCount,omitempty"`
    Files             BackupFileList
    PgVersion         int
    FinishLSN         *uint64
    UserData          interface{} `json:"UserData,omitempty"`
}
S3TarBallSentinelDto describes the file structure of the JSON sentinel
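As a rough illustration of the JSON this DTO produces (the field values are invented, and nil optional fields are dropped by their omitempty tags; imports encoding/json and fmt are assumed):

    lsn := uint64(0x2000028)
    dto := walg.S3TarBallSentinelDto{LSN: &lsn, PgVersion: 90600, Files: walg.BackupFileList{}}
    out, _ := json.Marshal(&dto)
    fmt.Println(string(out))
    // Prints something like:
    // {"LSN":33554472,"Files":{},"PgVersion":90600,"FinishLSN":null}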
func (*S3TarBallSentinelDto) IsIncremental ¶ added in v0.1.3
func (dto *S3TarBallSentinelDto) IsIncremental() bool
IsIncremental reports whether the sentinel represents a delta backup
func (*S3TarBallSentinelDto) SetFiles ¶ added in v0.1.8
func (s *S3TarBallSentinelDto) SetFiles(p *sync.Map)
type TarBall ¶
type TarBall interface {
    SetUp(crypter Crypter, args ...string)
    CloseTar() error
    Finish(sentinel *S3TarBallSentinelDto) error
    BaseDir() string
    Trim() string
    Nop() bool
    Number() int
    Size() int64
    AddSize(int64)
    Tw() *tar.Writer
    AwaitUploads()
}
A TarBall represents one tar file.
type TarBallMaker ¶
TarBallMaker is used to allow for flexible creation of different TarBalls.
type TarBundle ¶
type TarBundle interface {
    NewTarBall(dedicatedUploader bool)
    GetIncrementBaseLsn() *uint64
    GetIncrementBaseFiles() BackupFileList
    StartQueue()
    Deque() TarBall
    EnqueueBack(tb TarBall, parallelOpInProgress *bool)
    CheckSizeAndEnqueueBack(tb TarBall) error
    FinishQueue() error
    GetFiles() *sync.Map
}
TarBundle represents one completed directory.
type TarInterpreter ¶
TarInterpreter behaves differently for different file types.
type TarUploader ¶
type TarUploader struct {
    Upl          s3manageriface.UploaderAPI
    StorageClass string
    Success      bool
    // contains filtered or unexported fields
}
TarUploader contains fields associated with uploading tarballs. Multiple tarballs can share one uploader. Must call CreateUploader() in 'upload.go'.
func NewTarUploader ¶
func NewTarUploader(svc s3iface.S3API, bucket, server, region string) *TarUploader
NewTarUploader creates a new tar uploader without the actual S3 uploader. CreateUploader() is used to configure byte size and concurrency streams for the uploader.
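A sketch of wiring the uploader manually (Configure normally does this); the bucket, server path, region, part size, and concurrency values are assumptions, and the standard aws-sdk-go session and s3 packages are used.

    sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(sess)

    tu := walg.NewTarUploader(svc, "my-bucket", "walg/server", "us-east-1")
    // 20 MiB parts with 10 concurrent part uploads -- illustrative values.
    tu.Upl = walg.CreateUploader(svc, 20*1024*1024, 10)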
func (*TarUploader) Clone ¶ added in v0.1.7
func (tu *TarUploader) Clone() *TarUploader
Clone creates a similar TarUploader with a new WaitGroup
func (*TarUploader) Finish ¶
func (tu *TarUploader) Finish()
Finish waits for all waiting parts to be uploaded. If an error occurs, prints alert to stderr.
type TimeSlice ¶
type TimeSlice []BackupTime
TimeSlice represents a backup and its last modified time.
type UnsetEnvVarError ¶
type UnsetEnvVarError struct {
// contains filtered or unexported fields
}
UnsetEnvVarError is used to indicate that an environment variable required by WAL-G is unset.
func (UnsetEnvVarError) Error ¶
func (e UnsetEnvVarError) Error() string
type UnsupportedFileTypeError ¶
UnsupportedFileTypeError is used to signal file types that are unsupported by WAL-G.
func (UnsupportedFileTypeError) Error ¶
func (e UnsupportedFileTypeError) Error() string
type ZeroReader ¶
type ZeroReader struct{}
ZeroReader generates a slice of zeroes. Used to pad the tar in cases where the length of a file changes.