internal

package
v0.2.8

Warning: This package is not in the latest version of its module.
Published: Apr 5, 2019 License: Apache-2.0 Imports: 43 Imported by: 0

Documentation

Index

Constants

View Source
const (
	PgControlPath = "/global/pg_control"
	LatestString  = "LATEST"
)
View Source
const (
	DefaultTarSizeThreshold = int64((1 << 30) - 1)
	PgControl               = "pg_control"
	BackupLabelFilename     = "backup_label"
	TablespaceMapFilename   = "tablespace_map"
)

The threshold is set just below 1GB so that big database files (up to 1GB each) are loaded one by one.

View Source
const (
	Lz4AlgorithmName    = "lz4"
	LzmaAlgorithmName   = "lzma"
	ZstdAlgorithmName   = "zstd"
	BrotliAlgorithmName = "brotli"

	Lz4FileExtension    = "lz4"
	LzmaFileExtension   = "lzma"
	ZstdFileExtension   = "zst"
	BrotliFileExtension = "br"
	LzoFileExtension    = "lzo"
)
View Source
const (
	DefaultDataBurstRateLimit = 8 * int64(DatabasePageSize)
	DefaultDataFolderPath     = "/tmp"
	WaleFileHost              = "file://localhost"
)
View Source
const (
	NoDeleteModifier = iota
	FullDeleteModifier
	FindFullDeleteModifier
)
View Source
const (
	RelFileSizeBound               = 1 << 30
	BlocksInRelFile                = RelFileSizeBound / int(DatabasePageSize)
	DefaultSpcNode   walparser.Oid = 1663
)
View Source
const (
	DatabasePageSize = walparser.BlockSize

	SignatureMagicNumber byte = 0x55

	DefaultTablespace    = "base"
	GlobalTablespace     = "global"
	NonDefaultTablespace = "pg_tblspc"
)
View Source
const (
	VersionStr     = "005"
	BaseBackupPath = "basebackups_" + VersionStr + "/"
	WalPath        = "wal_" + VersionStr + "/"

	// SentinelSuffix is a suffix of backup finish sentinel file
	SentinelSuffix         = "_backup_stop_sentinel.json"
	CompressedBlockMaxSize = 20 << 20
	NotFoundAWSErrorCode   = "NotFound"
)
View Source
const (
	WalFileInDelta      uint64 = 16
	DeltaFilenameSuffix        = "_delta"
	PartFilenameSuffix         = "_part"
)
View Source
const GpgBin = "gpg"
View Source
const LzopBlockSize = 256 * 1024
View Source
const (
	RecordPartFilename = "currentRecord.part"
)
View Source
const TarPartitionFolderName = "/tar_partitions/"
View Source
const TotalBgUploadedLimit = 1024
View Source
const (
	// WalSegmentSize is the size of one WAL file
	WalSegmentSize = uint64(16 * 1024 * 1024) // xlog.c line 113

)

Variables

View Source
var DiskLimiter *rate.Limiter
View Source
var ExcludedFilenames = make(map[string]Empty)

ExcludedFilenames lists files and directories that are excluded from the bundled backup.

View Source
var Extensions []Extension
View Source
var IncrementFileHeader = []byte{'w', 'i', '1', SignatureMagicNumber}

"wi" at the head stands for "wal-g increment" format version "1", signature magic number

View Source
var MaxExtractRetryWait = 5 * time.Minute
View Source
var MinExtractRetryWait = time.Minute
View Source
var NetworkLimiter *rate.Limiter
View Source
var StorageAdapters = []StorageAdapter{
	{"WALE_S3_PREFIX", s3.SettingList, s3.ConfigureFolder, nil},
	{"WALE_FILE_PREFIX", nil, fs.ConfigureFolder, preprocessFilePrefix},
	{"WALE_GS_PREFIX", nil, gcs.ConfigureFolder, nil},
	{"WALE_AZ_PREFIX", azure.SettingList, azure.ConfigureFolder, nil},
	{"WALE_SWIFT_PREFIX", nil, swift.ConfigureFolder, nil},
}
View Source
var TerminalLocation = *walparser.NewBlockLocation(0, 0, 0, 0)
View Source
var UnwrapAll map[string]bool = nil
View Source
var UtilityFilePaths = map[string]bool{
	PgControlPath:         true,
	BackupLabelFilename:   true,
	TablespaceMapFilename: true,
}
View Source
var (
	WalgConfig *map[string]string
)

Functions

func ApplyFileIncrement

func ApplyFileIncrement(fileName string, increment io.Reader) error

ApplyFileIncrement changes pages according to supplied change map file

func CleanupPrefetchDirectories

func CleanupPrefetchDirectories(walFileName string, location string, cleaner Cleaner)

func ComputeDeletionSkiplineAndPrintIntentions

func ComputeDeletionSkiplineAndPrintIntentions(backups []BackupTime, target *Backup) (skipLine int, walSkipFileName string)

ComputeDeletionSkiplineAndPrintIntentions selects the last backup to keep and the name of the last WAL file that is still needed

func ConfigureFolder added in v0.2.8

func ConfigureFolder() (storage.Folder, error)

TODO : unit tests

func ConfigureLimiters added in v0.2.8

func ConfigureLimiters() error

TODO : unit tests

func ConfigureLogging added in v0.2.8

func ConfigureLogging() error

TODO : unit tests

func Connect

func Connect() (*pgx.Conn, error)

Connect establishes a connection to postgres using a UNIX socket. Must export PGHOST and run with `sudo -E -u postgres`. If PGHOST is not set or if the connection fails, an error is returned and the connection is `<nil>`.

Example: PGHOST=/var/run/postgresql or PGHOST=10.0.0.1
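
A minimal usage sketch (it must live inside the WAL-G module, since internal packages cannot be imported from outside it; the import path is assumed):

package main

import (
	"log"

	"github.com/wal-g/wal-g/internal"
)

func main() {
	// Connect requires PGHOST in the environment, e.g. /var/run/postgresql.
	conn, err := internal.Connect()
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer conn.Close()

	var version string
	// *pgx.Conn exposes QueryRow/Scan much like database/sql.
	if err := conn.QueryRow("select version()").Scan(&version); err != nil {
		log.Fatalf("query: %v", err)
	}
	log.Println(version)
}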

func CreateFileWith

func CreateFileWith(filePath string, content io.Reader) error

func DecompressWALFile added in v0.2.7

func DecompressWALFile(dst io.Writer, archiveReader io.ReadCloser, decompressor Decompressor) error

TODO : unit tests

func DecryptAndDecompressTar

func DecryptAndDecompressTar(writer io.Writer, readerMaker ReaderMaker, crypter Crypter) error

TODO : unit tests DecryptAndDecompressTar ensures that the file extension is valid. Any subsequent behavior depends on the file type.

func DecryptSecretKey added in v0.2.6

func DecryptSecretKey(entityList openpgp.EntityList, passphrase string) error

func DeleteWALBefore added in v0.2.7

func DeleteWALBefore(walSkipFileName string, walFolder storage.Folder)

TODO : unit tests

func DownloadWALFileTo added in v0.2.7

func DownloadWALFileTo(folder storage.Folder, walFileName string, dstPath string) error

TODO : unit tests DownloadWALFileTo downloads a WAL file and writes it to a local file

func ExtractAll

func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error

TODO : unit tests ExtractAll handles all files passed in. Supports `.lzo`, `.lz4`, `.lzma`, and `.tar`. The file type `.nop` is used for testing purposes. Each file is extracted in its own goroutine, and ExtractAll waits for all goroutines to finish. Returns the first error encountered.
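
A hedged sketch of the wiring (the helper name and the object path layout are illustrative; the backup is assumed to come from GetBackupByName):

// fetchBackupTars extracts every tar member of a backup into dbDataDirectory.
func fetchBackupTars(backup *internal.Backup, dbDataDirectory string) error {
	sentinel, err := backup.FetchSentinel()
	if err != nil {
		return err
	}
	tarNames, err := backup.GetTarNames()
	if err != nil {
		return err
	}
	filesToUnwrap := internal.GetRestoredBackupFilesToUnwrap(sentinel)
	interpreter := internal.NewFileTarInterpreter(dbDataDirectory, sentinel, filesToUnwrap)
	readerMakers := make([]internal.ReaderMaker, 0, len(tarNames))
	for _, name := range tarNames {
		// Illustrative layout: tar partitions under the backup's folder.
		path := backup.Name + internal.TarPartitionFolderName + name
		readerMakers = append(readerMakers, internal.NewStorageReaderMaker(backup.BaseBackupFolder, path))
	}
	// Each member is extracted in its own goroutine; the first error wins.
	return internal.ExtractAll(interpreter, readerMakers)
}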

func ExtractBlockLocations

func ExtractBlockLocations(records []walparser.XLogRecord) []walparser.BlockLocation

func FastCopy

func FastCopy(dst io.Writer, src io.Reader) (int64, error)

TODO : unit tests

func FindFirstLaterOrEqualTime added in v0.2.8

func FindFirstLaterOrEqualTime(folder storage.Folder,
	timeLine time.Time,
	less func(object1, object2 storage.Object) bool) (storage.Object, error)

func FindTargetBeforeName added in v0.2.8

func FindTargetBeforeName(folder storage.Folder, name string, modifier int) (storage.Object, error)

func GetBaseFilesToUnwrap

func GetBaseFilesToUnwrap(backupFileStates BackupFileList, currentFilesToUnwrap map[string]bool) (map[string]bool, error)

func GetDeltaFilenameFor

func GetDeltaFilenameFor(walFilename string) (string, error)

func GetFileExtension

func GetFileExtension(filePath string) string

TODO : unit tests

func GetFileRelativePath

func GetFileRelativePath(fileAbsPath string, directoryPath string) string

func GetKeyRingId

func GetKeyRingId() string

GetKeyRingId extracts the name of the key to use from an environment variable

func GetLatestBackupName added in v0.2.7

func GetLatestBackupName(folder storage.Folder) (string, error)

TODO : unit tests

func GetNextWalFilename

func GetNextWalFilename(name string) (string, error)

GetNextWalFilename computes the name of the next WAL segment

func GetPositionInDelta

func GetPositionInDelta(walFilename string) int

func GetPrefetchLocations

func GetPrefetchLocations(location string, walFileName string) (prefetchLocation string, runningLocation string, runningFile string, fetchedFile string)

func GetRelFileIdFrom

func GetRelFileIdFrom(filePath string) (int, error)

func GetRelFileNodeFrom

func GetRelFileNodeFrom(filePath string) (*walparser.RelFileNode, error)

func GetRestoredBackupFilesToUnwrap

func GetRestoredBackupFilesToUnwrap(sentinelDto BackupSentinelDto) map[string]bool

func GetSentinelUserData

func GetSentinelUserData() interface{}

GetSentinelUserData tries to parse the WALG_SENTINEL_USER_DATA environment variable

func GetSettingValue added in v0.2.7

func GetSettingValue(key string) string

func HandleBackupFetch

func HandleBackupFetch(folder storage.Folder, dbDataDirectory string, backupName string)

TODO : unit tests HandleBackupFetch is invoked to perform wal-g backup-fetch

func HandleBackupList

func HandleBackupList(folder storage.Folder)

TODO : unit tests HandleBackupList is invoked to perform wal-g backup-list

func HandleBackupPush

func HandleBackupPush(uploader *Uploader, archiveDirectory string)

TODO : unit tests HandleBackupPush is invoked to perform a wal-g backup-push

func HandleDeleteRetain added in v0.2.8

func HandleDeleteRetain(folder storage.Folder, retantionCount int, modifier int, dryRun bool)

TODO : unit tests

func HandleWALFetch

func HandleWALFetch(folder storage.Folder, walFileName string, location string, triggerPrefetch bool)

TODO : unit tests HandleWALFetch is invoked to perform wal-g wal-fetch

func HandleWALPrefetch

func HandleWALPrefetch(uploader *Uploader, walFileName string, location string)

TODO : unit tests HandleWALPrefetch is invoked by the wal-fetch command to speed up database restoration

func HandleWALPush

func HandleWALPush(uploader *Uploader, walFilePath string)

TODO : unit tests HandleWALPush is invoked to perform wal-g wal-push

func IsDirectoryEmpty

func IsDirectoryEmpty(directoryPath string) (bool, error)

TODO : unit tests

func IsPgControlRequired added in v0.2.4

func IsPgControlRequired(backup *Backup, sentinelDto BackupSentinelDto) bool

func LoadExtensions added in v0.2.7

func LoadExtensions(path string) error

func LookupConfigValue

func LookupConfigValue(key string) (value string, ok bool)

func NewDiskLimitReader

func NewDiskLimitReader(r io.ReadCloser) io.ReadCloser

NewDiskLimitReader returns a reader that is rate limited by disk limiter

func NewLzoReader

func NewLzoReader(r io.Reader) (io.ReadCloser, error)

func NewLzoWriter

func NewLzoWriter(w io.Writer) io.WriteCloser

func NewNetworkLimitReader

func NewNetworkLimitReader(r io.ReadCloser) io.ReadCloser

NewNetworkLimitReader returns a reader that is rate limited by network limiter

func PackFileTo

func PackFileTo(tarBall TarBall, fileInfoHeader *tar.Header, fileContent io.Reader) (fileSize int64, err error)

func ParseWALFilename

func ParseWALFilename(name string) (timelineId uint32, logSegNo uint64, err error)

TODO : unit tests ParseWALFilename extracts numeric parts from WAL file name

func ReadIncrementFileHeader

func ReadIncrementFileHeader(reader io.Reader) error

func ReadIncrementalFile

func ReadIncrementalFile(filePath string, fileSize int64, lsn uint64, deltaBitmap *roaring.Bitmap) (fileReader io.ReadCloser, size int64, err error)

func ReadKey added in v0.2.6

func ReadKey(path string) (io.Reader, error)

func ReadLocationsFrom

func ReadLocationsFrom(reader io.Reader) ([]walparser.BlockLocation, error)

func ReadPGPKey added in v0.2.6

func ReadPGPKey(path string) (openpgp.EntityList, error)

func ReaderMakersToFilePaths

func ReaderMakersToFilePaths(readerMakers []ReaderMaker) []string

func RegisterExtensionCommands added in v0.2.8

func RegisterExtensionCommands(rootCmd *cobra.Command)

func ResolveSymlink

func ResolveSymlink(path string) string

ResolveSymlink converts the given path to its physical counterpart if it is a symlink

func SanitizePath added in v0.2.7

func SanitizePath(path string) string

func SelectRelFileBlocks

func SelectRelFileBlocks(bitmap *roaring.Bitmap, relFileId int) *roaring.Bitmap

func ShouldPrefault

func ShouldPrefault(name string) (lsn uint64, shouldPrefault bool, timelineId uint32, err error)

func ToBytes

func ToBytes(x interface{}) []byte

func ToPartFilename

func ToPartFilename(deltaFilename string) string

func TryDownloadWALFile

func TryDownloadWALFile(folder storage.Folder, walPath string) (walFileReader io.ReadCloser, exists bool, err error)

func UploadSentinel added in v0.2.7

func UploadSentinel(uploader *Uploader, sentinelDto *BackupSentinelDto, backupName string) error

TODO : unit tests

func UploadWALFile added in v0.2.7

func UploadWALFile(uploader *Uploader, walFilePath string) error

TODO : unit tests UploadWALFile uploads a WAL file from the file system to the cloud

func WriteLocationsTo

func WriteLocationsTo(writer io.Writer, locations []walparser.BlockLocation) error

Types

type ArchiveNonExistenceError

type ArchiveNonExistenceError struct {
	// contains filtered or unexported fields
}

func NewArchiveNonExistenceError

func NewArchiveNonExistenceError(archiveName string) ArchiveNonExistenceError

func (ArchiveNonExistenceError) Error

func (err ArchiveNonExistenceError) Error() string

type Backup

type Backup struct {
	BaseBackupFolder storage.Folder
	Name             string
}

Backup contains information about a valid backup generated and uploaded by WAL-G.

func GetBackupByName

func GetBackupByName(backupName string, folder storage.Folder) (*Backup, error)

func NewBackup

func NewBackup(baseBackupFolder storage.Folder, name string) *Backup

func (*Backup) CheckExistence

func (backup *Backup) CheckExistence() (bool, error)

CheckExistence checks that the specified backup exists.

func (*Backup) FetchSentinel added in v0.2.4

func (backup *Backup) FetchSentinel() (BackupSentinelDto, error)

func (*Backup) GetStopSentinelPath added in v0.2.7

func (backup *Backup) GetStopSentinelPath() string

func (*Backup) GetTarNames

func (backup *Backup) GetTarNames() ([]string, error)

type BackupFileDescription

type BackupFileDescription struct {
	IsIncremented bool // a file should never be both incremented and skipped
	IsSkipped     bool
	MTime         time.Time
}

func NewBackupFileDescription

func NewBackupFileDescription(isIncremented, isSkipped bool, modTime time.Time) *BackupFileDescription

type BackupFileList

type BackupFileList map[string]BackupFileDescription

type BackupNonExistenceError

type BackupNonExistenceError struct {
	// contains filtered or unexported fields
}

func NewBackupNonExistenceError

func NewBackupNonExistenceError(backupName string) BackupNonExistenceError

func (BackupNonExistenceError) Error

func (err BackupNonExistenceError) Error() string

type BackupSentinelDto

type BackupSentinelDto struct {
	BackupStartLSN    *uint64 `json:"LSN"`
	IncrementFromLSN  *uint64 `json:"DeltaFromLSN,omitempty"`
	IncrementFrom     *string `json:"DeltaFrom,omitempty"`
	IncrementFullName *string `json:"DeltaFullName,omitempty"`
	IncrementCount    *int    `json:"DeltaCount,omitempty"`

	Files BackupFileList `json:"Files"`

	PgVersion       int     `json:"PgVersion"`
	BackupFinishLSN *uint64 `json:"FinishLSN"`

	UserData interface{} `json:"UserData,omitempty"`
}

BackupSentinelDto describes the file structure of the JSON sentinel

type BackupTime

type BackupTime struct {
	BackupName  string
	Time        time.Time
	WalFileName string
}

BackupTime is used to sort backups by latest modified time.

type BgUploader

type BgUploader struct {
	// contains filtered or unexported fields
}

BgUploader represents the state of concurrent WAL upload

func NewBgUploader

func NewBgUploader(walFilePath string, maxParallelWorkers int32, uploader *Uploader) *BgUploader

func (*BgUploader) Start

func (bgUploader *BgUploader) Start()

Start begins checking what's inside archive_status

func (*BgUploader) Stop

func (bgUploader *BgUploader) Stop()

Stop stops the upload pipeline

type BlockLocationReader

type BlockLocationReader struct {
	// contains filtered or unexported fields
}

func NewBlockLocationReader

func NewBlockLocationReader(underlying io.Reader) *BlockLocationReader

func (*BlockLocationReader) ReadNextLocation

func (reader *BlockLocationReader) ReadNextLocation() (*walparser.BlockLocation, error)

ReadNextLocation returns any reader error wrapped with errors.Wrap

type BlockLocationWriter

type BlockLocationWriter struct {
	Underlying io.Writer
}

func NewBlockLocationWriter

func NewBlockLocationWriter(underlying io.Writer) *BlockLocationWriter

func (*BlockLocationWriter) WriteLocation

func (locationWriter *BlockLocationWriter) WriteLocation(location walparser.BlockLocation) error

type BrotliCompressor

type BrotliCompressor struct{}

func (BrotliCompressor) FileExtension

func (compressor BrotliCompressor) FileExtension() string

func (BrotliCompressor) NewWriter

func (compressor BrotliCompressor) NewWriter(writer io.Writer) ReaderFromWriteCloser

type BrotliDecompressor

type BrotliDecompressor struct{}

func (BrotliDecompressor) Decompress

func (decompressor BrotliDecompressor) Decompress(dst io.Writer, src io.Reader) error

func (BrotliDecompressor) FileExtension

func (decompressor BrotliDecompressor) FileExtension() string

type BrotliReaderFromWriter

type BrotliReaderFromWriter struct {
	cbrotli.Writer
}

func NewBrotliReaderFromWriter

func NewBrotliReaderFromWriter(dst io.Writer) *BrotliReaderFromWriter

func (*BrotliReaderFromWriter) ReadFrom

func (writer *BrotliReaderFromWriter) ReadFrom(reader io.Reader) (n int64, err error)

type Bundle

type Bundle struct {
	ArchiveDirectory   string
	TarSizeThreshold   int64
	Sentinel           *Sentinel
	TarBall            TarBall
	TarBallMaker       TarBallMaker
	Crypter            OpenPGPCrypter
	Timeline           uint32
	Replica            bool
	IncrementFromLsn   *uint64
	IncrementFromFiles BackupFileList
	DeltaMap           PagedFileDeltaMap

	Files *sync.Map
	// contains filtered or unexported fields
}

A Bundle represents the directory to be walked. It contains at least one TarBall once the walk has started. Each TarBall except the last one will be at least TarSizeThreshold bytes. The Sentinel is used to ensure complete uploaded backups; in this case, pg_control is used as the sentinel.
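
A sketch of the walk-and-queue mechanics only (the real backup-push flow in HandleBackupPush also starts the backup on the server and uploads pg_control; the helper name is illustrative):

func walkIntoTarballs(archiveDirectory string, uploader *internal.Uploader) error {
	// nil increment parameters mean a full, non-delta backup.
	bundle := internal.NewBundle(archiveDirectory, nil, nil)
	bundle.TarBallMaker = internal.NewStorageTarBallMaker("base_example", uploader)
	bundle.StartQueue()
	// HandleWalkedFSObject packs each walked file into the current tarball.
	if err := filepath.Walk(archiveDirectory, bundle.HandleWalkedFSObject); err != nil {
		return err
	}
	return bundle.FinishQueue()
}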

func NewBundle

func NewBundle(archiveDirectory string, incrementFromLsn *uint64, incrementFromFiles BackupFileList) *Bundle

TODO: use DiskDataFolder

func (*Bundle) CheckSizeAndEnqueueBack

func (bundle *Bundle) CheckSizeAndEnqueueBack(tarBall TarBall) error

func (*Bundle) Deque

func (bundle *Bundle) Deque() TarBall

func (*Bundle) DownloadDeltaMap

func (bundle *Bundle) DownloadDeltaMap(folder storage.Folder, backupStartLSN uint64) error

func (*Bundle) EnqueueBack

func (bundle *Bundle) EnqueueBack(tarBall TarBall)

func (*Bundle) FinishQueue

func (bundle *Bundle) FinishQueue() error

func (*Bundle) GetFileRelPath

func (bundle *Bundle) GetFileRelPath(fileAbsPath string) string

func (*Bundle) GetFiles

func (bundle *Bundle) GetFiles() *sync.Map

func (*Bundle) GetIncrementBaseFiles

func (bundle *Bundle) GetIncrementBaseFiles() BackupFileList

GetIncrementBaseFiles returns the list of files from the previous backup

func (*Bundle) GetIncrementBaseLsn

func (bundle *Bundle) GetIncrementBaseLsn() *uint64

GetIncrementBaseLsn returns the LSN of the previous backup

func (*Bundle) HandleWalkedFSObject

func (bundle *Bundle) HandleWalkedFSObject(path string, info os.FileInfo, err error) error

TODO : unit tests HandleWalkedFSObject handles each file encountered while walking the passed-in directory and creates compressed tar members labeled as `part_00i.tar.*`, where '*' is the compressor file extension.

To see which files and directories are Skipped, please consult ExcludedFilenames. Excluded directories will be created but their contents will not be included in the tar bundle.

func (*Bundle) NewTarBall

func (bundle *Bundle) NewTarBall(dedicatedUploader bool)

NewTarBall starts writing new tarball

func (*Bundle) PrefaultWalkedFSObject

func (bundle *Bundle) PrefaultWalkedFSObject(path string, info os.FileInfo, err error) error

TODO : unit tests

func (*Bundle) StartBackup

func (bundle *Bundle) StartBackup(conn *pgx.Conn, backup string) (backupName string, lsn uint64, version int, err error)

TODO : unit tests StartBackup starts a non-exclusive base backup immediately. When finishing the backup, `backup_label` and `tablespace_map` contents are not immediately written to a file but returned instead. Returns an empty string and an error if the backup fails.
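
Continuing the bundle sketch above, a backup might be started before walking (the label argument is assumed to be handed to the server's start-backup call):

func startExampleBackup(bundle *internal.Bundle) (string, uint64, error) {
	conn, err := internal.Connect()
	if err != nil {
		return "", 0, err
	}
	backupName, lsn, pgVersion, err := bundle.StartBackup(conn, "walg_example_label")
	if err != nil {
		return "", 0, err
	}
	log.Printf("started %s at lsn %d on postgres %d", backupName, lsn, pgVersion)
	return backupName, lsn, nil
}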

func (*Bundle) StartQueue

func (bundle *Bundle) StartQueue()

func (*Bundle) UploadLabelFiles

func (bundle *Bundle) UploadLabelFiles(conn *pgx.Conn) (uint64, error)

TODO : unit tests UploadLabelFiles creates the `backup_label` and `tablespace_map` files by stopping the backup and uploads them to S3.

func (*Bundle) UploadPgControl

func (bundle *Bundle) UploadPgControl(compressorFileExtension string) error

TODO : unit tests UploadPgControl should only be called after the rest of the backup is successfully uploaded to S3.

type BytesPerWalSegmentError

type BytesPerWalSegmentError struct {
	// contains filtered or unexported fields
}

func NewBytesPerWalSegmentError

func NewBytesPerWalSegmentError() BytesPerWalSegmentError

func (BytesPerWalSegmentError) Error

func (err BytesPerWalSegmentError) Error() string

type CachedKey

type CachedKey struct {
	KeyId string `json:"keyId"`
	Body  []byte `json:"body"`
}

CachedKey is the data transfer object describing format of key ring cache

type CantDiscardWalDataError

type CantDiscardWalDataError struct {
	// contains filtered or unexported fields
}

func NewCantDiscardWalDataError

func NewCantDiscardWalDataError() CantDiscardWalDataError

func (CantDiscardWalDataError) Error

func (err CantDiscardWalDataError) Error() string

type CantOverwriteWalFileError

type CantOverwriteWalFileError struct {
	// contains filtered or unexported fields
}

func NewCantOverwriteWalFileError

func NewCantOverwriteWalFileError(walFilePath string) CantOverwriteWalFileError

func (CantOverwriteWalFileError) Error

func (err CantOverwriteWalFileError) Error() string

type CascadeWriteCloser

type CascadeWriteCloser struct {
	io.WriteCloser
	Underlying io.Closer
}

CascadeWriteCloser bundles multiple closures into one function. Calling Close() will close the main and underlying writers.
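
A sketch of the intended pattern, assuming any Compressor from this package (the helper name is illustrative):

// newCompressedFileWriter chains a compressor over a file so that a single
// Close finalizes both writers.
func newCompressedFileWriter(compressor internal.Compressor, path string) (io.WriteCloser, error) {
	file, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	return &internal.CascadeWriteCloser{
		WriteCloser: compressor.NewWriter(file),
		Underlying:  file,
	}, nil
}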

func (*CascadeWriteCloser) Close

func (cascadeCloser *CascadeWriteCloser) Close() error

Close returns the first encountered error from closing main or underlying writer.

type Cleaner

type Cleaner interface {
	GetFiles(directory string) ([]string, error)
	Remove(file string)
}

The Cleaner interface serves to separate file system logic from prefetch cleaning logic, making the latter testable
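
A minimal in-memory implementation sketch for tests (the type name is illustrative):

type fakeCleaner struct {
	files   map[string][]string // directory -> contained file names
	removed []string            // records every Remove call
}

func (cleaner *fakeCleaner) GetFiles(directory string) ([]string, error) {
	return cleaner.files[directory], nil
}

func (cleaner *fakeCleaner) Remove(file string) {
	cleaner.removed = append(cleaner.removed, file)
}

Such a fake can be handed to CleanupPrefetchDirectories to assert which files would be removed.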

type CompressingPipeWriter

type CompressingPipeWriter struct {
	Input                io.Reader
	Output               io.Reader
	NewCompressingWriter func(io.Writer) ReaderFromWriteCloser
}

CompressingPipeWriter allows for flexibility of using compressed output. Input is read and compressed to a pipe reader.

func (*CompressingPipeWriter) Compress

func (pipeWriter *CompressingPipeWriter) Compress(crypter Crypter)

Compress compresses input to a pipe reader. Output must be used or pipe will block.
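
A sketch, assuming the zero-value OpenPGPCrypter behaves as "encryption not used" (the helper name is illustrative):

// compressToBytes compresses src fully into memory.
func compressToBytes(src io.Reader, compressor internal.Compressor) ([]byte, error) {
	pipeWriter := &internal.CompressingPipeWriter{
		Input:                src,
		NewCompressingWriter: compressor.NewWriter,
	}
	pipeWriter.Compress(&internal.OpenPGPCrypter{})
	// Output must be consumed, otherwise the internal pipe blocks.
	return ioutil.ReadAll(pipeWriter.Output)
}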

type CompressingPipeWriterError

type CompressingPipeWriterError struct {
	// contains filtered or unexported fields
}

CompressingPipeWriterError is used to catch specific errors from CompressingPipeWriter when uploading to S3. Will not retry upload if this error occurs.

func NewCompressingPipeWriterError

func NewCompressingPipeWriterError(reason string) CompressingPipeWriterError

func (CompressingPipeWriterError) Error

func (err CompressingPipeWriterError) Error() string

type Compressor

type Compressor interface {
	NewWriter(writer io.Writer) ReaderFromWriteCloser
	FileExtension() string
}

type Crypter

type Crypter interface {
	IsUsed() bool
	Encrypt(writer io.WriteCloser) (io.WriteCloser, error)
	Decrypt(reader io.ReadCloser) (io.Reader, error)
}

Crypter is responsible for making cryptographic pipeline parts when needed

type CrypterUseMischiefError

type CrypterUseMischiefError struct {
	// contains filtered or unexported fields
}

CrypterUseMischiefError happens when a crypter is used before initialization

func NewCrypterUseMischiefError

func NewCrypterUseMischiefError() CrypterUseMischiefError

func (CrypterUseMischiefError) Error

func (err CrypterUseMischiefError) Error() string

type DataFolder

type DataFolder interface {
	// OpenReadonlyFile should return NoSuchFileError if it cannot find desired file
	OpenReadonlyFile(filename string) (io.ReadCloser, error)
	OpenWriteOnlyFile(filename string) (io.WriteCloser, error)
	CleanFolder() error
}

type Decompressor

type Decompressor interface {
	Decompress(dst io.Writer, src io.Reader) error
	FileExtension() string
}

type DelayWriteCloser

type DelayWriteCloser struct {
	// contains filtered or unexported fields
}

DelayWriteCloser delays the first write. Encryption starts writing its header immediately, but in many places the writer is instantiated long before the pipe is ready. This special writer therefore delays encryption initialization until the first actual write. If no write occurs, initialization is still performed, so that zero-byte files are handled correctly

func (*DelayWriteCloser) Close

func (delayWriteCloser *DelayWriteCloser) Close() error

Close DelayWriteCloser

func (*DelayWriteCloser) Write

func (delayWriteCloser *DelayWriteCloser) Write(p []byte) (n int, err error)

type DeltaFile

type DeltaFile struct {
	Locations []walparser.BlockLocation
	WalParser *walparser.WalParser
}

func LoadDeltaFile

func LoadDeltaFile(reader io.Reader) (*DeltaFile, error)

func NewDeltaFile

func NewDeltaFile(walParser *walparser.WalParser) (*DeltaFile, error)

func (*DeltaFile) Save

func (deltaFile *DeltaFile) Save(writer io.Writer) error

type DeltaFileChanWriter

type DeltaFileChanWriter struct {
	DeltaFile             *DeltaFile
	BlockLocationConsumer chan walparser.BlockLocation
}

func NewDeltaFileChanWriter

func NewDeltaFileChanWriter(deltaFile *DeltaFile) *DeltaFileChanWriter

func (*DeltaFileChanWriter) Consume

func (writer *DeltaFileChanWriter) Consume(waitGroup *sync.WaitGroup)

type DeltaFileManager

type DeltaFileManager struct {
	PartFiles        *LazyCache
	DeltaFileWriters *LazyCache

	CanceledDeltaFiles map[string]bool
	// contains filtered or unexported fields
}

func NewDeltaFileManager

func NewDeltaFileManager(dataFolder DataFolder) *DeltaFileManager

func (*DeltaFileManager) CancelRecording

func (manager *DeltaFileManager) CancelRecording(walFilename string)

func (*DeltaFileManager) CombinePartFile

func (manager *DeltaFileManager) CombinePartFile(deltaFilename string, partFile *WalPartFile) error

func (*DeltaFileManager) FlushDeltaFiles

func (manager *DeltaFileManager) FlushDeltaFiles(uploader *Uploader, completedPartFiles map[string]bool)

func (*DeltaFileManager) FlushFiles

func (manager *DeltaFileManager) FlushFiles(uploader *Uploader)

func (*DeltaFileManager) FlushPartFiles

func (manager *DeltaFileManager) FlushPartFiles() (completedPartFiles map[string]bool)

func (*DeltaFileManager) GetBlockLocationConsumer

func (manager *DeltaFileManager) GetBlockLocationConsumer(deltaFilename string) (chan walparser.BlockLocation, error)

func (*DeltaFileManager) GetPartFile

func (manager *DeltaFileManager) GetPartFile(deltaFilename string) (*WalPartFile, error)

func (*DeltaFileManager) LoadDeltaFileWriter

func (manager *DeltaFileManager) LoadDeltaFileWriter(deltaFilename string) (deltaFileWriter *DeltaFileChanWriter, err error)

TODO : unit tests

func (*DeltaFileManager) LoadPartFile

func (manager *DeltaFileManager) LoadPartFile(partFilename string) (*WalPartFile, error)

TODO : unit tests

type DeltaFileWriterNotFoundError

type DeltaFileWriterNotFoundError struct {
	// contains filtered or unexported fields
}

func NewDeltaFileWriterNotFoundError

func NewDeltaFileWriterNotFoundError(filename string) DeltaFileWriterNotFoundError

func (DeltaFileWriterNotFoundError) Error

type DiskDataFolder

type DiskDataFolder struct {
	// contains filtered or unexported fields
}

func NewDiskDataFolder

func NewDiskDataFolder(folderPath string) (*DiskDataFolder, error)

func (*DiskDataFolder) CleanFolder

func (folder *DiskDataFolder) CleanFolder() error

func (*DiskDataFolder) OpenReadonlyFile

func (folder *DiskDataFolder) OpenReadonlyFile(filename string) (io.ReadCloser, error)

func (*DiskDataFolder) OpenWriteOnlyFile

func (folder *DiskDataFolder) OpenWriteOnlyFile(filename string) (io.WriteCloser, error)

type Empty

type Empty struct{}

Empty is used for channel signaling.

type EmptyWriteIgnorer

type EmptyWriteIgnorer struct {
	io.WriteCloser
}

EmptyWriteIgnorer handles 0-byte writes in the LZ4 package to stop the pipe reader/writer from blocking.

func (EmptyWriteIgnorer) Write

func (e EmptyWriteIgnorer) Write(p []byte) (int, error)

type ExponentialRetrier

type ExponentialRetrier struct {
	// contains filtered or unexported fields
}

func NewExponentialRetrier

func NewExponentialRetrier(startSleepDuration, sleepDurationBound time.Duration) *ExponentialRetrier

type Extension added in v0.2.7

type Extension interface {
	TryPrintHelp(command string, args []string) bool
	HasCommand(command string) bool
	Execute(command string, uploader *Uploader, folder storage.Folder, args []string)
	GetAllowedConfigKeys() map[string]*string
	Flush(time BackupTime, folder storage.Folder)
}

type FileSystemCleaner

type FileSystemCleaner struct{}

FileSystemCleaner actually performs its functions on the file system

func (FileSystemCleaner) GetFiles

func (cleaner FileSystemCleaner) GetFiles(directory string) (files []string, err error)

TODO : unit tests GetFiles returns the files of a directory

func (FileSystemCleaner) Remove

func (cleaner FileSystemCleaner) Remove(file string)

Remove file

type FileTarInterpreter

type FileTarInterpreter struct {
	DBDataDirectory string
	Sentinel        BackupSentinelDto
	FilesToUnwrap   map[string]bool
}

FileTarInterpreter extracts input to disk.

func NewFileTarInterpreter

func NewFileTarInterpreter(dbDataDirectory string, sentinel BackupSentinelDto, filesToUnwrap map[string]bool) *FileTarInterpreter

func (*FileTarInterpreter) Interpret

func (tarInterpreter *FileTarInterpreter) Interpret(fileReader io.Reader, fileInfo *tar.Header) error

TODO : unit tests Interpret extracts a tar file to disk and creates needed directories. Returns the first error encountered. Calls fsync after each file is written successfully.

type ForbiddenActionError added in v0.2.8

type ForbiddenActionError struct {
	// contains filtered or unexported fields
}

func NewForbiddenActionError added in v0.2.8

func NewForbiddenActionError(message string) ForbiddenActionError

func (ForbiddenActionError) Error added in v0.2.8

func (err ForbiddenActionError) Error() string

type GpgKeyExportError

type GpgKeyExportError struct {
	// contains filtered or unexported fields
}

func NewGpgKeyExportError

func NewGpgKeyExportError(text string) GpgKeyExportError

func (GpgKeyExportError) Error

func (err GpgKeyExportError) Error() string

type IncorrectLogSegNoError

type IncorrectLogSegNoError struct {
	// contains filtered or unexported fields
}

func NewIncorrectLogSegNoError

func NewIncorrectLogSegNoError(name string) IncorrectLogSegNoError

func (IncorrectLogSegNoError) Error

func (err IncorrectLogSegNoError) Error() string

type IncrementalPageReader

type IncrementalPageReader struct {
	PagedFile ReadSeekCloser
	FileSize  int64
	Lsn       uint64
	Next      []byte
	Blocks    []uint32
}

IncrementalPageReader constructs a difference map during initialization and then re-reads the file. The diff map may consist of up to 1Gb/PostgresBlockSize elements == 512Kb
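
A worked check of that bound, assuming 8KB database pages and 4-byte block numbers:

const (
	relFileSizeBound = 1 << 30                     // 1GB relation segment
	pageSize         = 8192                        // 8KB DatabasePageSize
	diffMapEntries   = relFileSizeBound / pageSize // 131072 block numbers
	diffMapBytes     = diffMapEntries * 4          // 524288 bytes == 512Kb
)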

func (*IncrementalPageReader) AdvanceFileReader

func (pageReader *IncrementalPageReader) AdvanceFileReader() error

func (*IncrementalPageReader) Close

func (pageReader *IncrementalPageReader) Close() error

Close IncrementalPageReader

func (*IncrementalPageReader) DeltaBitmapInitialize

func (pageReader *IncrementalPageReader) DeltaBitmapInitialize(deltaBitmap *roaring.Bitmap)

func (*IncrementalPageReader) DrainMoreData

func (pageReader *IncrementalPageReader) DrainMoreData() (succeed bool, err error)

func (*IncrementalPageReader) FullScanInitialize

func (pageReader *IncrementalPageReader) FullScanInitialize() error

func (*IncrementalPageReader) Read

func (pageReader *IncrementalPageReader) Read(p []byte) (n int, err error)

func (*IncrementalPageReader) SelectNewValidPage

func (pageReader *IncrementalPageReader) SelectNewValidPage(pageBytes []byte, blockNo uint32) (valid bool)

SelectNewValidPage checks whether the page is valid; if it is, blockNo is appended to the Blocks list

func (*IncrementalPageReader) WriteDiffMapToHeader

func (pageReader *IncrementalPageReader) WriteDiffMapToHeader(headerWriter io.Writer)

WriteDiffMapToHeader is currently used only with buffers, so we don't handle any writing errors

type InvalidBlockError

type InvalidBlockError struct {
	// contains filtered or unexported fields
}

InvalidBlockError indicates that a file contains an invalid page and cannot be archived incrementally

func NewInvalidBlockError

func NewInvalidBlockError(blockNo uint32) InvalidBlockError

func (InvalidBlockError) Error

func (err InvalidBlockError) Error() string

type InvalidIncrementFileHeaderError

type InvalidIncrementFileHeaderError struct {
	// contains filtered or unexported fields
}

func NewInvalidIncrementFileHeaderError

func NewInvalidIncrementFileHeaderError() InvalidIncrementFileHeaderError

func (InvalidIncrementFileHeaderError) Error

type InvalidWalFileMagicError

type InvalidWalFileMagicError struct {
	// contains filtered or unexported fields
}

func NewInvalidWalFileMagicError

func NewInvalidWalFileMagicError() InvalidWalFileMagicError

func (InvalidWalFileMagicError) Error

func (err InvalidWalFileMagicError) Error() string

type LazyCache

type LazyCache struct {
	// contains filtered or unexported fields
}

func NewLazyCache

func NewLazyCache(load func(key interface{}) (value interface{}, err error)) *LazyCache

func (*LazyCache) Load

func (lazyCache *LazyCache) Load(key interface{}) (value interface{}, exists bool, err error)

func (*LazyCache) LoadExisting

func (lazyCache *LazyCache) LoadExisting(key interface{}) (value interface{}, exists bool)

func (*LazyCache) Range

func (lazyCache *LazyCache) Range(reduce func(key, value interface{}) bool)

Range calls reduce sequentially for each key and value present in the cache. If reduce returns false, range stops the iteration.
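
A usage sketch:

func lazyCacheExample() {
	cache := internal.NewLazyCache(func(key interface{}) (interface{}, error) {
		// The loader runs on first access to each key.
		return fmt.Sprintf("loaded:%v", key), nil
	})
	value, existed, err := cache.Load("a") // loads "a" through the loader
	fmt.Println(value, existed, err)
	cache.Range(func(key, value interface{}) bool {
		fmt.Println(key, value)
		return true // returning false stops the iteration
	})
}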

func (*LazyCache) Store

func (lazyCache *LazyCache) Store(key, value interface{})

type LimitedReader

type LimitedReader struct {
	// contains filtered or unexported fields
}

func (*LimitedReader) Close

func (r *LimitedReader) Close() error

func (*LimitedReader) Read

func (r *LimitedReader) Read(buf []byte) (int, error)

type Lz4Compressor

type Lz4Compressor struct{}

func (Lz4Compressor) FileExtension

func (compressor Lz4Compressor) FileExtension() string

func (Lz4Compressor) NewWriter

func (compressor Lz4Compressor) NewWriter(writer io.Writer) ReaderFromWriteCloser

type Lz4Decompressor

type Lz4Decompressor struct{}

func (Lz4Decompressor) Decompress

func (decompressor Lz4Decompressor) Decompress(dst io.Writer, src io.Reader) error

func (Lz4Decompressor) FileExtension

func (decompressor Lz4Decompressor) FileExtension() string

type Lz4ReaderFromWriter

type Lz4ReaderFromWriter struct {
	lz4.Writer
}

func NewLz4ReaderFromWriter

func NewLz4ReaderFromWriter(dst io.Writer) *Lz4ReaderFromWriter

func (*Lz4ReaderFromWriter) ReadFrom

func (writer *Lz4ReaderFromWriter) ReadFrom(reader io.Reader) (n int64, err error)

type LzmaCompressor

type LzmaCompressor struct{}

func (LzmaCompressor) FileExtension

func (compressor LzmaCompressor) FileExtension() string

func (LzmaCompressor) NewWriter

func (compressor LzmaCompressor) NewWriter(writer io.Writer) ReaderFromWriteCloser

type LzmaDecompressor

type LzmaDecompressor struct{}

func (LzmaDecompressor) Decompress

func (decompressor LzmaDecompressor) Decompress(dst io.Writer, src io.Reader) error

func (LzmaDecompressor) FileExtension

func (decompressor LzmaDecompressor) FileExtension() string

type LzmaReaderFromWriter

type LzmaReaderFromWriter struct {
	lzma.Writer
}

func NewLzmaReaderFromWriter

func NewLzmaReaderFromWriter(dst io.Writer) (*LzmaReaderFromWriter, error)

func (*LzmaReaderFromWriter) ReadFrom

func (writer *LzmaReaderFromWriter) ReadFrom(reader io.Reader) (n int64, err error)

type LzoDecompressor

type LzoDecompressor struct{}

func (LzoDecompressor) Decompress

func (decompressor LzoDecompressor) Decompress(dst io.Writer, src io.Reader) error

func (LzoDecompressor) FileExtension

func (decompressor LzoDecompressor) FileExtension() string

type NOPTarBall

type NOPTarBall struct {
	// contains filtered or unexported fields
}

NOPTarBall mocks a tarball. Used for prefault logic.

func (*NOPTarBall) AddSize

func (tarBall *NOPTarBall) AddSize(i int64)

func (*NOPTarBall) AwaitUploads

func (tarBall *NOPTarBall) AwaitUploads()

func (*NOPTarBall) CloseTar

func (tarBall *NOPTarBall) CloseTar() error

func (*NOPTarBall) SetUp

func (tarBall *NOPTarBall) SetUp(crypter Crypter, params ...string)

func (*NOPTarBall) Size

func (tarBall *NOPTarBall) Size() int64

func (*NOPTarBall) TarWriter

func (tarBall *NOPTarBall) TarWriter() *tar.Writer

type NOPTarBallMaker

type NOPTarBallMaker struct {
	// contains filtered or unexported fields
}

NOPTarBallMaker creates a new NOPTarBall. Used for testing purposes.

func (*NOPTarBallMaker) Make

func (tarBallMaker *NOPTarBallMaker) Make(inheritState bool) TarBall

Make creates a new NOPTarBall.

type NamedReader

type NamedReader interface {
	io.Reader
	Name() string
}

type NamedReaderImpl

type NamedReaderImpl struct {
	io.Reader
	// contains filtered or unexported fields
}

func (*NamedReaderImpl) Name

func (reader *NamedReaderImpl) Name() string

type NilWalParserError

type NilWalParserError struct {
	// contains filtered or unexported fields
}

func NewNilWalParserError

func NewNilWalParserError() NilWalParserError

func (NilWalParserError) Error

func (err NilWalParserError) Error() string

type NoBackupsFoundError

type NoBackupsFoundError struct {
	// contains filtered or unexported fields
}

func NewNoBackupsFoundError

func NewNoBackupsFoundError() NoBackupsFoundError

func (NoBackupsFoundError) Error

func (err NoBackupsFoundError) Error() string

type NoBitmapFoundError

type NoBitmapFoundError struct {
	// contains filtered or unexported fields
}

func NewNoBitmapFoundError

func NewNoBitmapFoundError() NoBitmapFoundError

func (NoBitmapFoundError) Error

func (err NoBitmapFoundError) Error() string

type NoFilesToExtractError

type NoFilesToExtractError struct {
	// contains filtered or unexported fields
}

func NewNoFilesToExtractError

func NewNoFilesToExtractError() NoFilesToExtractError

func (NoFilesToExtractError) Error

func (err NoFilesToExtractError) Error() string

type NoPostgresVersionError

type NoPostgresVersionError struct {
	// contains filtered or unexported fields
}

func NewNoPostgresVersionError

func NewNoPostgresVersionError() NoPostgresVersionError

func (NoPostgresVersionError) Error

func (err NoPostgresVersionError) Error() string

type NoSuchFileError

type NoSuchFileError struct {
	// contains filtered or unexported fields
}

func NewNoSuchFileError

func NewNoSuchFileError(filename string) NoSuchFileError

func (NoSuchFileError) Error

func (err NoSuchFileError) Error() string

type NonEmptyDbDataDirectoryError

type NonEmptyDbDataDirectoryError struct {
	// contains filtered or unexported fields
}

func NewNonEmptyDbDataDirectoryError

func NewNonEmptyDbDataDirectoryError(dbDataDirectory string) NonEmptyDbDataDirectoryError

func (NonEmptyDbDataDirectoryError) Error

type NotWalFilenameError

type NotWalFilenameError struct {
	// contains filtered or unexported fields
}

func NewNotWalFilenameError

func NewNotWalFilenameError(filename string) NotWalFilenameError

func (NotWalFilenameError) Error

func (err NotWalFilenameError) Error() string

type OpenPGPCrypter

type OpenPGPCrypter struct {
	Configured bool

	KeyRingId      string
	IsUseKeyRingId bool

	ArmoredKey      string
	IsUseArmoredKey bool

	ArmoredKeyPath      string
	IsUseArmoredKeyPath bool

	PubKey    openpgp.EntityList
	SecretKey openpgp.EntityList
	// contains filtered or unexported fields
}

OpenPGPCrypter encapsulates the specifics of the cipher method, including keys, infrastructure information, etc. If many encryption methods come into use, it will be worth extracting an interface

func (*OpenPGPCrypter) ConfigurePGPCrypter added in v0.2.6

func (crypter *OpenPGPCrypter) ConfigurePGPCrypter()

ConfigurePGPCrypter performs internal initialization

func (*OpenPGPCrypter) Decrypt

func (crypter *OpenPGPCrypter) Decrypt(reader io.ReadCloser) (io.Reader, error)

Decrypt creates decrypted reader from ordinary reader

func (*OpenPGPCrypter) Encrypt

func (crypter *OpenPGPCrypter) Encrypt(writer io.WriteCloser) (io.WriteCloser, error)

Encrypt creates encryption writer from ordinary writer

func (*OpenPGPCrypter) IsArmed

func (crypter *OpenPGPCrypter) IsArmed() bool

func (*OpenPGPCrypter) IsUsed

func (crypter *OpenPGPCrypter) IsUsed() bool

IsUsed checks whether the Crypter is needed. Must be called prior to any other crypter call

type PagedFileDeltaMap

type PagedFileDeltaMap map[walparser.RelFileNode]*roaring.Bitmap

func NewPagedFileDeltaMap

func NewPagedFileDeltaMap() PagedFileDeltaMap

func (*PagedFileDeltaMap) AddToDelta

func (deltaMap *PagedFileDeltaMap) AddToDelta(location walparser.BlockLocation)

func (*PagedFileDeltaMap) GetDeltaBitmapFor

func (deltaMap *PagedFileDeltaMap) GetDeltaBitmapFor(filePath string) (*roaring.Bitmap, error)

TODO : unit test no bitmap found

type PgControlNotFoundError

type PgControlNotFoundError struct {
	// contains filtered or unexported fields
}

func NewPgControlNotFoundError

func NewPgControlNotFoundError() PgControlNotFoundError

func (PgControlNotFoundError) Error

func (err PgControlNotFoundError) Error() string

type PgQueryRunner

type PgQueryRunner struct {
	Version int
	// contains filtered or unexported fields
}

PgQueryRunner is an implementation for controlling PostgreSQL 9.0+

func NewPgQueryRunner

func NewPgQueryRunner(conn *pgx.Conn) (*PgQueryRunner, error)

NewPgQueryRunner builds a QueryRunner from an available connection

func (*PgQueryRunner) BuildGetVersion

func (queryRunner *PgQueryRunner) BuildGetVersion() string

BuildGetVersion formats a query to retrieve PostgreSQL numeric version

func (*PgQueryRunner) BuildStartBackup

func (queryRunner *PgQueryRunner) BuildStartBackup() (string, error)

BuildStartBackup formats a query that starts backup according to server features and version

func (*PgQueryRunner) BuildStopBackup

func (queryRunner *PgQueryRunner) BuildStopBackup() (string, error)

BuildStopBackup formats a query that stops backup according to server features and version

func (*PgQueryRunner) StartBackup

func (queryRunner *PgQueryRunner) StartBackup(backup string) (backupName string, lsnString string, inRecovery bool, err error)

StartBackup informs the database that we are starting a copy of the cluster contents

func (*PgQueryRunner) StopBackup

func (queryRunner *PgQueryRunner) StopBackup() (label string, offsetMap string, lsnStr string, err error)

StopBackup informs the database that the copy is over

type PostgresPageHeader

type PostgresPageHeader struct {
	// contains filtered or unexported fields
}

func ParsePostgresPageHeader

func ParsePostgresPageHeader(reader io.Reader) (*PostgresPageHeader, error)

ParsePostgresPageHeader reads information from PostgreSQL page header. Exported for test reasons.

func (*PostgresPageHeader) IsNew

func (header *PostgresPageHeader) IsNew() bool

func (*PostgresPageHeader) IsValid

func (header *PostgresPageHeader) IsValid() bool

func (*PostgresPageHeader) Lsn

func (header *PostgresPageHeader) Lsn() uint64

type QueryRunner

type QueryRunner interface {
	// This call should inform the database that we are going to copy cluster's contents
	// Should fail if backup is currently impossible
	StartBackup(backup string) (string, string, bool, error)
	// Inform database that contents are copied, get information on backup
	StopBackup() (string, string, string, error)
}

QueryRunner is the interface for controlling the database during backup

type ReadCascadeCloser

type ReadCascadeCloser struct {
	io.Reader
	io.Closer
}

ReadCascadeCloser composes io.ReadCloser from two parts

type ReadSeekCloser

type ReadSeekCloser interface {
	io.Reader
	io.Seeker
	io.Closer
}

type ReadSeekCloserImpl

type ReadSeekCloserImpl struct {
	io.Reader
	io.Seeker
	io.Closer
}

type ReaderFromWriteCloser

type ReaderFromWriteCloser interface {
	io.ReaderFrom
	io.WriteCloser
}

type ReaderMaker

type ReaderMaker interface {
	Reader() (io.ReadCloser, error)
	Path() string
}

ReaderMaker is the generic interface used by extract. It allows for ease of handling different file formats.
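
A file-backed implementation sketch, e.g. for feeding local tar files to ExtractAll in tests (the type name is illustrative):

type fileReaderMaker struct{ path string }

func (maker fileReaderMaker) Path() string { return maker.path }

func (maker fileReaderMaker) Reader() (io.ReadCloser, error) {
	return os.Open(maker.path)
}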

type Saver

type Saver interface {
	Save(writer io.Writer) error
}

type Sentinel

type Sentinel struct {
	Info os.FileInfo
	// contains filtered or unexported fields
}

Sentinel is used to signal completion of a walked directory.

type SentinelMarshallingError added in v0.2.7

type SentinelMarshallingError struct {
	// contains filtered or unexported fields
}

func NewSentinelMarshallingError added in v0.2.7

func NewSentinelMarshallingError(sentinelName string, err error) SentinelMarshallingError

func (SentinelMarshallingError) Error added in v0.2.7

func (err SentinelMarshallingError) Error() string

type StorageAdapter added in v0.2.7

type StorageAdapter struct {
	// contains filtered or unexported fields
}

type StorageReaderMaker

type StorageReaderMaker struct {
	Folder       storage.Folder
	RelativePath string
}

StorageReaderMaker creates readers for downloading from storage

func NewStorageReaderMaker

func NewStorageReaderMaker(folder storage.Folder, relativePath string) *StorageReaderMaker

func (*StorageReaderMaker) Path

func (readerMaker *StorageReaderMaker) Path() string

func (*StorageReaderMaker) Reader

func (readerMaker *StorageReaderMaker) Reader() (io.ReadCloser, error)

type StorageTarBall

type StorageTarBall struct {
	// contains filtered or unexported fields
}

StorageTarBall represents a tar file that is going to be uploaded to storage.

func (*StorageTarBall) AddSize

func (tarBall *StorageTarBall) AddSize(i int64)

AddSize to total Size

func (*StorageTarBall) AwaitUploads

func (tarBall *StorageTarBall) AwaitUploads()

func (*StorageTarBall) CloseTar

func (tarBall *StorageTarBall) CloseTar() error

CloseTar closes the tar writer, flushing any unwritten data to the underlying writer before also closing the underlying writer.

func (*StorageTarBall) SetUp

func (tarBall *StorageTarBall) SetUp(crypter Crypter, names ...string)

SetUp creates a new tar writer and starts an upload to storage. The upload will block until the tar file is finished writing. If a name for the file is not given, the default name has the form `part_....tar.[Compressor file extension]`.

func (*StorageTarBall) Size

func (tarBall *StorageTarBall) Size() int64

Size accumulated in this tarball

func (*StorageTarBall) TarWriter

func (tarBall *StorageTarBall) TarWriter() *tar.Writer

type StorageTarBallMaker

type StorageTarBallMaker struct {
	// contains filtered or unexported fields
}

StorageTarBallMaker creates tarballs that are uploaded to storage.

func NewStorageTarBallMaker

func NewStorageTarBallMaker(backupName string, uploader *Uploader) *StorageTarBallMaker

func (*StorageTarBallMaker) Make

func (tarBallMaker *StorageTarBallMaker) Make(dedicatedUploader bool) TarBall

Make returns a tarball with required storage fields.

type TarBall

type TarBall interface {
	SetUp(crypter Crypter, args ...string)
	CloseTar() error
	Size() int64
	AddSize(int64)
	TarWriter() *tar.Writer
	AwaitUploads()
}

A TarBall represents one tar file.

type TarBallMaker

type TarBallMaker interface {
	Make(dedicatedUploader bool) TarBall
}

TarBallMaker is used to allow for flexible creation of different TarBalls.

func NewNopTarBallMaker

func NewNopTarBallMaker() TarBallMaker

type TarInterpreter

type TarInterpreter interface {
	Interpret(reader io.Reader, header *tar.Header) error
}

TarInterpreter behaves differently for different file types.
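
A sketch of a custom interpreter that records member names instead of writing them to disk, e.g. for a dry-run listing (the type name is illustrative):

type listingInterpreter struct{ names []string }

func (interpreter *listingInterpreter) Interpret(reader io.Reader, header *tar.Header) error {
	interpreter.names = append(interpreter.names, header.Name)
	_, err := io.Copy(ioutil.Discard, reader) // drain the member body
	return err
}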

type TarSizeError

type TarSizeError struct {
	// contains filtered or unexported fields
}

func NewTarSizeError

func NewTarSizeError(packedFileSize, expectedSize int64) TarSizeError

func (TarSizeError) Error

func (err TarSizeError) Error() string

type TimeSlice

type TimeSlice []BackupTime

TimeSlice represents a backup and its last modified time.
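
Because TimeSlice implements sort.Interface via Len, Less, and Swap below, backups can be ordered with the standard library (the direction is whatever Less defines):

func sortBackups(backups []internal.BackupTime) {
	sort.Sort(internal.TimeSlice(backups))
}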

func (TimeSlice) Len

func (timeSlice TimeSlice) Len() int

func (TimeSlice) Less

func (timeSlice TimeSlice) Less(i, j int) bool

func (TimeSlice) Swap

func (timeSlice TimeSlice) Swap(i, j int)

type UnconfiguredStorageError added in v0.2.8

type UnconfiguredStorageError struct {
	// contains filtered or unexported fields
}

func NewUnconfiguredStorageError added in v0.2.8

func NewUnconfiguredStorageError(storagePrefixVariants []string) UnconfiguredStorageError

func (UnconfiguredStorageError) Error added in v0.2.8

func (err UnconfiguredStorageError) Error() string

type UnexpectedTarDataError

type UnexpectedTarDataError struct {
	// contains filtered or unexported fields
}

func NewUnexpectedTarDataError

func NewUnexpectedTarDataError() UnexpectedTarDataError

func (UnexpectedTarDataError) Error

func (err UnexpectedTarDataError) Error() string

type UnknownCompressionMethodError

type UnknownCompressionMethodError struct {
	// contains filtered or unexported fields
}

func NewUnknownCompressionMethodError

func NewUnknownCompressionMethodError() UnknownCompressionMethodError

func (UnknownCompressionMethodError) Error

type UnknownIncrementFileHeaderError

type UnknownIncrementFileHeaderError struct {
	// contains filtered or unexported fields
}

func NewUnknownIncrementFileHeaderError

func NewUnknownIncrementFileHeaderError() UnknownIncrementFileHeaderError

func (UnknownIncrementFileHeaderError) Error

type UnknownTableSpaceError

type UnknownTableSpaceError struct {
	// contains filtered or unexported fields
}

func NewUnknownTableSpaceError

func NewUnknownTableSpaceError() UnknownTableSpaceError

func (UnknownTableSpaceError) Error

func (err UnknownTableSpaceError) Error() string

type UnsupportedFileTypeError

type UnsupportedFileTypeError struct {
	// contains filtered or unexported fields
}

UnsupportedFileTypeError is used to signal file types that are unsupported by WAL-G.

func NewUnsupportedFileTypeError

func NewUnsupportedFileTypeError(path string, fileFormat string) UnsupportedFileTypeError

func (UnsupportedFileTypeError) Error added in v0.2.8

func (err UnsupportedFileTypeError) Error() string

type UnsupportedPostgresVersionError

type UnsupportedPostgresVersionError struct {
	// contains filtered or unexported fields
}

func NewUnsupportedPostgresVersionError

func NewUnsupportedPostgresVersionError(version int) UnsupportedPostgresVersionError

func (UnsupportedPostgresVersionError) Error

type UntilEOFReader

type UntilEOFReader struct {
	// contains filtered or unexported fields
}

func NewUntilEofReader

func NewUntilEofReader(underlying io.Reader) *UntilEOFReader

func (*UntilEOFReader) Read

func (reader *UntilEOFReader) Read(p []byte) (n int, err error)

type Uploader

type Uploader struct {
	UploadingFolder storage.Folder
	Compressor      Compressor

	Success bool
	// contains filtered or unexported fields
}

Uploader contains fields associated with uploading tarballs. Multiple tarballs can share one uploader.

func ConfigureUploader added in v0.2.8

func ConfigureUploader() (uploader *Uploader, err error)

ConfigureUploader connects to storage and creates an uploader. It makes sure that a valid session has started; if invalid, returns AWS error and `<nil>` values.
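
A sketch of a wal-push style flow; storage settings (for example WALE_S3_PREFIX) are assumed to be present in the environment:

func pushWal(walFilePath string) error {
	uploader, err := internal.ConfigureUploader()
	if err != nil {
		return err
	}
	// UploadWALFile compresses the segment and uploads it.
	return internal.UploadWALFile(uploader, walFilePath)
}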

func NewUploader

func NewUploader(
	compressor Compressor,
	uploadingLocation storage.Folder,
	deltaDataFolder DataFolder,
	useWalDelta, preventWalOverwrite bool,
) *Uploader

func (*Uploader) Clone

func (uploader *Uploader) Clone() *Uploader

Clone creates a similar Uploader with a new WaitGroup

func (*Uploader) Upload added in v0.2.7

func (uploader *Uploader) Upload(path string, content io.Reader) error

TODO : unit tests

func (*Uploader) UploadFile

func (uploader *Uploader) UploadFile(file NamedReader) error

TODO : unit tests UploadFile compresses a file and uploads it.

func (*Uploader) UploadWalFile

func (uploader *Uploader) UploadWalFile(file NamedReader) error

TODO : unit tests

type WalDeltaRecorder

type WalDeltaRecorder struct {
	// contains filtered or unexported fields
}

func NewWalDeltaRecorder

func NewWalDeltaRecorder(blockLocationConsumer chan walparser.BlockLocation) *WalDeltaRecorder

type WalDeltaRecordingReader

type WalDeltaRecordingReader struct {
	PageReader       walparser.WalPageReader
	WalParser        walparser.WalParser
	PageDataLeftover []byte
	Recorder         *WalDeltaRecorder
	// contains filtered or unexported fields
}

In case of a recording error, WalDeltaRecordingReader stops recording but continues reading data correctly

func NewWalDeltaRecordingReader

func NewWalDeltaRecordingReader(walFileReader io.Reader, walFilename string, manager *DeltaFileManager) (*WalDeltaRecordingReader, error)

func (*WalDeltaRecordingReader) Close

func (reader *WalDeltaRecordingReader) Close() error

func (*WalDeltaRecordingReader) Read

func (reader *WalDeltaRecordingReader) Read(p []byte) (n int, err error)

func (*WalDeltaRecordingReader) RecordBlockLocationsFromPage

func (reader *WalDeltaRecordingReader) RecordBlockLocationsFromPage() error

type WalPart

type WalPart struct {
	// contains filtered or unexported fields
}

func LoadWalPart

func LoadWalPart(reader io.Reader) (*WalPart, error)

func NewWalPart

func NewWalPart(dataType WalPartDataType, id uint8, data []byte) *WalPart

func (*WalPart) Save

func (part *WalPart) Save(writer io.Writer) error

type WalPartDataType

type WalPartDataType uint8
const (
	PreviousWalHeadType WalPartDataType = 0
	WalTailType         WalPartDataType = 1
	WalHeadType         WalPartDataType = 2
)

type WalPartFile

type WalPartFile struct {
	WalTails        [][]byte
	PreviousWalHead []byte
	WalHeads        [][]byte
}

func LoadPartFile

func LoadPartFile(reader io.Reader) (*WalPartFile, error)

func NewWalPartFile

func NewWalPartFile() *WalPartFile

func (*WalPartFile) CombineRecords

func (partFile *WalPartFile) CombineRecords() ([]walparser.XLogRecord, error)

func (*WalPartFile) IsComplete

func (partFile *WalPartFile) IsComplete() bool

func (*WalPartFile) Save

func (partFile *WalPartFile) Save(writer io.Writer) error

type WalPartRecorder

type WalPartRecorder struct {
	// contains filtered or unexported fields
}

func NewWalPartRecorder

func NewWalPartRecorder(walFilename string, manager *DeltaFileManager) (*WalPartRecorder, error)

func (*WalPartRecorder) SaveNextWalHead

func (recorder *WalPartRecorder) SaveNextWalHead(head []byte) error

func (*WalPartRecorder) SavePreviousWalTail

func (recorder *WalPartRecorder) SavePreviousWalTail(tailData []byte) error

type WrongTypeError

type WrongTypeError struct {
	// contains filtered or unexported fields
}

func NewWrongTypeError

func NewWrongTypeError(desiredType string) WrongTypeError

func (WrongTypeError) Error

func (err WrongTypeError) Error() string

type ZeroReader

type ZeroReader struct{}

ZeroReader generates a slice of zeroes. Used to pad the tar in cases where the length of a file changes.
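
A padding sketch (the helper name is illustrative):

func padWithZeroes(w io.Writer, n int64) error {
	zeroReader := &internal.ZeroReader{}
	_, err := io.CopyN(w, zeroReader, n)
	return err
}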

func (*ZeroReader) Read

func (z *ZeroReader) Read(p []byte) (int, error)

type ZstdCompressor

type ZstdCompressor struct{}

func (ZstdCompressor) FileExtension

func (compressor ZstdCompressor) FileExtension() string

func (ZstdCompressor) NewWriter

func (compressor ZstdCompressor) NewWriter(writer io.Writer) ReaderFromWriteCloser

type ZstdDecompressor

type ZstdDecompressor struct{}

func (ZstdDecompressor) Decompress

func (decompressor ZstdDecompressor) Decompress(dst io.Writer, src io.Reader) error

func (ZstdDecompressor) FileExtension

func (decompressor ZstdDecompressor) FileExtension() string

type ZstdReaderFromWriter

type ZstdReaderFromWriter struct {
	zstd.Writer
}

func NewZstdReaderFromWriter

func NewZstdReaderFromWriter(dst io.Writer) *ZstdReaderFromWriter

func (*ZstdReaderFromWriter) ReadFrom

func (writer *ZstdReaderFromWriter) ReadFrom(reader io.Reader) (n int64, err error)

Source Files

Directories

Path Synopsis
storages
fs
gcs
s3
