Documentation ¶
Index ¶
- Constants
- Variables
- func ApplyFileIncrement(fileName string, increment io.Reader, fsync bool) error
- func ConfigureSegContentID(contentIDFlag string) (int, error)
- func FindRestorePointBeforeTS(timestampStr string, folder storage.Folder) (string, error)
- func FormatCmdStateName(contentID int, cmdName string) string
- func FormatCmdStatePath(contentID int, cmdName string) string
- func FormatSegmentBackupPath(contentID int) string
- func FormatSegmentStateFolderPath(contentID int) string
- func FormatSegmentStoragePrefix(contentID int) string
- func FormatSegmentWalPath(contentID int) string
- func HandleDetailedBackupList(folder storage.Folder, pretty, json bool)
- func HandleRestorePointList(folder storage.Folder, pretty, json bool)
- func LoadStorageAoFiles(baseBackupsFolder storage.Folder) (map[string]struct{}, error)
- func NewGreenplumBackupFetcher(restoreCfgPath string, inPlaceRestore bool, logsDir string, ...) func(folder storage.Folder, backup internal.Backup)
- func NewIncrementalPageReader(file io.ReadSeekCloser, eof, offset int64) (io.ReadCloser, error)
- func NewSegBackupHandler(arguments postgres.BackupArguments) (*postgres.BackupHandler, error)
- func ReadIncrementFileHeader(reader io.Reader) error
- func RestorePointMetadataFileName(pointName string) string
- func SetSegmentStoragePrefix(contentID int)
- func StripRightmostRestorePointName(path string) string
- func ValidateMatch(folder storage.Folder, backupName string, restorePoint string) error
- type AOFilesMetadataDTO
- type AoRelFileMetadata
- type AoRelFileStorageMap
- type AoStorageUploader
- type Backup
- type BackupAOFileDesc
- type BackupAOFiles
- type BackupArguments
- type BackupDetail
- type BackupFetchMode
- type BackupHandler
- type BackupObject
- type BackupSentinelDto
- type BackupWorkers
- type ClusterRestoreConfig
- type CurrBackupInfo
- type DeleteArgs
- type DeleteHandler
- func (h *DeleteHandler) DeleteBeforeTarget(target internal.BackupObject) error
- func (h *DeleteHandler) HandleDeleteBefore(args []string)
- func (h *DeleteHandler) HandleDeleteEverything(args []string)
- func (h *DeleteHandler) HandleDeleteGarbage(args []string) error
- func (h *DeleteHandler) HandleDeleteRetain(args []string)
- func (h *DeleteHandler) HandleDeleteRetainAfter(args []string)
- func (h *DeleteHandler) HandleDeleteTarget(targetSelector internal.BackupSelector)
- type ExtractProviderImpl
- type FetchHandler
- type FilesToExtractProviderImpl
- type GenericMetaFetcher
- type GenericMetaInteractor
- type GenericMetaSetter
- type GpQueryRunner
- func (queryRunner *GpQueryRunner) AbortBackup() (err error)
- func (queryRunner *GpQueryRunner) CreateGreenplumRestorePoint(restorePointName string) (restoreLSNs map[int]string, err error)
- func (queryRunner *GpQueryRunner) FetchAOStorageMetadata(dbInfo postgres.PgDatabaseInfo) (AoRelFileStorageMap, error)
- func (queryRunner *GpQueryRunner) GetGreenplumSegmentsInfo(semVer semver.Version) (segments []cluster.SegConfig, err error)
- func (queryRunner *GpQueryRunner) GetGreenplumVersion() (version string, err error)
- func (queryRunner *GpQueryRunner) IsInBackup() (isInBackupByContentID map[int]bool, err error)
- type GpTarBallComposer
- func (c *GpTarBallComposer) AddFile(info *internal.ComposeFileInfo)
- func (c *GpTarBallComposer) AddHeader(fileInfoHeader *tar.Header, info os.FileInfo) error
- func (c *GpTarBallComposer) FinishComposing() (internal.TarFileSets, error)
- func (c *GpTarBallComposer) GetFiles() internal.BundleFiles
- func (c *GpTarBallComposer) SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)
- type GpTarBallComposerMaker
- type InPlaceSegMaker
- type IncrementalTarInterpreter
- type InvalidIncrementFileHeaderError
- type NoRestorePointsFoundError
- type PgHbaMaker
- type PgSegmentSentinelDto
- type PrevBackupInfo
- type RecoveryConfigMaker
- type RelStorageType
- type RestoreCfgSegMaker
- type RestorePointBackupSelector
- type RestorePointCreator
- type RestorePointMetadata
- type RestorePointTime
- type SegBackup
- type SegCmdRunner
- type SegCmdState
- type SegCmdStatus
- type SegConfigMaker
- type SegDeleteBeforeHandler
- type SegDeleteHandler
- type SegDeleteTargetHandler
- type SegDeleteType
- type SegDeltaBackupConfigurator
- type SegmentFwdArg
- type SegmentMetadata
- type SegmentRestoreConfig
- type SegmentRole
- type SegmentUserData
- type UnexpectedTarDataError
- type UnknownIncrementFileHeaderError
Constants ¶
const (
    AoStoragePath = "aosegments"
    AoSegSuffix = "_aoseg"
    AoSegDeltaDelimiter = "_D_"
)
const (
    BackupNamePrefix = "backup_"
    BackupNameLength = len(BackupNamePrefix) + len(utility.BackupTimeFormat)
    SegBackupLogPrefix = "wal-g-log"
    SegBackupPushCmdName = "seg-backup-push"
)
const AOFilesMetadataName = "ao_files_metadata.json"
const MetadataDatetimeFormat = "%Y-%m-%dT%H:%M:%S.%fZ"
const PgHbaTemplate = `` /* 759-byte string literal not displayed */
const RestorePointSuffix = "_restore_point.json"
const SegmentsFolderPath = "segments_" + utility.VersionStr + "/"
const SignatureMagicNumber byte = 0x56
Variables ¶
var IncrementFileHeader = []byte{'w', 'i', '1', SignatureMagicNumber}
IncrementFileHeader starts with "wi", which stands for "wal-g increment"; it is followed by the format version "1" and the signature magic number.
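A minimal sketch of the check this header implies, assuming the snippet lives inside this package (imports elided; bytes, fmt, and io are standard library). The helper name is hypothetical; real callers should use the exported ReadIncrementFileHeader.

// checkIncrementHeader is a hypothetical helper illustrating the validation that
// ReadIncrementFileHeader is expected to perform: the first four bytes of an
// increment stream must equal IncrementFileHeader.
func checkIncrementHeader(reader io.Reader) error {
    header := make([]byte, len(IncrementFileHeader))
    if _, err := io.ReadFull(reader, header); err != nil {
        return err
    }
    if !bytes.Equal(header, IncrementFileHeader) {
        return fmt.Errorf("unexpected increment file header %v", header)
    }
    return nil
}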
Functions ¶
func ApplyFileIncrement ¶
func ConfigureSegContentID ¶
func FindRestorePointBeforeTS ¶
FindRestorePointBeforeTS finds the restore point that was created before the provided timestamp, with the finish time closest to that timestamp.
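A hedged sketch of a typical lookup, assumed to live inside this package (imports elided; storage.Folder comes from the WAL-G storage package). The timestamp layout accepted by FindRestorePointBeforeTS is not documented here, so the caller is expected to pass whatever format the implementation parses.

// restoreLSNsBefore is a hypothetical helper: it resolves the restore point
// closest to (but not after) the given timestamp and returns its per-segment LSNs.
func restoreLSNsBefore(folder storage.Folder, timestampStr string) (map[int]string, error) {
    pointName, err := FindRestorePointBeforeTS(timestampStr, folder)
    if err != nil {
        return nil, err
    }
    meta, err := FetchRestorePointMetadata(folder, pointName)
    if err != nil {
        return nil, err
    }
    return meta.LsnBySegment, nil
}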
func FormatCmdStateName ¶
func FormatCmdStatePath ¶
func FormatSegmentBackupPath ¶
func FormatSegmentWalPath ¶
func HandleRestorePointList ¶
func LoadStorageAoFiles ¶
LoadStorageAoFiles loads the list of AO/AOCS segment files that are referenced by previous backups.
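A small sketch, assumed in-package (imports elided), of how the returned set could be used to skip re-uploading AO/AOCS segment files that earlier backups already reference; the key format of the returned map is not documented here and is treated as opaque.

// aoFileAlreadyStored is a hypothetical helper that checks whether a storage key
// is already referenced by a previous backup and can therefore be deduplicated.
func aoFileAlreadyStored(baseBackupsFolder storage.Folder, storageKey string) (bool, error) {
    stored, err := LoadStorageAoFiles(baseBackupsFolder)
    if err != nil {
        return false, err
    }
    _, ok := stored[storageKey]
    return ok, nil
}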
func NewIncrementalPageReader ¶
func NewIncrementalPageReader(file io.ReadSeekCloser, eof, offset int64) (io.ReadCloser, error)
func NewSegBackupHandler ¶
func NewSegBackupHandler(arguments postgres.BackupArguments) (*postgres.BackupHandler, error)
func ReadIncrementFileHeader ¶
func SetSegmentStoragePrefix ¶
func SetSegmentStoragePrefix(contentID int)
Types ¶
type AOFilesMetadataDTO ¶
type AOFilesMetadataDTO struct {
Files BackupAOFiles
}
func NewAOFilesMetadataDTO ¶
func NewAOFilesMetadataDTO() *AOFilesMetadataDTO
type AoRelFileMetadata ¶
type AoRelFileMetadata struct {
// contains filtered or unexported fields
}
func NewAoRelFileMetadata ¶
func NewAoRelFileMetadata(relNameMd5 string, storageType RelStorageType, eof, modCount int64) AoRelFileMetadata
type AoRelFileStorageMap ¶
type AoRelFileStorageMap map[walparser.BlockLocation]AoRelFileMetadata
AoRelFileStorageMap indicates the storage type for the relfile
func NewAoRelFileStorageMap ¶
func NewAoRelFileStorageMap(queryRunner *GpQueryRunner) (AoRelFileStorageMap, error)
type AoStorageUploader ¶
type AoStorageUploader struct {
// contains filtered or unexported fields
}
func NewAoStorageUploader ¶
func NewAoStorageUploader(uploader internal.Uploader, baseAoFiles BackupAOFiles, crypter crypto.Crypter, files internal.BundleFiles, isIncremental bool) *AoStorageUploader
func (*AoStorageUploader) AddFile ¶
func (u *AoStorageUploader) AddFile(cfi *internal.ComposeFileInfo, aoMeta AoRelFileMetadata, location *walparser.BlockLocation) error
func (*AoStorageUploader) GetFiles ¶
func (u *AoStorageUploader) GetFiles() *AOFilesMetadataDTO
type Backup ¶
type Backup struct {
    internal.Backup
    SentinelDto *BackupSentinelDto // used for storage query caching
    // contains filtered or unexported fields
}
Backup contains information about a valid Greenplum backup generated and uploaded by WAL-G.
func ListStorageBackups ¶
ListStorageBackups returns the list of storage backups sorted by finish time (in ascending order)
func (*Backup) GetSegmentBackup ¶
func (*Backup) GetSentinel ¶
func (backup *Backup) GetSentinel() (BackupSentinelDto, error)
type BackupAOFileDesc ¶
type BackupAOFileDesc struct {
    StoragePath string `json:"StoragePath"`
    IsSkipped bool `json:"IsSkipped"`
    IsIncremented bool `json:"IsIncremented,omitempty"`
    MTime time.Time `json:"MTime"`
    StorageType RelStorageType `json:"StorageType"`
    EOF int64 `json:"EOF"`
    ModCount int64 `json:"ModCount,omitempty"`
    Compressor string `json:"Compressor,omitempty"`
    FileMode int64 `json:"FileMode"`
}
type BackupAOFiles ¶
type BackupAOFiles map[string]BackupAOFileDesc
type BackupArguments ¶
type BackupArguments struct {
// contains filtered or unexported fields
}
BackupArguments holds all arguments parsed from the command line for this handler.
func NewBackupArguments ¶
func NewBackupArguments(isPermanent, isFull bool, userData interface{}, fwdArgs []SegmentFwdArg, logsDir string, segPollInterval time.Duration, segPollRetries int, deltaBaseSelector internal.BackupSelector) BackupArguments
NewBackupArguments creates a BackupArguments object to hold the arguments passed from the command line.
type BackupDetail ¶
type BackupDetail struct {
    Name string
    RestorePoint *string `json:"restore_point,omitempty"`
    UserData interface{} `json:"user_data,omitempty"`
    StartTime time.Time `json:"start_time"`
    FinishTime time.Time `json:"finish_time"`
    DatetimeFormat string `json:"date_fmt,omitempty"`
    Hostname string `json:"hostname"`
    GpVersion string `json:"gp_version"`
    IsPermanent bool `json:"is_permanent"`
    SystemIdentifier *uint64 `json:"system_identifier,omitempty"`
    UncompressedSize int64 `json:"uncompressed_size"`
    CompressedSize int64 `json:"compressed_size"`
    DataCatalogSize int64 `json:"data_catalog_size"`
    IncrementFrom *string `json:"increment_from,omitempty"`
    IncrementFullName *string `json:"increment_full_name,omitempty"`
    IncrementCount *int `json:"increment_count,omitempty"`
}
func MakeBackupDetails ¶
func MakeBackupDetails(backups []Backup) []BackupDetail
func NewBackupDetail ¶
func NewBackupDetail(backup Backup) BackupDetail
type BackupFetchMode ¶
type BackupFetchMode string
const (
    DefaultFetchMode BackupFetchMode = "default"
    UnpackFetchMode BackupFetchMode = "unpack"
    PrepareFetchMode BackupFetchMode = "prepare"
)
func NewBackupFetchMode ¶
func NewBackupFetchMode(mode string) (BackupFetchMode, error)
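A hedged sketch of dispatching on the parsed mode, assumed in-package. The mapping of each mode to a FetchHandler method is an assumption based on the names (Unpack, Prepare, Fetch), not taken from this package's implementation.

// dispatchFetch is a hypothetical helper wiring a raw mode string to the
// FetchHandler entry points.
func dispatchFetch(fh *FetchHandler, rawMode string) error {
    mode, err := NewBackupFetchMode(rawMode)
    if err != nil {
        return err
    }
    switch mode {
    case UnpackFetchMode:
        fh.Unpack()
        return nil
    case PrepareFetchMode:
        return fh.Prepare()
    default: // DefaultFetchMode
        return fh.Fetch()
    }
}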
type BackupHandler ¶
type BackupHandler struct {
// contains filtered or unexported fields
}
BackupHandler is the main struct that handles the backup process.
func NewBackupHandler ¶
func NewBackupHandler(arguments BackupArguments) (bh *BackupHandler, err error)
NewBackupHandler returns a backup handler object that can handle the backup.
func (*BackupHandler) HandleBackupPush ¶
func (bh *BackupHandler) HandleBackupPush()
HandleBackupPush reads the backup from the filesystem and pushes it to the repository.
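A hedged sketch of the backup-push flow, assumed in-package (imports elided; internal is the WAL-G internal package, time is the standard library). All literal values are placeholders.

// runBackupPush is a hypothetical wrapper around the documented flow:
// build the arguments, construct the handler, then push the backup.
func runBackupPush(deltaBaseSelector internal.BackupSelector, userData interface{}) error {
    args := NewBackupArguments(
        false,             // isPermanent
        true,              // isFull
        userData,          // user data attached to the sentinel
        nil,               // fwdArgs: no extra WAL-G flags forwarded to segments
        "/tmp/wal-g-logs", // logsDir (placeholder)
        5*time.Second,     // segPollInterval
        3,                 // segPollRetries
        deltaBaseSelector,
    )
    bh, err := NewBackupHandler(args)
    if err != nil {
        return err
    }
    bh.HandleBackupPush()
    return nil
}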
type BackupObject ¶
type BackupObject struct {
    internal.BackupObject
    // contains filtered or unexported fields
}
func (BackupObject) GetBaseBackupName ¶
func (o BackupObject) GetBaseBackupName() string
func (BackupObject) GetIncrementFromName ¶
func (o BackupObject) GetIncrementFromName() string
func (BackupObject) IsFullBackup ¶
func (o BackupObject) IsFullBackup() bool
type BackupSentinelDto ¶
type BackupSentinelDto struct {
    RestorePoint *string `json:"restore_point,omitempty"`
    Segments []SegmentMetadata `json:"segments,omitempty"`
    UserData interface{} `json:"user_data,omitempty"`
    StartTime time.Time `json:"start_time"`
    FinishTime time.Time `json:"finish_time"`
    DatetimeFormat string `json:"date_fmt,omitempty"`
    Hostname string `json:"hostname"`
    GpVersion string `json:"gp_version"`
    IsPermanent bool `json:"is_permanent"`
    SystemIdentifier *uint64 `json:"system_identifier"`
    UncompressedSize int64 `json:"uncompressed_size"`
    CompressedSize int64 `json:"compressed_size"`
    DataCatalogSize int64 `json:"data_catalog_size"`
    IncrementFrom *string `json:"increment_from,omitempty"`
    IncrementFullName *string `json:"increment_full_name,omitempty"`
    IncrementCount *int `json:"increment_count,omitempty"`
}
BackupSentinelDto describes the file structure of the JSON sentinel.
func NewBackupSentinelDto ¶
func NewBackupSentinelDto(currBackupInfo *CurrBackupInfo, prevBackupInfo *PrevBackupInfo, restoreLSNs map[int]string, userData interface{}, isPermanent bool) BackupSentinelDto
NewBackupSentinelDto returns a new BackupSentinelDto instance.
func (*BackupSentinelDto) IsIncremental ¶
func (s *BackupSentinelDto) IsIncremental() (isIncremental bool)
func (*BackupSentinelDto) String ¶
func (s *BackupSentinelDto) String() string
type BackupWorkers ¶
BackupWorkers holds the external objects that the handler uses to read and write the backup data.
type ClusterRestoreConfig ¶
type ClusterRestoreConfig struct {
Segments map[int]SegmentRestoreConfig `json:"segments"`
}
ClusterRestoreConfig is used to describe the restored cluster
type CurrBackupInfo ¶
type CurrBackupInfo struct {
// contains filtered or unexported fields
}
CurrBackupInfo holds all information that is gathered during the backup process.
type DeleteArgs ¶
type DeleteHandler ¶
type DeleteHandler struct {
    internal.DeleteHandler
    // contains filtered or unexported fields
}
func NewDeleteHandler ¶
func NewDeleteHandler(folder storage.Folder, args DeleteArgs) (*DeleteHandler, error)
func (*DeleteHandler) DeleteBeforeTarget ¶
func (h *DeleteHandler) DeleteBeforeTarget(target internal.BackupObject) error
func (*DeleteHandler) HandleDeleteBefore ¶
func (h *DeleteHandler) HandleDeleteBefore(args []string)
func (*DeleteHandler) HandleDeleteEverything ¶
func (h *DeleteHandler) HandleDeleteEverything(args []string)
func (*DeleteHandler) HandleDeleteGarbage ¶
func (h *DeleteHandler) HandleDeleteGarbage(args []string) error
HandleDeleteGarbage deletes outdated WAL archives and leftover backup files.
func (*DeleteHandler) HandleDeleteRetain ¶
func (h *DeleteHandler) HandleDeleteRetain(args []string)
func (*DeleteHandler) HandleDeleteRetainAfter ¶
func (h *DeleteHandler) HandleDeleteRetainAfter(args []string)
func (*DeleteHandler) HandleDeleteTarget ¶
func (h *DeleteHandler) HandleDeleteTarget(targetSelector internal.BackupSelector)
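A hedged sketch of the delete flow, assumed in-package (imports elided). The fields of DeleteArgs are not shown on this page, so the value is taken from the caller as-is, and passing the target as a single positional argument mirrors the wal-g delete subcommand convention (an assumption).

// deleteBackupsBefore is a hypothetical helper: construct the handler for the
// storage folder, then delegate to HandleDeleteBefore with the target name.
func deleteBackupsBefore(folder storage.Folder, deleteArgs DeleteArgs, target string) error {
    handler, err := NewDeleteHandler(folder, deleteArgs)
    if err != nil {
        return err
    }
    handler.HandleDeleteBefore([]string{target})
    return nil
}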
type ExtractProviderImpl ¶
type ExtractProviderImpl struct {
FilesToExtractProviderImpl
}
type FetchHandler ¶
type FetchHandler struct {
// contains filtered or unexported fields
}
func NewFetchHandler ¶
func NewFetchHandler(
    backup internal.Backup, sentinel BackupSentinelDto,
    segCfgMaker SegConfigMaker, logsDir string,
    fetchContentIds []int, mode BackupFetchMode,
    restorePoint string,
) *FetchHandler
func (*FetchHandler) Fetch ¶
func (fh *FetchHandler) Fetch() error
func (*FetchHandler) Prepare ¶
func (fh *FetchHandler) Prepare() error
func (*FetchHandler) Unpack ¶
func (fh *FetchHandler) Unpack()
type FilesToExtractProviderImpl ¶
type FilesToExtractProviderImpl struct {
postgres.FilesToExtractProviderImpl
}
type GenericMetaFetcher ¶
type GenericMetaFetcher struct{}
func NewGenericMetaFetcher ¶
func NewGenericMetaFetcher() GenericMetaFetcher
func (GenericMetaFetcher) Fetch ¶
func (mf GenericMetaFetcher) Fetch(backupName string, backupFolder storage.Folder) (internal.GenericMetadata, error)
TODO: Unit tests
type GenericMetaInteractor ¶
type GenericMetaInteractor struct {
    GenericMetaFetcher
    GenericMetaSetter
}
func NewGenericMetaInteractor ¶
func NewGenericMetaInteractor() GenericMetaInteractor
type GenericMetaSetter ¶
type GenericMetaSetter struct{}
func NewGenericMetaSetter ¶
func NewGenericMetaSetter() GenericMetaSetter
func (GenericMetaSetter) SetIsPermanent ¶
func (ms GenericMetaSetter) SetIsPermanent(backupName string, backupFolder storage.Folder, isPermanent bool) error
TODO: Unit tests
func (GenericMetaSetter) SetUserData ¶
func (ms GenericMetaSetter) SetUserData(backupName string, backupFolder storage.Folder, userData interface{}) error
TODO: Unit tests
type GpQueryRunner ¶
type GpQueryRunner struct {
*postgres.PgQueryRunner
}
GpQueryRunner is an implementation of a query runner for controlling Greenplum.
func NewGpQueryRunner ¶
func NewGpQueryRunner(conn *pgx.Conn) (*GpQueryRunner, error)
NewGpQueryRunner builds a GpQueryRunner from an available connection.
func ToGpQueryRunner ¶
func ToGpQueryRunner(queryRunner *postgres.PgQueryRunner) *GpQueryRunner
func (*GpQueryRunner) AbortBackup ¶
func (queryRunner *GpQueryRunner) AbortBackup() (err error)
AbortBackup stops the backup process on all segments
func (*GpQueryRunner) CreateGreenplumRestorePoint ¶
func (queryRunner *GpQueryRunner) CreateGreenplumRestorePoint(restorePointName string) (restoreLSNs map[int]string, err error)
CreateGreenplumRestorePoint creates a restore point
func (*GpQueryRunner) FetchAOStorageMetadata ¶
func (queryRunner *GpQueryRunner) FetchAOStorageMetadata(dbInfo postgres.PgDatabaseInfo) (AoRelFileStorageMap, error)
FetchAOStorageMetadata queries the storage metadata for AO & AOCS tables (GreenplumDB)
func (*GpQueryRunner) GetGreenplumSegmentsInfo ¶
func (queryRunner *GpQueryRunner) GetGreenplumSegmentsInfo(semVer semver.Version) (segments []cluster.SegConfig, err error)
GetGreenplumSegmentsInfo returns information about the cluster segments.
func (*GpQueryRunner) GetGreenplumVersion ¶
func (queryRunner *GpQueryRunner) GetGreenplumVersion() (version string, err error)
GetGreenplumVersion returns the Greenplum version.
func (*GpQueryRunner) IsInBackup ¶
func (queryRunner *GpQueryRunner) IsInBackup() (isInBackupByContentID map[int]bool, err error)
IsInBackup checks whether a backup is currently running, per segment content ID.
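A hedged sketch combining the runner's methods, assumed in-package (imports elided; postgres refers to the WAL-G PostgreSQL package, fmt is the standard library).

// createClusterRestorePoint is a hypothetical helper: refuse to act while any
// segment reports a running backup, then create a cluster-wide restore point.
func createClusterRestorePoint(pgRunner *postgres.PgQueryRunner, name string) (map[int]string, error) {
    runner := ToGpQueryRunner(pgRunner)
    inBackup, err := runner.IsInBackup()
    if err != nil {
        return nil, err
    }
    for contentID, running := range inBackup {
        if running {
            return nil, fmt.Errorf("a backup is already running on segment %d", contentID)
        }
    }
    return runner.CreateGreenplumRestorePoint(name)
}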
type GpTarBallComposer ¶
type GpTarBallComposer struct {
// contains filtered or unexported fields
}
func NewGpTarBallComposer ¶
func NewGpTarBallComposer(
    tarBallQueue *internal.TarBallQueue, crypter crypto.Crypter, relStorageMap AoRelFileStorageMap,
    bundleFiles internal.BundleFiles, packer *postgres.TarBallFilePackerImpl, aoStorageUploader *AoStorageUploader,
    tarFileSets internal.TarFileSets, uploader internal.Uploader, backupName string,
) (*GpTarBallComposer, error)
func (*GpTarBallComposer) AddFile ¶
func (c *GpTarBallComposer) AddFile(info *internal.ComposeFileInfo)
func (*GpTarBallComposer) FinishComposing ¶
func (c *GpTarBallComposer) FinishComposing() (internal.TarFileSets, error)
func (*GpTarBallComposer) GetFiles ¶
func (c *GpTarBallComposer) GetFiles() internal.BundleFiles
type GpTarBallComposerMaker ¶
type GpTarBallComposerMaker struct {
    TarFileSets internal.TarFileSets
    // contains filtered or unexported fields
}
func NewGpTarBallComposerMaker ¶
func NewGpTarBallComposerMaker(relStorageMap AoRelFileStorageMap, uploader internal.Uploader, backupName string) (*GpTarBallComposerMaker, error)
func (*GpTarBallComposerMaker) Make ¶
func (maker *GpTarBallComposerMaker) Make(bundle *postgres.Bundle) (internal.TarBallComposer, error)
type InPlaceSegMaker ¶
type InPlaceSegMaker struct{}
func (*InPlaceSegMaker) Make ¶
func (c *InPlaceSegMaker) Make(metadata SegmentMetadata) (cluster.SegConfig, error)
type IncrementalTarInterpreter ¶
type IncrementalTarInterpreter struct {
    *postgres.FileTarInterpreter
    // contains filtered or unexported fields
}
func NewIncrementalTarInterpreter ¶
func NewIncrementalTarInterpreter(dbDataDirectory string, sentinel postgres.BackupSentinelDto, filesMetadata postgres.FilesMetadataDto, aoFilesMetadata AOFilesMetadataDTO, filesToUnwrap map[string]bool, createNewIncrementalFiles bool) *IncrementalTarInterpreter
type InvalidIncrementFileHeaderError ¶
type InvalidIncrementFileHeaderError struct {
// contains filtered or unexported fields
}
func (InvalidIncrementFileHeaderError) Error ¶
func (err InvalidIncrementFileHeaderError) Error() string
type NoRestorePointsFoundError ¶
type NoRestorePointsFoundError struct {
// contains filtered or unexported fields
}
func NewNoRestorePointsFoundError ¶
func NewNoRestorePointsFoundError() NoRestorePointsFoundError
type PgHbaMaker ¶
type PgHbaMaker struct {
// contains filtered or unexported fields
}
func NewPgHbaMaker ¶
func NewPgHbaMaker(segments map[int][]*cluster.SegConfig) PgHbaMaker
func (PgHbaMaker) Make ¶
func (m PgHbaMaker) Make() (string, error)
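A hedged sketch of rendering and writing the file, assumed in-package (imports elided; cluster refers to the Greenplum cluster library providing SegConfig, os and path/filepath are the standard library). The target file name and permissions are illustrative.

// writePgHba is a hypothetical helper: render pg_hba.conf contents for the given
// segment layout and write them into a data directory.
func writePgHba(segments map[int][]*cluster.SegConfig, dataDir string) error {
    contents, err := NewPgHbaMaker(segments).Make()
    if err != nil {
        return err
    }
    return os.WriteFile(filepath.Join(dataDir, "pg_hba.conf"), []byte(contents), 0o600)
}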
type PgSegmentSentinelDto ¶
type PgSegmentSentinelDto struct {
    postgres.BackupSentinelDto
    BackupName string
}
PgSegmentSentinelDto is used during the initial fetching of the segment backup metadata
type PrevBackupInfo ¶
type PrevBackupInfo struct {
// contains filtered or unexported fields
}
type RecoveryConfigMaker ¶
type RecoveryConfigMaker struct {
// contains filtered or unexported fields
}
func NewRecoveryConfigMaker ¶
func NewRecoveryConfigMaker(walgBinaryPath, cfgPath, recoveryTargetName string) RecoveryConfigMaker
func (RecoveryConfigMaker) Make ¶
func (m RecoveryConfigMaker) Make(contentID int) string
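A hedged sketch, assumed in-package; the binary path, config path, and restore point name below are placeholders, not defaults of this package.

// recoveryConfForSegment is a hypothetical helper: the maker is parameterized once
// with cluster-wide settings, then rendered per segment content ID.
func recoveryConfForSegment(contentID int) string {
    maker := NewRecoveryConfigMaker(
        "/usr/bin/wal-g",        // walgBinaryPath (placeholder)
        "/etc/wal-g/wal-g.yaml", // cfgPath (placeholder)
        "my_restore_point",      // recoveryTargetName (placeholder)
    )
    return maker.Make(contentID)
}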
type RelStorageType ¶
type RelStorageType byte
const (
    AppendOptimized RelStorageType = 'a'
    ColumnOriented RelStorageType = 'c'
)
type RestoreCfgSegMaker ¶
type RestoreCfgSegMaker struct {
// contains filtered or unexported fields
}
func (*RestoreCfgSegMaker) Make ¶
func (c *RestoreCfgSegMaker) Make(metadata SegmentMetadata) (cluster.SegConfig, error)
type RestorePointBackupSelector ¶
type RestorePointBackupSelector struct {
// contains filtered or unexported fields
}
func NewRestorePointBackupSelector ¶
func NewRestorePointBackupSelector(restorePoint string) *RestorePointBackupSelector
type RestorePointCreator ¶
type RestorePointCreator struct {
    Uploader internal.Uploader
    Conn *pgx.Conn
    // contains filtered or unexported fields
}
func NewRestorePointCreator ¶
func NewRestorePointCreator(pointName string) (rpc *RestorePointCreator, err error)
NewRestorePointCreator returns a restore point creator
func (*RestorePointCreator) Create ¶
func (rpc *RestorePointCreator) Create()
Create creates a cluster-wide consistent restore point.
type RestorePointMetadata ¶
type RestorePointMetadata struct {
    Name string `json:"name"`
    StartTime time.Time `json:"start_time"`
    FinishTime time.Time `json:"finish_time"`
    Hostname string `json:"hostname"`
    GpVersion string `json:"gp_version"`
    SystemIdentifier *uint64 `json:"system_identifier"`
    LsnBySegment map[int]string `json:"lsn_by_segment"`
}
func FetchRestorePointMetadata ¶
func FetchRestorePointMetadata(folder storage.Folder, pointName string) (RestorePointMetadata, error)
func (*RestorePointMetadata) String ¶
func (s *RestorePointMetadata) String() string
type RestorePointTime ¶
type RestorePointTime struct {
    Name string `json:"restore_point_name"`
    Time time.Time `json:"time"`
}
func GetRestorePoints ¶
func GetRestorePoints(folder storage.Folder) (restorePoints []RestorePointTime, err error)
GetRestorePoints fetches the restore point descriptions and sorts them by time.
func GetRestorePointsTimeSlices ¶
func GetRestorePointsTimeSlices(restorePoints []storage.Object) []RestorePointTime
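A hedged sketch of listing restore points, assumed in-package (imports elided; fmt and time are the standard library).

// printRestorePoints is a hypothetical helper that prints each restore point
// name together with its creation time.
func printRestorePoints(folder storage.Folder) error {
    points, err := GetRestorePoints(folder)
    if err != nil {
        return err
    }
    for _, point := range points {
        fmt.Printf("%s\t%s\n", point.Name, point.Time.Format(time.RFC3339))
    }
    return nil
}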
type SegBackup ¶
type SegBackup struct {
    postgres.Backup
    AoFilesMetadataDto *AOFilesMetadataDTO
}
func ToGpSegBackup ¶
func (*SegBackup) LoadAoFilesMetadata ¶
func (backup *SegBackup) LoadAoFilesMetadata() (*AOFilesMetadataDTO, error)
type SegCmdRunner ¶
type SegCmdRunner struct {
// contains filtered or unexported fields
}
func NewSegCmdRunner ¶
func NewSegCmdRunner(contentID int, cmdName, cmdArgs string, updInterval time.Duration) *SegCmdRunner
func (*SegCmdRunner) Run ¶
func (r *SegCmdRunner) Run()
type SegCmdState ¶
type SegCmdState struct {
    TS time.Time `json:"ts"`
    Status SegCmdStatus `json:"status"`
}
type SegCmdStatus ¶
type SegCmdStatus string
const (
    RunningCmdStatus SegCmdStatus = "running"
    FailedCmdStatus SegCmdStatus = "failed"
    SuccessCmdStatus SegCmdStatus = "success"
    InterruptedCmdStatus SegCmdStatus = "interrupted"
)
type SegConfigMaker ¶
type SegConfigMaker interface {
Make(SegmentMetadata) (cluster.SegConfig, error)
}
func NewRestoreCfgSegMaker ¶
func NewRestoreCfgSegMaker(restoreConfigReader io.Reader) (SegConfigMaker, error)
func NewSegConfigMaker ¶
func NewSegConfigMaker(restoreCfgPath string, inPlaceRestore bool) (SegConfigMaker, error)
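A hedged sketch of converting segment metadata into cluster configs, assumed in-package (imports elided; cluster refers to the Greenplum cluster library). NewSegConfigMaker itself chooses between the restore-config-driven maker and the in-place maker, so the helper below only loops over the metadata.

// segConfigsFromMetadata is a hypothetical helper: build the appropriate maker,
// then translate every SegmentMetadata entry into a cluster.SegConfig.
func segConfigsFromMetadata(restoreCfgPath string, inPlaceRestore bool, metas []SegmentMetadata) ([]cluster.SegConfig, error) {
    maker, err := NewSegConfigMaker(restoreCfgPath, inPlaceRestore)
    if err != nil {
        return nil, err
    }
    configs := make([]cluster.SegConfig, 0, len(metas))
    for _, meta := range metas {
        cfg, err := maker.Make(meta)
        if err != nil {
            return nil, err
        }
        configs = append(configs, cfg)
    }
    return configs, nil
}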
type SegDeleteBeforeHandler ¶
type SegDeleteBeforeHandler struct {
    *postgres.DeleteHandler
    // contains filtered or unexported fields
}
func (SegDeleteBeforeHandler) Delete ¶
func (h SegDeleteBeforeHandler) Delete(segBackup SegBackup) error
type SegDeleteHandler ¶
func NewSegDeleteHandler ¶
func NewSegDeleteHandler(rootFolder storage.Folder, contentID int, args DeleteArgs, delType SegDeleteType) (SegDeleteHandler, error)
type SegDeleteTargetHandler ¶
type SegDeleteTargetHandler struct {
    *postgres.DeleteHandler
    // contains filtered or unexported fields
}
func (SegDeleteTargetHandler) Delete ¶
func (h SegDeleteTargetHandler) Delete(segBackup SegBackup) error
type SegDeleteType ¶
type SegDeleteType int
const (
    SegDeleteBefore SegDeleteType = iota
    SegDeleteTarget
)
type SegDeltaBackupConfigurator ¶
type SegDeltaBackupConfigurator struct {
// contains filtered or unexported fields
}
func NewSegDeltaBackupConfigurator ¶
func NewSegDeltaBackupConfigurator(deltaBaseSelector internal.BackupSelector) SegDeltaBackupConfigurator
func (SegDeltaBackupConfigurator) Configure ¶
func (c SegDeltaBackupConfigurator) Configure(folder storage.Folder, isPermanent bool) (prevBackupInfo postgres.PrevBackupInfo, incrementCount int, err error)
type SegmentFwdArg ¶
SegmentFwdArg describes the specific WAL-G arguments that are going to be forwarded to the segments.
type SegmentMetadata ¶
type SegmentMetadata struct {
    DatabaseID int `json:"db_id"`
    ContentID int `json:"content_id"`
    Role SegmentRole `json:"role"`
    Port int `json:"port"`
    Hostname string `json:"hostname"`
    DataDir string `json:"data_dir"`
    BackupID string `json:"backup_id"`
    BackupName string `json:"backup_name"`
    RestorePointLSN string `json:"restore_point_lsn"`
}
func NewSegmentMetadata ¶
func NewSegmentMetadata(backupID string, segCfg cluster.SegConfig, restoreLSN, backupName string) SegmentMetadata
func (SegmentMetadata) ToSegConfig ¶
func (c SegmentMetadata) ToSegConfig() cluster.SegConfig
type SegmentRestoreConfig ¶
type SegmentRole ¶
type SegmentRole string
const (
    Primary SegmentRole = "p"
    Mirror SegmentRole = "m"
)
type SegmentUserData ¶
type SegmentUserData struct {
ID string `json:"id"`
}
func NewSegmentUserData ¶
func NewSegmentUserData() SegmentUserData
func NewSegmentUserDataFromID ¶
func NewSegmentUserDataFromID(backupID string) SegmentUserData
func (SegmentUserData) QuotedString ¶
func (d SegmentUserData) QuotedString() string
QuotedString marshals the value to JSON and then quotes the result, escaping special control characters so that it can be passed as a command-line argument to a segment.
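A hedged sketch, assumed in-package (imports elided; fmt is the standard library); the flag name below is illustrative and not taken from this package.

// segmentUserDataFlag is a hypothetical helper showing why QuotedString exists:
// the marshalled JSON is quoted so it survives transport as a command-line argument.
func segmentUserDataFlag() string {
    userData := NewSegmentUserData()
    return fmt.Sprintf("--user-data=%s", userData.QuotedString())
}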
func (SegmentUserData) String ¶
func (d SegmentUserData) String() string
type UnexpectedTarDataError ¶
type UnexpectedTarDataError struct {
// contains filtered or unexported fields
}
func (UnexpectedTarDataError) Error ¶
func (err UnexpectedTarDataError) Error() string
type UnknownIncrementFileHeaderError ¶
type UnknownIncrementFileHeaderError struct {
// contains filtered or unexported fields
}
func (UnknownIncrementFileHeaderError) Error ¶
func (err UnknownIncrementFileHeaderError) Error() string
Source Files ¶
- ao_increment.go
- ao_metadata.go
- ao_storage.go
- ao_storage_uploader.go
- backup.go
- backup_fetch_handler.go
- backup_list_handler.go
- backup_object.go
- backup_push_handler.go
- backup_selector.go
- backup_sentinel_dto.go
- configure.go
- delete_handler.go
- extract_provider.go
- generic_meta_interactor.go
- incremental_tar_interpreter.go
- pg_hba_cfg_maker.go
- query_runner.go
- recovery_cfg_maker.go
- relfile_storage_map.go
- restore_point.go
- restore_point_list_handler.go
- segment_backup.go
- segment_backup_push_handler.go
- segment_command_runner.go
- segment_command_state.go
- segment_config_maker.go
- segment_delete_handler.go
- segment_delta_configurator.go
- tar_ball_composer.go
- tars_to_extract_provider.go
- util.go