Documentation ¶
Index ¶
- Constants
- Variables
- func AddConfigFlags(Cmd *cobra.Command, hiddenCfgFlagAnnotation string)
- func AddTurboFlag(cmd *cobra.Command)
- func AssertRequiredSettingsSet() error
- func CheckAllowedSettings(config *viper.Viper)
- func CompressAndEncrypt(source io.Reader, compressor compression.Compressor, crypter crypto.Crypter) io.Reader
- func Configure()
- func ConfigureAndRunDefaultWebServer() error
- func ConfigureArchiveStatusManager() (fsutil.DataFolder, error)
- func ConfigureCompressor() (compression.Compressor, error)
- func ConfigureCrypter() crypto.Crypter
- func ConfigureFolder() (storage.Folder, error)
- func ConfigureFolderForSpecificConfig(config *viper.Viper) (storage.Folder, error)
- func ConfigureLimiters()
- func ConfigureLogging() error
- func ConfigurePGArchiveStatusManager() (fsutil.DataFolder, error)
- func ConfigureSettings(currentType string)
- func ConfigureStoragePrefix(folder storage.Folder) storage.Folder
- func DecompressDecryptBytes(archiveReader io.Reader, decompressor compression.Decompressor) (io.ReadCloser, error)
- func DecryptAndDecompressTar(reader io.Reader, filePath string, crypter crypto.Crypter) (io.ReadCloser, error)
- func DecryptBytes(archiveReader io.Reader) (io.Reader, error)
- func DefaultHandleBackupList(folder storage.Folder, pretty, json bool)
- func DeleteArgsValidator(args, stringModifiers []string, minArgs int, maxArgs int) error
- func DeleteBackups(folder storage.Folder, backups []string) error
- func DeleteBeforeArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteEverythingArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteGarbage(folder storage.Folder, garbage []string) error
- func DeleteRetainAfterArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteRetainArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteTargetArgsValidator(cmd *cobra.Command, args []string) error
- func DownloadAndDecompressSplittedStream(backup Backup, blockSize int, extension string, writeCloser io.WriteCloser, ...) error
- func DownloadAndDecompressStorageFile(folder storage.Folder, fileName string) (io.ReadCloser, error)
- func DownloadAndDecompressStream(backup Backup, writeCloser io.WriteCloser) error
- func DownloadFile(folder storage.Folder, filename, ext string, writeCloser io.WriteCloser) error
- func DownloadFileTo(folder storage.Folder, fileName string, dstPath string) error
- func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error
- func ExtractAllWithSleeper(tarInterpreter TarInterpreter, files []ReaderMaker, sleeper Sleeper) error
- func ExtractDeleteEverythingModifierFromArgs(args []string) int
- func ExtractDeleteModifierFromArgs(args []string) (int, string)
- func ExtractDeleteRetainAfterModifierFromArgs(args []string) (int, string, string)
- func ExtractDeleteTargetModifierFromArgs(args []string) int
- func FatalOnUnrecoverableMetadataError(backupTime BackupTime, err error)
- func FetchDto(folder storage.Folder, dto interface{}, path string) error
- func FolderFromConfig(configFile string) (storage.Folder, error)
- func FolderSize(folder storage.Folder, path string) (int64, error)
- func FormatTime(backupTime time.Time) string
- func FormatTimeInner(backupTime time.Time, timeFormat string) string
- func GetBackupSentinelObjects(folder storage.Folder) ([]storage.Object, error)
- func GetBackupToCommandFetcher(cmd *exec.Cmd) func(folder storage.Folder, backup Backup)
- func GetBoolSetting(setting string) (val bool, ok bool, err error)
- func GetBoolSettingDefault(setting string, def bool) (bool, error)
- func GetCommandSetting(variableName string) (*exec.Cmd, error)
- func GetCommandSettingContext(ctx context.Context, variableName string) (*exec.Cmd, error)
- func GetDataFolderPath() string
- func GetDeltaConfig() (maxDeltas int, fromFull bool)
- func GetDurationSetting(setting string) (time.Duration, error)
- func GetGarbageFromPrefix(folders []storage.Folder, nonGarbage []BackupTime) []string
- func GetLastDecompressor() (compression.Decompressor, error)
- func GetLatestBackupName(folder storage.Folder) (string, error)
- func GetLogsDstSettings(operationLogsDstEnvVariable string) (dstFolder string, err error)
- func GetMaxConcurrency(concurrencyType string) (int, error)
- func GetMaxDownloadConcurrency() (int, error)
- func GetMaxUploadConcurrency() (int, error)
- func GetMaxUploadDiskConcurrency() (int, error)
- func GetOplogArchiveAfterSize() (int, error)
- func GetOplogPITRDiscoveryIntervalSetting() (*time.Duration, error)
- func GetPartitionedBackupFileNames(backup Backup, decompressor compression.Decompressor) ([][]string, error)
- func GetPartitionedSteamMultipartName(backupName string, extension string, partIdx int, fileNumber int) string
- func GetPartitionedStreamName(backupName string, extension string, partIdx int) string
- func GetPermanentBackups(folder storage.Folder, metaFetcher GenericMetaFetcher) map[string]bool
- func GetPgSlotName() (pgSlotName string)
- func GetRelativeArchiveDataFolderPath() string
- func GetRequiredSetting(setting string) (string, error)
- func GetSentinelUserData() (interface{}, error)
- func GetSetting(key string) (value string, ok bool)
- func GetStreamName(backupName string, extension string) string
- func HandleBackupFetch(folder storage.Folder, targetBackupSelector BackupSelector, ...)
- func HandleBackupList(getBackupsFunc func() ([]BackupTime, error), ...)
- func HandleBackupMark(uploader *Uploader, backupName string, toPermanent bool, ...)
- func InitConfig()
- func IsPermanent(objectName string, permanentBackups map[string]bool, backupNameLength int) bool
- func MetadataNameFromBackup(backupName string) string
- func PackFileTo(tarBall TarBall, fileInfoHeader *tar.Header, fileContent io.Reader) (fileSize int64, err error)
- func ParseTS(endTSEnvVar string) (endTS *time.Time, err error)
- func PrettyFormatTime(backupTime time.Time) string
- func PushMetrics()
- func ReadConfigFromFile(config *viper.Viper, configFile string)
- func SentinelNameFromBackup(backupName string) string
- func SetDefaultValues(config *viper.Viper)
- func SetGoMaxProcs(config *viper.Viper)
- func SetLastDecompressor(decompressor compression.Decompressor) error
- func SortBackupTimeSlices(backupTimes []BackupTime)
- func SortTimedBackup(backups []TimedBackup)
- func SplitPurgingBackups(backups []TimedBackup, retainCount *int, retainAfter *time.Time) (purge, retain map[string]bool, err error)
- func StartReadingFile(fileInfoHeader *tar.Header, info os.FileInfo, path string) (io.ReadSeekCloser, error)
- func StreamBackupToCommandStdin(cmd *exec.Cmd, backup Backup) error
- func StreamMetadataNameFromBackup(backupName string) string
- func TryDownloadFile(folder storage.Folder, path string) (fileReader io.ReadCloser, exists bool, err error)
- func UnmarshalSentinelUserData(userDataStr string) (interface{}, error)
- func UnwrapLatestModifier(backupName string, folder storage.Folder) (string, error)
- func UploadBackupStreamMetadata(uploader UploaderProvider, metadata interface{}, backupName string) error
- func UploadDto(folder storage.Folder, dto interface{}, path string) error
- func UploadSentinel(uploader UploaderProvider, sentinelDto interface{}, backupName string) error
- func WriteAsJSON(data interface{}, output io.Writer, pretty bool) error
- func WriteBackupList(backups []BackupTime, output io.Writer)
- func WritePrettyBackupList(backups []BackupTime, output io.Writer)
- type ArchiveNonExistenceError
- type Backup
- func (backup *Backup) AssureExists() error
- func (backup *Backup) CheckExistence() (bool, error)
- func (backup *Backup) FetchMetadata(metadataDto interface{}) error
- func (backup *Backup) FetchSentinel(sentinelDto interface{}) error
- func (backup *Backup) SentinelExists() (bool, error)
- func (backup *Backup) UploadMetadata(metadataDto interface{}) error
- func (backup *Backup) UploadSentinel(sentinelDto interface{}) error
- type BackupFileDescription
- type BackupFileList
- type BackupHasPermanentBackupInFutureError
- type BackupMarkHandler
- type BackupNameSelector
- type BackupNonExistenceError
- type BackupObject
- type BackupSelector
- func CreateTargetDeleteBackupSelector(cmd *cobra.Command, args []string, targetUserData string, ...) (BackupSelector, error)
- func NewDeltaBaseSelector(targetBackupName, targetUserData string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
- func NewTargetBackupSelector(targetUserData, targetName string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
- type BackupStreamMetadata
- type BackupTime
- type Bundle
- func (bundle *Bundle) AddToBundle(path string, info os.FileInfo, err error) error
- func (bundle *Bundle) FinishComposing() (TarFileSets, error)
- func (bundle *Bundle) FinishQueue() error
- func (bundle *Bundle) GetFileRelPath(fileAbsPath string) string
- func (bundle *Bundle) SetupComposer(composerMaker TarBallComposerMaker) (err error)
- func (bundle *Bundle) StartQueue(tarBallMaker TarBallMaker) error
- type BundleFiles
- type CachedDecompressor
- type CommonDirectoryDownloader
- type CommonDirectoryUploader
- type CommonFilesFilter
- type ComposeFileInfo
- type ComposeRatingEvaluator
- type CompressAndEncryptError
- type CorruptBlocksInfo
- type DefaultBackupObject
- type DefaultComposeRatingEvaluator
- type DeleteHandler
- func (h *DeleteHandler) DeleteBeforeTarget(target BackupObject, confirmed bool) error
- func (h *DeleteHandler) DeleteBeforeTargetWhere(target BackupObject, confirmed bool, ...) error
- func (h *DeleteHandler) DeleteEverything(confirmed bool)
- func (h *DeleteHandler) DeleteTarget(target BackupObject, confirmed, findFull bool, ...) error
- func (h *DeleteHandler) FindTargetBefore(beforeStr string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetBeforeName(name string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetBeforeTime(timeLine time.Time, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetByName(bname string) (BackupObject, error)
- func (h *DeleteHandler) FindTargetBySelector(targetSelector BackupSelector) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetain(retentionCount, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetainAfter(retentionCount int, afterStr string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetainAfterName(retentionCount int, name string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetainAfterTime(retentionCount int, timeLine time.Time, modifier int) (BackupObject, error)
- func (h *DeleteHandler) HandleDeleteBefore(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteEverything(args []string, permanentBackups map[string]bool, confirmed bool)
- func (h *DeleteHandler) HandleDeleteRetain(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteRetainAfter(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteTarget(targetSelector BackupSelector, confirmed, findFull bool)
- type DeleteHandlerOption
- type DevNullWriter
- type DirectoryDownloader
- type DirectoryIsNotEmptyError
- type DirectoryUploader
- type DtoSerializer
- type DtoSerializerType
- type ErrWaiter
- type ErrorLogger
- type ExponentialSleeper
- type FileNotExistError
- type FileTarInterpreter
- type FileType
- type FilesFilter
- type GenericMetaFetcher
- type GenericMetaInteractor
- type GenericMetaSetter
- type GenericMetadata
- type IncrementDetails
- type IncrementDetailsFetcher
- type InfoLogger
- type InvalidConcurrencyValueError
- type LatestBackupSelector
- type LazyCache
- type LimitedFolder
- type Logging
- type MetaConstructor
- type NOPTarBall
- func (tarBall *NOPTarBall) AddSize(i int64)
- func (tarBall *NOPTarBall) AwaitUploads()
- func (tarBall *NOPTarBall) CloseTar() error
- func (tarBall *NOPTarBall) Name() string
- func (tarBall *NOPTarBall) SetUp(crypter crypto.Crypter, params ...string)
- func (tarBall *NOPTarBall) Size() int64
- func (tarBall *NOPTarBall) TarWriter() *tar.Writer
- type NOPTarBallMaker
- type NoBackupsFoundError
- type NoFilesToExtractError
- type NopBundleFiles
- func (files *NopBundleFiles) AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)
- func (files *NopBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)
- func (files *NopBundleFiles) AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool, ...)
- func (files *NopBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
- func (files *NopBundleFiles) GetUnderlyingMap() *sync.Map
- type NopIncrementDetailsFetcher
- type NopTarFileSets
- type OldestNonPermanentSelector
- type ProfileStopper
- type ReaderMaker
- type RegularBundleFiles
- func (files *RegularBundleFiles) AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)
- func (files *RegularBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)
- func (files *RegularBundleFiles) AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool, ...)
- func (files *RegularBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
- func (files *RegularBundleFiles) GetUnderlyingMap() *sync.Map
- type RegularJSON
- type RegularTarBallComposer
- func (c *RegularTarBallComposer) AddFile(info *ComposeFileInfo)
- func (c *RegularTarBallComposer) AddHeader(header *tar.Header, fileInfo os.FileInfo) error
- func (c *RegularTarBallComposer) FinishComposing() (TarFileSets, error)
- func (c *RegularTarBallComposer) GetFiles() BundleFiles
- func (c *RegularTarBallComposer) SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)
- type RegularTarBallComposerMaker
- type RegularTarBallFilePacker
- type RegularTarFileSets
- type Sentinel
- type SentinelMarshallingError
- type Sleeper
- type SplitStreamUploader
- type StorageAdapter
- type StorageReaderMaker
- type StorageTarBall
- func (tarBall *StorageTarBall) AddSize(i int64)
- func (tarBall *StorageTarBall) AwaitUploads()
- func (tarBall *StorageTarBall) CloseTar() error
- func (tarBall *StorageTarBall) Name() string
- func (tarBall *StorageTarBall) SetUp(crypter crypto.Crypter, names ...string)
- func (tarBall *StorageTarBall) Size() int64
- func (tarBall *StorageTarBall) TarWriter() *tar.Writer
- type StorageTarBallMaker
- type StreamFetcher
- type StreamedJSON
- type TarBall
- type TarBallComposer
- type TarBallComposerMaker
- type TarBallFilePacker
- type TarBallMaker
- type TarBallQueue
- func (tarQueue *TarBallQueue) CheckSizeAndEnqueueBack(tarBall TarBall) error
- func (tarQueue *TarBallQueue) CloseTarball(tarBall TarBall) error
- func (tarQueue *TarBallQueue) Deque() TarBall
- func (tarQueue *TarBallQueue) DequeCtx(ctx context.Context) (TarBall, error)
- func (tarQueue *TarBallQueue) EnqueueBack(tarBall TarBall)
- func (tarQueue *TarBallQueue) FinishQueue() error
- func (tarQueue *TarBallQueue) FinishTarBall(tarBall TarBall) error
- func (tarQueue *TarBallQueue) NewTarBall(dedicatedUploader bool) TarBall
- func (tarQueue *TarBallQueue) StartQueue() error
- type TarFileSets
- type TarInterpreter
- type TarSizeError
- type TimedBackup
- type UnconfiguredStorageError
- type UnknownCompressionMethodError
- type UnknownSerializerTypeError
- type UnmarshallingError
- type UnsetRequiredSettingError
- type UnsupportedFileTypeError
- type UploadObject
- type Uploader
- func (uploader *Uploader) ChangeDirectory(relativePath string)
- func (uploader *Uploader) Clone() *Uploader
- func (uploader *Uploader) Compression() compression.Compressor
- func (uploader *Uploader) DisableSizeTracking()
- func (uploader *Uploader) Finish()
- func (uploader *Uploader) Folder() storage.Folder
- func (uploader *Uploader) PushStream(stream io.Reader) (string, error)
- func (uploader *Uploader) PushStreamToDestination(stream io.Reader, dstPath string) error
- func (uploader *Uploader) RawDataSize() (int64, error)
- func (uploader *Uploader) Upload(path string, content io.Reader) error
- func (uploader *Uploader) UploadFile(file ioextensions.NamedReader) error
- func (uploader *Uploader) UploadMultiple(objects []UploadObject) error
- func (uploader *Uploader) UploadedDataSize() (int64, error)
- type UploaderProvider
- type UserDataBackupSelector
- type WrongTypeError
Constants ¶
const ( PG = "PG" SQLSERVER = "SQLSERVER" MYSQL = "MYSQL" REDIS = "REDIS" FDB = "FDB" MONGO = "MONGO" GP = "GP" DownloadConcurrencySetting = "WALG_DOWNLOAD_CONCURRENCY" UploadConcurrencySetting = "WALG_UPLOAD_CONCURRENCY" UploadDiskConcurrencySetting = "WALG_UPLOAD_DISK_CONCURRENCY" UploadQueueSetting = "WALG_UPLOAD_QUEUE" SentinelUserDataSetting = "WALG_SENTINEL_USER_DATA" PreventWalOverwriteSetting = "WALG_PREVENT_WAL_OVERWRITE" UploadWalMetadata = "WALG_UPLOAD_WAL_METADATA" DeltaMaxStepsSetting = "WALG_DELTA_MAX_STEPS" DeltaOriginSetting = "WALG_DELTA_ORIGIN" CompressionMethodSetting = "WALG_COMPRESSION_METHOD" StoragePrefixSetting = "WALG_STORAGE_PREFIX" DiskRateLimitSetting = "WALG_DISK_RATE_LIMIT" NetworkRateLimitSetting = "WALG_NETWORK_RATE_LIMIT" UseWalDeltaSetting = "WALG_USE_WAL_DELTA" UseReverseUnpackSetting = "WALG_USE_REVERSE_UNPACK" SkipRedundantTarsSetting = "WALG_SKIP_REDUNDANT_TARS" VerifyPageChecksumsSetting = "WALG_VERIFY_PAGE_CHECKSUMS" StoreAllCorruptBlocksSetting = "WALG_STORE_ALL_CORRUPT_BLOCKS" UseRatingComposerSetting = "WALG_USE_RATING_COMPOSER" UseCopyComposerSetting = "WALG_USE_COPY_COMPOSER" UseDatabaseComposerSetting = "WALG_USE_DATABASE_COMPOSER" WithoutFilesMetadataSetting = "WALG_WITHOUT_FILES_METADATA" DeltaFromNameSetting = "WALG_DELTA_FROM_NAME" DeltaFromUserDataSetting = "WALG_DELTA_FROM_USER_DATA" FetchTargetUserDataSetting = "WALG_FETCH_TARGET_USER_DATA" LogLevelSetting = "WALG_LOG_LEVEL" TarSizeThresholdSetting = "WALG_TAR_SIZE_THRESHOLD" TarDisableFsyncSetting = "WALG_TAR_DISABLE_FSYNC" CseKmsIDSetting = "WALG_CSE_KMS_ID" CseKmsRegionSetting = "WALG_CSE_KMS_REGION" LibsodiumKeySetting = "WALG_LIBSODIUM_KEY" LibsodiumKeyPathSetting = "WALG_LIBSODIUM_KEY_PATH" LibsodiumKeyTransform = "WALG_LIBSODIUM_KEY_TRANSFORM" GpgKeyIDSetting = "GPG_KEY_ID" PgpKeySetting = "WALG_PGP_KEY" PgpKeyPathSetting = "WALG_PGP_KEY_PATH" PgpKeyPassphraseSetting = "WALG_PGP_KEY_PASSPHRASE" PgDataSetting = "PGDATA" UserSetting = "USER" // TODO : do something with it PgPortSetting = "PGPORT" PgUserSetting = "PGUSER" PgHostSetting = "PGHOST" PgPasswordSetting = "PGPASSWORD" PgPassfileSetting = "PGPASSFILE" PgDatabaseSetting = "PGDATABASE" PgSslModeSetting = "PGSSLMODE" PgSlotName = "WALG_SLOTNAME" PgWalSize = "WALG_PG_WAL_SIZE" TotalBgUploadedLimit = "TOTAL_BG_UPLOADED_LIMIT" NameStreamCreateCmd = "WALG_STREAM_CREATE_COMMAND" NameStreamRestoreCmd = "WALG_STREAM_RESTORE_COMMAND" MaxDelayedSegmentsCount = "WALG_INTEGRITY_MAX_DELAYED_WALS" PrefetchDir = "WALG_PREFETCH_DIR" PgReadyRename = "PG_READY_RENAME" SerializerTypeSetting = "WALG_SERIALIZER_TYPE" StreamSplitterPartitions = "WALG_STREAM_SPLITTER_PARTITIONS" StreamSplitterBlockSize = "WALG_STREAM_SPLITTER_BLOCK_SIZE" StreamSplitterMaxFileSize = "WALG_STREAM_SPLITTER_MAX_FILE_SIZE" StatsdAddressSetting = "WALG_STATSD_ADDRESS" PgAliveCheckInterval = "WALG_ALIVE_CHECK_INTERVAL" PgStopBackupTimeout = "WALG_STOP_BACKUP_TIMEOUT" ProfileSamplingRatio = "PROFILE_SAMPLING_RATIO" ProfileMode = "PROFILE_MODE" ProfilePath = "PROFILE_PATH" MongoDBUriSetting = "MONGODB_URI" MongoDBLastWriteUpdateInterval = "MONGODB_LAST_WRITE_UPDATE_INTERVAL" OplogArchiveAfterSize = "OPLOG_ARCHIVE_AFTER_SIZE" OplogArchiveTimeoutInterval = "OPLOG_ARCHIVE_TIMEOUT_INTERVAL" OplogPITRDiscoveryInterval = "OPLOG_PITR_DISCOVERY_INTERVAL" OplogPushStatsEnabled = "OPLOG_PUSH_STATS_ENABLED" OplogPushStatsLoggingInterval = "OPLOG_PUSH_STATS_LOGGING_INTERVAL" OplogPushStatsUpdateInterval = "OPLOG_PUSH_STATS_UPDATE_INTERVAL" OplogPushStatsExposeHTTP = 
"OPLOG_PUSH_STATS_EXPOSE_HTTP" OplogPushWaitForBecomePrimary = "OPLOG_PUSH_WAIT_FOR_BECOME_PRIMARY" OplogPushPrimaryCheckInterval = "OPLOG_PUSH_PRIMARY_CHECK_INTERVAL" OplogReplayOplogAlwaysUpsert = "OPLOG_REPLAY_OPLOG_ALWAYS_UPSERT" OplogReplayOplogApplicationMode = "OPLOG_REPLAY_OPLOG_APPLICATION_MODE" OplogReplayIgnoreErrorCodes = "OPLOG_REPLAY_IGNORE_ERROR_CODES" MysqlDatasourceNameSetting = "WALG_MYSQL_DATASOURCE_NAME" MysqlSslCaSetting = "WALG_MYSQL_SSL_CA" MysqlBinlogReplayCmd = "WALG_MYSQL_BINLOG_REPLAY_COMMAND" MysqlBinlogDstSetting = "WALG_MYSQL_BINLOG_DST" MysqlBackupPrepareCmd = "WALG_MYSQL_BACKUP_PREPARE_COMMAND" MysqlTakeBinlogsFromMaster = "WALG_MYSQL_TAKE_BINLOGS_FROM_MASTER" MysqlCheckGTIDs = "WALG_MYSQL_CHECK_GTIDS" MysqlBinlogServerHost = "WALG_MYSQL_BINLOG_SERVER_HOST" MysqlBinlogServerPort = "WALG_MYSQL_BINLOG_SERVER_PORT" MysqlBinlogServerUser = "WALG_MYSQL_BINLOG_SERVER_USER" MysqlBinlogServerPassword = "WALG_MYSQL_BINLOG_SERVER_PASSWORD" MysqlBinlogServerID = "WALG_MYSQL_BINLOG_SERVER_ID" MysqlBinlogServerReplicaSource = "WALG_MYSQL_BINLOG_SERVER_REPLICA_SOURCE" MysqlBackupDownloadMaxRetry = "WALG_BACKUP_DOWNLOAD_MAX_RETRY" RedisPassword = "WALG_REDIS_PASSWORD" GPLogsDirectory = "WALG_GP_LOGS_DIR" GPSegContentID = "WALG_GP_SEG_CONTENT_ID" GPSegmentsPollInterval = "WALG_GP_SEG_POLL_INTERVAL" GPSegmentsPollRetries = "WALG_GP_SEG_POLL_RETRIES" GPSegmentsUpdInterval = "WALG_GP_SEG_UPD_INTERVAL" GPSegmentStatesDir = "WALG_GP_SEG_STATES_DIR" GPDeleteConcurrency = "WALG_GP_DELETE_CONCURRENCY" GPAoSegSizeThreshold = "WALG_GP_AOSEG_SIZE_THRESHOLD" GoMaxProcs = "GOMAXPROCS" HTTPListen = "HTTP_LISTEN" HTTPExposePprof = "HTTP_EXPOSE_PPROF" HTTPExposeExpVar = "HTTP_EXPOSE_EXPVAR" SQLServerBlobHostname = "SQLSERVER_BLOB_HOSTNAME" SQLServerBlobCertFile = "SQLSERVER_BLOB_CERT_FILE" SQLServerBlobKeyFile = "SQLSERVER_BLOB_KEY_FILE" SQLServerBlobLockFile = "SQLSERVER_BLOB_LOCK_FILE" SQLServerConnectionString = "SQLSERVER_CONNECTION_STRING" SQLServerDBConcurrency = "SQLSERVER_DB_CONCURRENCY" SQLServerReuseProxy = "SQLSERVER_REUSE_PROXY" EndpointSourceSetting = "S3_ENDPOINT_SOURCE" EndpointPortSetting = "S3_ENDPOINT_PORT" AwsAccessKeyID = "AWS_ACCESS_KEY_ID" AwsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" AwsSessionToken = "AWS_SESSION_TOKEN" YcKmsKeyIDSetting = "YC_CSE_KMS_KEY_ID" YcSaKeyFileSetting = "YC_SERVICE_ACCOUNT_KEY_FILE" PgBackRestStanza = "PGBACKREST_STANZA" AzureStorageAccount = "AZURE_STORAGE_ACCOUNT" AzureStorageAccessKey = "AZURE_STORAGE_ACCESS_KEY" AzureStorageSasToken = "AZURE_STORAGE_SAS_TOKEN" AzureEnvironmentName = "AZURE_ENVIRONMENT_NAME" GoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS" SwiftOsAuthURL = "OS_AUTH_URL" SwiftOsUsername = "OS_USERNAME" SwiftOsPassword = "OS_PASSWORD" SwiftOsTenantName = "OS_TENANT_NAME" SwiftOsRegionName = "OS_REGION_NAME" SSHPort = "SSH_PORT" SSHPassword = "SSH_PASSWORD" SSHUsername = "SSH_USERNAME" SSHPrivateKeyPath = "SSH_PRIVATE_KEY_PATH" SystemdNotifySocket = "NOTIFY_SOCKET" )
const (
    DefaultDataBurstRateLimit = 8 * pgDefaultDatabasePageSize
    DefaultDataFolderPath     = "/tmp"
    WaleFileHost              = "file://localhost"
)
const (
    NoDeleteModifier = iota
    FullDeleteModifier
    FindFullDeleteModifier
    ForceDeleteModifier

    ConfirmFlag            = "confirm"
    DeleteShortDescription = "Clears old backups and WALs"

    DeleteRetainExamples     = `` /* 321-byte string literal not displayed */
    DeleteBeforeExamples     = `` /* 156-byte string literal not displayed */
    DeleteEverythingExamples = `` /* 154-byte string literal not displayed */
    DeleteTargetExamples     = `` /* 420-byte string literal not displayed */

    DeleteEverythingUsageExample = "everything [FORCE]"
    DeleteRetainUsageExample     = "retain [FULL|FIND_FULL] backup_count"
    DeleteBeforeUsageExample     = "before [FIND_FULL] backup_name|timestamp"
    DeleteTargetUsageExample     = "target [FIND_FULL] backup_name | --target-user-data <data>"

    DeleteTargetUserDataFlag        = "target-user-data"
    DeleteTargetUserDataDescription = "delete storage backup which has the specified user data"
)
const (
    SplitMergeStreamBackup   = "SPLIT_MERGE_STREAM_BACKUP"
    SingleStreamStreamBackup = "STREAM_BACKUP"
)
const (
    StreamPrefix           = "stream_"
    StreamBackupNameLength = 23 // len(StreamPrefix) + len(utility.BackupTimeFormat)
)
const LatestString = "LATEST"
const MaxCorruptBlocksInFileDesc int = 10
const MinAllowedConcurrency = 1
const TarPartitionFolderName = "/tar_partitions/"
Variables ¶
var (
    CfgFile string

    MongoDefaultSettings = map[string]string{
        OplogPushStatsLoggingInterval:  "30s",
        OplogPushStatsUpdateInterval:   "30s",
        OplogPushWaitForBecomePrimary:  "false",
        OplogPushPrimaryCheckInterval:  "30s",
        OplogArchiveTimeoutInterval:    "60s",
        OplogArchiveAfterSize:          "16777216",
        MongoDBLastWriteUpdateInterval: "3s",
        StreamSplitterBlockSize:        "1048576",
    }

    MysqlDefaultSettings = map[string]string{
        StreamSplitterBlockSize:     "1048576",
        MysqlBackupDownloadMaxRetry: "1",
    }

    SQLServerDefaultSettings = map[string]string{
        SQLServerDBConcurrency: "10",
    }

    PGDefaultSettings = map[string]string{
        PgWalSize:        "16",
        PgBackRestStanza: "main",
    }

    GPDefaultSettings = map[string]string{
        GPLogsDirectory:        "/var/log",
        PgWalSize:              "64",
        GPSegmentsPollInterval: "5m",
        GPSegmentsUpdInterval:  "10s",
        GPSegmentsPollRetries:  "5",
        GPSegmentStatesDir:     "/tmp",
        GPDeleteConcurrency:    "1",
        GPAoSegSizeThreshold:   "1048576",
    }

    AllowedSettings map[string]bool

    CommonAllowedSettings = map[string]bool{} /* 103 elements not displayed */

    PGAllowedSettings = map[string]bool{
        PgPortSetting:        true,
        PgUserSetting:        true,
        PgHostSetting:        true,
        PgDataSetting:        true,
        PgPasswordSetting:    true,
        PgPassfileSetting:    true,
        PgDatabaseSetting:    true,
        PgSslModeSetting:     true,
        PgSlotName:           true,
        PgWalSize:            true,
        PrefetchDir:          true,
        PgReadyRename:        true,
        PgBackRestStanza:     true,
        PgAliveCheckInterval: true,
        PgStopBackupTimeout:  true,
    }

    MongoAllowedSettings = map[string]bool{
        MongoDBUriSetting:              true,
        MongoDBLastWriteUpdateInterval: true,
        OplogArchiveTimeoutInterval:    true,
        OplogArchiveAfterSize:          true,
        OplogPushStatsEnabled:          true,
        OplogPushStatsLoggingInterval:  true,
        OplogPushStatsUpdateInterval:   true,
        OplogPushStatsExposeHTTP:       true,
        OplogPushWaitForBecomePrimary:  true,
        OplogPushPrimaryCheckInterval:  true,
        OplogPITRDiscoveryInterval:     true,
        StreamSplitterBlockSize:        true,
        StreamSplitterPartitions:       true,
    }

    SQLServerAllowedSettings = map[string]bool{
        SQLServerBlobHostname:     true,
        SQLServerBlobCertFile:     true,
        SQLServerBlobKeyFile:      true,
        SQLServerBlobLockFile:     true,
        SQLServerConnectionString: true,
        SQLServerDBConcurrency:    true,
        SQLServerReuseProxy:       true,
    }

    MysqlAllowedSettings = map[string]bool{
        MysqlDatasourceNameSetting:     true,
        MysqlSslCaSetting:              true,
        MysqlBinlogReplayCmd:           true,
        MysqlBinlogDstSetting:          true,
        MysqlBackupPrepareCmd:          true,
        MysqlTakeBinlogsFromMaster:     true,
        MysqlCheckGTIDs:                true,
        StreamSplitterPartitions:       true,
        StreamSplitterBlockSize:        true,
        StreamSplitterMaxFileSize:      true,
        MysqlBinlogServerHost:          true,
        MysqlBinlogServerPort:          true,
        MysqlBinlogServerUser:          true,
        MysqlBinlogServerPassword:      true,
        MysqlBinlogServerID:            true,
        MysqlBinlogServerReplicaSource: true,
        MysqlBackupDownloadMaxRetry:    true,
    }

    RedisAllowedSettings = map[string]bool{
        RedisPassword: true,
    }

    GPAllowedSettings = map[string]bool{
        GPLogsDirectory:        true,
        GPSegContentID:         true,
        GPSegmentsPollRetries:  true,
        GPSegmentsPollInterval: true,
        GPSegmentsUpdInterval:  true,
        GPSegmentStatesDir:     true,
        GPDeleteConcurrency:    true,
        GPAoSegSizeThreshold:   true,
    }

    RequiredSettings = make(map[string]bool)

    HTTPSettingExposeFuncs = map[string]func(webserver.WebServer){
        HTTPExposePprof:          webserver.EnablePprofEndpoints,
        HTTPExposeExpVar:         webserver.EnableExpVarEndpoints,
        OplogPushStatsExposeHTTP: nil,
    }

    Turbo bool
)
var (
    WalgMetricsPrefix = "walg_"

    WalgMetrics = metrics{
        // contains filtered or unexported fields
    }
)
var DeprecatedExternalGpgMessage = fmt.Sprintf(
    `You are using deprecated functionality that uses an external gpg library.
It will be removed in next major version.
Please set GPG key using environment variables %s or %s.
`, PgpKeySetting, PgpKeyPathSetting)
var ErrorSizeTrackingDisabled = fmt.Errorf("size tracking disabled by DisableSizeTracking method")
var MaxExtractRetryWait = 5 * time.Minute
var MinExtractRetryWait = time.Minute
var StorageAdapters = []StorageAdapter{
    {"S3_PREFIX", s3.SettingList, s3.ConfigureFolder, nil},
    {"FILE_PREFIX", nil, fs.ConfigureFolder, preprocessFilePrefix},
    {"GS_PREFIX", gcs.SettingList, gcs.ConfigureFolder, nil},
    {"AZ_PREFIX", azure.SettingList, azure.ConfigureFolder, nil},
    {"SWIFT_PREFIX", swift.SettingList, swift.ConfigureFolder, nil},
    {"SSH_PREFIX", sh.SettingsList, sh.ConfigureFolder, nil},
}
var StringModifiers = []string{"FULL", "FIND_FULL"}
var StringModifiersDeleteEverything = []string{"FORCE"}
Functions ¶
func AddConfigFlags ¶
func AddTurboFlag ¶
func AssertRequiredSettingsSet ¶
func AssertRequiredSettingsSet() error
func CheckAllowedSettings ¶
CheckAllowedSettings warns if any of a viper instance's settings are not allowed.
func CompressAndEncrypt ¶
func CompressAndEncrypt(source io.Reader, compressor compression.Compressor, crypter crypto.Crypter) io.Reader
CompressAndEncrypt compresses input to a pipe reader. Output must be used or pipe will block.
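For example, a minimal sketch of draining the pipe-backed reader (not part of the original documentation; the module import path and the configuration calls are assumptions):

package main

import (
    "bytes"
    "io"
    "os"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    compressor, err := internal.ConfigureCompressor() // uses WALG_COMPRESSION_METHOD
    if err != nil {
        panic(err)
    }
    crypter := internal.ConfigureCrypter() // may be nil when no encryption is configured

    source := bytes.NewReader([]byte("payload"))
    compressed := internal.CompressAndEncrypt(source, compressor, crypter)

    // The result is pipe-backed: it must be drained, or the internal
    // writer goroutine blocks forever.
    if _, err := io.Copy(os.Stdout, compressed); err != nil {
        panic(err)
    }
}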
func ConfigureAndRunDefaultWebServer ¶
func ConfigureAndRunDefaultWebServer() error
ConfigureAndRunDefaultWebServer configures and runs web server
func ConfigureArchiveStatusManager ¶
func ConfigureArchiveStatusManager() (fsutil.DataFolder, error)
TODO : unit tests
func ConfigureCompressor ¶
func ConfigureCompressor() (compression.Compressor, error)
TODO : unit tests
func ConfigureCrypter ¶
ConfigureCrypter uses environment variables to create and configure a crypter. If no crypto configuration is found in the environment variables, it returns a `<nil>` value.
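Because a nil crypter is the "encryption disabled" signal, callers are expected to check for it. A minimal sketch (import path assumed):

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    crypter := internal.ConfigureCrypter()
    if crypter == nil {
        log.Println("no crypter configured; data will be stored unencrypted")
        return
    }
    log.Println("crypter configured")
}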
func ConfigureFolderForSpecificConfig ¶
TODO: when multiple storage 'keys' are provided in the config, this function always returns only one concrete 'folder'; the chosen folder depends solely on the 'StorageAdapters' order.
func ConfigureLogging ¶
func ConfigureLogging() error
func ConfigurePGArchiveStatusManager ¶
func ConfigurePGArchiveStatusManager() (fsutil.DataFolder, error)
func DecompressDecryptBytes ¶
func DecompressDecryptBytes(archiveReader io.Reader, decompressor compression.Decompressor) (io.ReadCloser, error)
func DecryptAndDecompressTar ¶
func DecryptAndDecompressTar(reader io.Reader, filePath string, crypter crypto.Crypter) (io.ReadCloser, error)
DecryptAndDecompressTar decrypts the file and checks its extension. If it is a tar, no decompression is needed; otherwise the corresponding decompressor is used. If none is found, an error is returned.
func DefaultHandleBackupList ¶
func DeleteArgsValidator ¶
func DeleteBackups ¶
DeleteBackups purges the given backups' files. TODO: extract BackupLayout abstraction and provide DataPath(), SentinelPath(), Exists() methods
func DeleteGarbage ¶
DeleteGarbage purges the given garbage keys.
func DownloadAndDecompressSplittedStream ¶
func DownloadAndDecompressSplittedStream(backup Backup, blockSize int, extension string, writeCloser io.WriteCloser, maxDownloadRetry int) error
TODO : unit tests. DownloadAndDecompressSplittedStream downloads, decompresses, and writes the stream to the provided writer (typically stdout).
func DownloadAndDecompressStorageFile ¶
func DownloadAndDecompressStorageFile(folder storage.Folder, fileName string) (io.ReadCloser, error)
TODO : unit tests
func DownloadAndDecompressStream ¶
func DownloadAndDecompressStream(backup Backup, writeCloser io.WriteCloser) error
TODO : unit tests. DownloadAndDecompressStream downloads, decompresses, and writes the stream to the provided writer (typically stdout).
func DownloadFile ¶
DownloadFile downloads, decompresses, and decrypts a storage file.
func DownloadFileTo ¶
TODO : unit tests. DownloadFileTo downloads a file and writes it to a local file.
func ExtractAll ¶
func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error
ExtractAll handles all files passed in. Supports `.lzo`, `.lz4`, `.lzma`, and `.tar`. File type `.nop` is used for testing purposes. Each file is extracted in its own goroutine, and ExtractAll waits for all goroutines to finish. Unsuccessful attempts are retried log2(MaxConcurrency) times, dividing the concurrency by two each time.
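A minimal calling sketch (not from the package docs). The Interpret signature on TarInterpreter is elided from this page and assumed from the wal-g sources; whether ExtractAll's error wrapping cooperates with errors.As is also an assumption:

package main

import (
    "archive/tar"
    "errors"
    "io"
    "log"

    "github.com/wal-g/wal-g/internal"
)

// discardInterpreter drains every tar entry; a hypothetical TarInterpreter.
type discardInterpreter struct{}

func (discardInterpreter) Interpret(reader io.Reader, header *tar.Header) error {
    _, err := io.Copy(io.Discard, reader)
    return err
}

func main() {
    var files []internal.ReaderMaker // would normally be built from storage objects
    err := internal.ExtractAll(discardInterpreter{}, files)
    var noFiles internal.NoFilesToExtractError
    if errors.As(err, &noFiles) {
        log.Println("nothing to extract")
    } else if err != nil {
        log.Fatal(err)
    }
}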
func ExtractAllWithSleeper ¶
func ExtractAllWithSleeper(tarInterpreter TarInterpreter, files []ReaderMaker, sleeper Sleeper) error
func ExtractDeleteEverythingModifierFromArgs ¶
ExtractDeleteEverythingModifierFromArgs extracts the modifier from the args of the "delete everything" command
func ExtractDeleteModifierFromArgs ¶
ExtractDeleteModifierFromArgs extracts the delete modifier from the args of the "delete retain"/"delete before" commands
func ExtractDeleteRetainAfterModifierFromArgs ¶
ExtractDeleteRetainAfterModifierFromArgs extracts the modifier and arguments of the "delete retain --after" command
func ExtractDeleteTargetModifierFromArgs ¶
ExtractDeleteTargetModifierFromArgs extracts the modifier from the args of the "delete target" command
func FatalOnUnrecoverableMetadataError ¶
func FatalOnUnrecoverableMetadataError(backupTime BackupTime, err error)
func FolderFromConfig ¶
FolderFromConfig prefers the config parameters over the current environment variables.
func FormatTime ¶
func GetBackupToCommandFetcher ¶
GetBackupToCommandFetcher returns a function that copies all bytes from the backup to the command's stdin.
func GetDataFolderPath ¶
func GetDataFolderPath() string
func GetGarbageFromPrefix ¶
func GetGarbageFromPrefix(folders []storage.Folder, nonGarbage []BackupTime) []string
func GetLastDecompressor ¶
func GetLastDecompressor() (compression.Decompressor, error)
func GetLogsDstSettings ¶
TODO : unit tests. GetLogsDstSettings reads the logs destination folder from the environment variables.
func GetMaxConcurrency ¶
func GetMaxUploadConcurrency ¶
func GetPartitionedBackupFileNames ¶
func GetPartitionedBackupFileNames(backup Backup, decompressor compression.Decompressor) ([][]string, error)
func GetPermanentBackups ¶
func GetPermanentBackups(folder storage.Folder, metaFetcher GenericMetaFetcher) map[string]bool
func GetPgSlotName ¶
func GetPgSlotName() (pgSlotName string)
GetPgSlotName reads the slot name from the environment
func GetRelativeArchiveDataFolderPath ¶
func GetRelativeArchiveDataFolderPath() string
func GetRequiredSetting ¶
func GetSentinelUserData ¶
func GetSentinelUserData() (interface{}, error)
func GetSetting ¶
GetSetting extracts a setting by key if the key is set, and returns an empty string otherwise.
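A small sketch of the settings getters, using two setting constants from this package (import path assumed):

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    // GetSetting also reports whether the key was set at all.
    if dataDir, ok := internal.GetSetting(internal.PgDataSetting); ok {
        log.Printf("PGDATA: %s", dataDir)
    }

    // GetBoolSettingDefault falls back to the given default when unset.
    useDelta, err := internal.GetBoolSettingDefault(internal.UseWalDeltaSetting, false)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("WAL delta enabled: %v", useDelta)
}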
func GetStreamName ¶
func HandleBackupFetch ¶
func HandleBackupFetch(folder storage.Folder, targetBackupSelector BackupSelector, fetcher func(folder storage.Folder, backup Backup))
TODO : unit tests. HandleBackupFetch is invoked to perform wal-g backup-fetch.
func HandleBackupList ¶
func HandleBackupList(getBackupsFunc func() ([]BackupTime, error), writeBackupListFunc func([]BackupTime), logging Logging)
func HandleBackupMark ¶
func HandleBackupMark(uploader *Uploader, backupName string, toPermanent bool, metaInteractor GenericMetaInteractor)
func IsPermanent ¶
IsPermanent is a generic function to determine if the storage object is permanent. It does not support permanent WALs or binlogs.
func MetadataNameFromBackup ¶
func PackFileTo ¶
func PrettyFormatTime ¶
func PushMetrics ¶
func PushMetrics()
func ReadConfigFromFile ¶
ReadConfigFromFile reads the config into the viper instance.
func SentinelNameFromBackup ¶
func SetDefaultValues ¶
SetDefaultValues sets the default settings on the viper instance.
func SetGoMaxProcs ¶
func SetLastDecompressor ¶
func SetLastDecompressor(decompressor compression.Decompressor) error
func SortBackupTimeSlices ¶
func SortBackupTimeSlices(backupTimes []BackupTime)
func SortTimedBackup ¶
func SortTimedBackup(backups []TimedBackup)
func SplitPurgingBackups ¶
func SplitPurgingBackups(backups []TimedBackup, retainCount *int, retainAfter *time.Time) (purge, retain map[string]bool, err error)
SplitPurgingBackups partitions backups into sets to purge and to retain; if there is no retention policy, all backups are retained.
func StartReadingFile ¶
func StartReadingFile(fileInfoHeader *tar.Header, info os.FileInfo, path string) (io.ReadSeekCloser, error)
TODO : unit tests
func StreamBackupToCommandStdin ¶
StreamBackupToCommandStdin downloads and decompresses backup stream to cmd stdin.
func TryDownloadFile ¶
func UnwrapLatestModifier ¶
UnwrapLatestModifier checks whether LATEST was provided instead of a backup name; if so, it replaces it with the name of the latest backup.
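A minimal sketch (import path assumed; which folder level, storage root or backup subfolder, the function expects is also an assumption here):

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    folder, err := internal.ConfigureFolder()
    if err != nil {
        log.Fatal(err)
    }
    // LatestString == "LATEST"; any other name is returned unchanged.
    name, err := internal.UnwrapLatestModifier(internal.LatestString, folder)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("resolved backup name: %s", name)
}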
func UploadBackupStreamMetadata ¶
func UploadBackupStreamMetadata(uploader UploaderProvider, metadata interface{}, backupName string) error
func UploadSentinel ¶
func UploadSentinel(uploader UploaderProvider, sentinelDto interface{}, backupName string) error
TODO : unit tests
func WriteBackupList ¶
func WriteBackupList(backups []BackupTime, output io.Writer)
func WritePrettyBackupList ¶
func WritePrettyBackupList(backups []BackupTime, output io.Writer)
Types ¶
type ArchiveNonExistenceError ¶
type ArchiveNonExistenceError struct {
// contains filtered or unexported fields
}
func (ArchiveNonExistenceError) Error ¶
func (err ArchiveNonExistenceError) Error() string
type Backup ¶
type Backup struct {
    Name string
    // base backup folder or catchup backup folder
    Folder storage.Folder
}
Backup provides basic functionality to fetch backup-related information from storage
WAL-G stores information about a single backup in the following files:
Sentinel file - contains useful information, such as backup start time, backup size, etc. see FetchSentinel, UploadSentinel
Metadata file (only in Postgres) - Postgres sentinel files can be quite large (> 1GB), so the metadata file is useful for the quick fetch of backup-related information. see FetchMetadata, UploadMetadata
func GetBackupByName ¶
func (*Backup) AssureExists ¶
AssureExists is similar to CheckExistence, but returns an error in two cases: 1. the backup does not exist; 2. it failed to check whether the backup exists.
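A sketch contrasting the two methods. Backup's fields are exported, so a value can be built directly; the backup name and subfolder below are hypothetical, and the import path is assumed:

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    folder, err := internal.ConfigureFolder()
    if err != nil {
        log.Fatal(err)
    }
    backup := internal.Backup{
        Name:   "base_000000010000000000000002",           // hypothetical name
        Folder: folder.GetSubFolder("basebackups_005"),    // hypothetical subfolder
    }

    // CheckExistence separates "not found" from "could not check".
    exists, err := backup.CheckExistence()
    if err != nil {
        log.Fatalf("could not check backup: %v", err)
    }
    log.Printf("backup exists: %v", exists)

    // AssureExists folds both cases into a single error.
    if err := backup.AssureExists(); err != nil {
        log.Fatal(err)
    }
}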
func (*Backup) CheckExistence ¶
func (*Backup) FetchMetadata ¶
TODO : unit tests
func (*Backup) FetchSentinel ¶
TODO : unit tests
func (*Backup) SentinelExists ¶
SentinelExists checks that the sentinel file of the specified backup exists.
func (*Backup) UploadMetadata ¶
func (*Backup) UploadSentinel ¶
type BackupFileDescription ¶
type BackupFileDescription struct {
    IsIncremented bool // should never be both incremented and Skipped
    IsSkipped     bool
    MTime         time.Time
    CorruptBlocks *CorruptBlocksInfo `json:",omitempty"`
    UpdatesCount  uint64
}
func NewBackupFileDescription ¶
func NewBackupFileDescription(isIncremented, isSkipped bool, modTime time.Time) *BackupFileDescription
func (*BackupFileDescription) SetCorruptBlocks ¶
func (desc *BackupFileDescription) SetCorruptBlocks(corruptBlockNumbers []uint32, storeAllBlocks bool)
type BackupFileList ¶
type BackupFileList map[string]BackupFileDescription
type BackupHasPermanentBackupInFutureError ¶
type BackupHasPermanentBackupInFutureError struct {
// contains filtered or unexported fields
}
type BackupMarkHandler ¶
type BackupMarkHandler struct {
// contains filtered or unexported fields
}
func NewBackupMarkHandler ¶
func NewBackupMarkHandler(metaInteractor GenericMetaInteractor, storageRootFolder storage.Folder) BackupMarkHandler
func (*BackupMarkHandler) GetBackupsToMark ¶
func (h *BackupMarkHandler) GetBackupsToMark(backupName string, toPermanent bool) ([]string, error)
GetBackupsToMark retrieves all previous permanent or impermanent backups, including the named one, any previous delta backups, and the initial full backup, in increasing order beginning from the full backup, returning the backups ready to be marked.
For example, when marking backups from impermanent to permanent, we retrieve all currently impermanent backups and return them as a slice
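A sketch of previewing and then marking a chain (import paths assumed; the GenericMetaInteractor is database-specific and must be supplied by the caller):

package example

import (
    "log"

    "github.com/wal-g/wal-g/internal"
    "github.com/wal-g/wal-g/pkg/storages/storage"
)

// markPermanent marks a backup and its whole delta chain as permanent.
func markPermanent(folder storage.Folder, metaInteractor internal.GenericMetaInteractor, backupName string) {
    handler := internal.NewBackupMarkHandler(metaInteractor, folder)

    // List the chain that would be affected before actually marking it.
    names, err := handler.GetBackupsToMark(backupName, true)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("backups to mark permanent: %v", names)

    // MarkBackup has no return value; it reports failures itself.
    handler.MarkBackup(backupName, true)
}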
func (*BackupMarkHandler) MarkBackup ¶
func (h *BackupMarkHandler) MarkBackup(backupName string, toPermanent bool)
MarkBackup marks a backup as permanent or impermanent
type BackupNameSelector ¶
type BackupNameSelector struct {
// contains filtered or unexported fields
}
BackupNameSelector selects a backup by the provided backup name.
func NewBackupNameSelector ¶
func NewBackupNameSelector(backupName string, checkExistence bool) (BackupNameSelector, error)
type BackupNonExistenceError ¶
type BackupNonExistenceError struct {
// contains filtered or unexported fields
}
func NewBackupNonExistenceError ¶
func NewBackupNonExistenceError(backupName string) BackupNonExistenceError
func (BackupNonExistenceError) Error ¶
func (err BackupNonExistenceError) Error() string
type BackupObject ¶
type BackupObject interface {
    storage.Object
    GetBackupTime() time.Time
    GetBackupName() string
    // TODO: move increment info into separate struct (in backup.go)
    IsFullBackup() bool
    GetBaseBackupName() string
    GetIncrementFromName() string
}
BackupObject represents the backup sentinel object uploaded to storage.
func FindBackupObjects ¶
func FindBackupObjects(folder storage.Folder) ([]BackupObject, error)
func NewDefaultBackupObject ¶
func NewDefaultBackupObject(object storage.Object) BackupObject
type BackupSelector ¶
BackupSelector returns the name of storage backup chosen according to the internal rules. Returns NoBackupsFoundError in case there are no backups matching the criteria.
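A usage sketch (import path assumed; the Select method's exact signature is elided from this page and assumed here):

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
)

func main() {
    folder, err := internal.ConfigureFolder()
    if err != nil {
        log.Fatal(err)
    }
    // checkExistence=true verifies the backup is actually in storage;
    // the backup name is hypothetical.
    selector, err := internal.NewBackupNameSelector("base_000000010000000000000002", true)
    if err != nil {
        log.Fatal(err)
    }
    selected, err := selector.Select(folder) // assumed signature
    if err != nil {
        log.Fatal(err) // e.g. NoBackupsFoundError
    }
    log.Printf("selected: %v", selected)
}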
func CreateTargetDeleteBackupSelector ¶
func CreateTargetDeleteBackupSelector(cmd *cobra.Command, args []string, targetUserData string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
CreateTargetDeleteBackupSelector creates the BackupSelector that chooses the backup to delete.
func NewDeltaBaseSelector ¶
func NewDeltaBaseSelector(targetBackupName, targetUserData string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
NewDeltaBaseSelector returns the BackupSelector for delta backup base according to the provided flags
func NewTargetBackupSelector ¶
func NewTargetBackupSelector(targetUserData, targetName string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
type BackupStreamMetadata ¶
type BackupTime ¶
type BackupTime struct {
    BackupName  string    `json:"backup_name"`
    Time        time.Time `json:"time"`
    WalFileName string    `json:"wal_file_name"`
}
BackupTime is used to sort backups by latest modified time.
func GetBackupTimeSlices ¶
func GetBackupTimeSlices(backups []storage.Object) []BackupTime
func GetBackups ¶
func GetBackups(folder storage.Folder) (backups []BackupTime, err error)
TODO : unit tests. GetBackups receives backup descriptions and sorts them by time.
func GetBackupsAndGarbage ¶
func GetBackupsAndGarbage(folder storage.Folder) (backups []BackupTime, garbage []string, err error)
TODO : unit tests
type Bundle ¶
type Bundle struct {
    Directory string
    Sentinel  *Sentinel

    TarBallComposer TarBallComposer
    TarBallQueue    *TarBallQueue

    Crypter crypto.Crypter

    TarSizeThreshold  int64
    ExcludedFilenames map[string]utility.Empty
    FilesFilter       FilesFilter
}
func (*Bundle) AddToBundle ¶
func (*Bundle) FinishComposing ¶
func (bundle *Bundle) FinishComposing() (TarFileSets, error)
func (*Bundle) FinishQueue ¶
func (*Bundle) GetFileRelPath ¶
func (*Bundle) SetupComposer ¶
func (bundle *Bundle) SetupComposer(composerMaker TarBallComposerMaker) (err error)
func (*Bundle) StartQueue ¶
func (bundle *Bundle) StartQueue(tarBallMaker TarBallMaker) error
type BundleFiles ¶
type BundleFiles interface {
    AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
    AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)
    AddFileDescription(name string, backupFileDescription BackupFileDescription)
    AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool, corruptedBlocks []uint32, storeAllBlocks bool)
    GetUnderlyingMap() *sync.Map
}
BundleFiles represents the files in the backup that is about to be created.
type CachedDecompressor ¶
type CachedDecompressor struct {
FileExtension string
}
CachedDecompressor stores the file extension that describes the decompressor.
type CommonDirectoryDownloader ¶
func (*CommonDirectoryDownloader) DownloadDirectory ¶
func (downloader *CommonDirectoryDownloader) DownloadDirectory(pathToRestore string) error
type CommonDirectoryUploader ¶
type CommonDirectoryUploader struct {
// contains filtered or unexported fields
}
func NewCommonDirectoryUploader ¶
func NewCommonDirectoryUploader(
    crypter crypto.Crypter,
    packer TarBallFilePacker,
    tarBallComposerMaker TarBallComposerMaker,
    tarSizeThreshold int64,
    excludedFiles map[string]utility.Empty,
    backupName string,
    uploader *Uploader,
) *CommonDirectoryUploader
func (*CommonDirectoryUploader) Upload ¶
func (u *CommonDirectoryUploader) Upload(path string) TarFileSets
type CommonFilesFilter ¶
type CommonFilesFilter struct{}
func (*CommonFilesFilter) ShouldUploadFile ¶
func (*CommonFilesFilter) ShouldUploadFile(path string) bool
type ComposeFileInfo ¶
type ComposeFileInfo struct {
    Path          string
    FileInfo      os.FileInfo
    WasInBase     bool
    Header        *tar.Header
    IsIncremented bool
}
func NewComposeFileInfo ¶
type ComposeRatingEvaluator ¶
type CompressAndEncryptError ¶
type CompressAndEncryptError struct {
// contains filtered or unexported fields
}
CompressAndEncryptError is used to catch specific errors from CompressAndEncrypt when uploading to Storage. Will not retry upload if this error occurs.
func (CompressAndEncryptError) Error ¶
func (err CompressAndEncryptError) Error() string
type CorruptBlocksInfo ¶
type DefaultBackupObject ¶
func (DefaultBackupObject) GetBackupName ¶
func (o DefaultBackupObject) GetBackupName() string
func (DefaultBackupObject) GetBackupTime ¶
func (o DefaultBackupObject) GetBackupTime() time.Time
func (DefaultBackupObject) GetBaseBackupName ¶
func (o DefaultBackupObject) GetBaseBackupName() string
func (DefaultBackupObject) GetIncrementFromName ¶
func (o DefaultBackupObject) GetIncrementFromName() string
func (DefaultBackupObject) IsFullBackup ¶
func (o DefaultBackupObject) IsFullBackup() bool
type DefaultComposeRatingEvaluator ¶
type DefaultComposeRatingEvaluator struct {
// contains filtered or unexported fields
}
func NewDefaultComposeRatingEvaluator ¶
func NewDefaultComposeRatingEvaluator(incrementFromFiles BackupFileList) *DefaultComposeRatingEvaluator
type DeleteHandler ¶
func NewDeleteHandler ¶
func NewDeleteHandler(
    folder storage.Folder,
    backups []BackupObject,
    less func(object1, object2 storage.Object) bool,
    options ...DeleteHandlerOption,
) *DeleteHandler
func (*DeleteHandler) DeleteBeforeTarget ¶
func (h *DeleteHandler) DeleteBeforeTarget(target BackupObject, confirmed bool) error
func (*DeleteHandler) DeleteBeforeTargetWhere ¶
func (h *DeleteHandler) DeleteBeforeTargetWhere(target BackupObject, confirmed bool, objSelector func(object storage.Object) bool, folderFilter func(name string) bool) error
func (*DeleteHandler) DeleteEverything ¶
func (h *DeleteHandler) DeleteEverything(confirmed bool)
func (*DeleteHandler) DeleteTarget ¶
func (h *DeleteHandler) DeleteTarget(target BackupObject, confirmed, findFull bool, folderFilter func(name string) bool) error
func (*DeleteHandler) FindTargetBefore ¶
func (h *DeleteHandler) FindTargetBefore(beforeStr string, modifier int) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetBeforeName ¶
func (h *DeleteHandler) FindTargetBeforeName(name string, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetBeforeTime ¶
func (h *DeleteHandler) FindTargetBeforeTime(timeLine time.Time, modifier int) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetByName ¶
func (h *DeleteHandler) FindTargetByName(bname string) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetBySelector ¶
func (h *DeleteHandler) FindTargetBySelector(targetSelector BackupSelector) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetRetain ¶
func (h *DeleteHandler) FindTargetRetain(retentionCount, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetRetainAfter ¶
func (h *DeleteHandler) FindTargetRetainAfter(retentionCount int, afterStr string, modifier int) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetRetainAfterName ¶
func (h *DeleteHandler) FindTargetRetainAfterName(retentionCount int, name string, modifier int) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) FindTargetRetainAfterTime ¶
func (h *DeleteHandler) FindTargetRetainAfterTime(retentionCount int, timeLine time.Time, modifier int) (BackupObject, error)
TODO: unit tests
func (*DeleteHandler) HandleDeleteBefore ¶
func (h *DeleteHandler) HandleDeleteBefore(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteEverything ¶
func (h *DeleteHandler) HandleDeleteEverything(args []string, permanentBackups map[string]bool, confirmed bool)
func (*DeleteHandler) HandleDeleteRetain ¶
func (h *DeleteHandler) HandleDeleteRetain(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteRetainAfter ¶
func (h *DeleteHandler) HandleDeleteRetainAfter(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteTarget ¶
func (h *DeleteHandler) HandleDeleteTarget(targetSelector BackupSelector, confirmed, findFull bool)
type DeleteHandlerOption ¶
type DeleteHandlerOption func(h *DeleteHandler)
func IsPermanentFunc ¶
func IsPermanentFunc(isPermanent func(storage.Object) bool) DeleteHandlerOption
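A sketch of wiring a DeleteHandler with this option (import paths assumed; GetLastModified is assumed to be part of storage.Object, and confirmed=false behaving as a dry run is also an assumption):

package main

import (
    "log"

    "github.com/wal-g/wal-g/internal"
    "github.com/wal-g/wal-g/pkg/storages/storage"
)

func main() {
    folder, err := internal.ConfigureFolder()
    if err != nil {
        log.Fatal(err)
    }
    backups, err := internal.FindBackupObjects(folder)
    if err != nil {
        log.Fatal(err)
    }
    // Order backup objects by modification time.
    less := func(a, b storage.Object) bool { return a.GetLastModified().Before(b.GetLastModified()) }

    handler := internal.NewDeleteHandler(folder, backups, less,
        internal.IsPermanentFunc(func(obj storage.Object) bool { return false }), // nothing is permanent
    )

    // Keep the 5 most recent backups; delete everything older (dry run).
    target, err := handler.FindTargetRetain(5, internal.NoDeleteModifier)
    if err != nil {
        log.Fatal(err)
    }
    if err := handler.DeleteBeforeTarget(target, false); err != nil {
        log.Fatal(err)
    }
}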
type DevNullWriter ¶
type DevNullWriter struct {
    io.WriteCloser
    // contains filtered or unexported fields
}
type DirectoryDownloader ¶
func NewCommonDirectoryDownloader ¶
func NewCommonDirectoryDownloader(folder storage.Folder, backupName string) DirectoryDownloader
type DirectoryIsNotEmptyError ¶
type DirectoryIsNotEmptyError struct {
// contains filtered or unexported fields
}
func NewDirectoryIsNotEmptyError ¶
func NewDirectoryIsNotEmptyError(path string) DirectoryIsNotEmptyError
type DirectoryUploader ¶
type DirectoryUploader interface {
Upload(path string) TarFileSets
}
type DtoSerializer ¶
type DtoSerializerType ¶
type DtoSerializerType string
const (
    RegularJSONSerializer  DtoSerializerType = "json_default"
    StreamedJSONSerializer DtoSerializerType = "json_streamed"
)
type ErrorLogger ¶
type ErrorLogger interface {
FatalOnError(err error)
}
type ExponentialSleeper ¶
type ExponentialSleeper struct {
// contains filtered or unexported fields
}
func NewExponentialSleeper ¶
func NewExponentialSleeper(startSleepDuration, sleepDurationBound time.Duration) *ExponentialSleeper
func (*ExponentialSleeper) Sleep ¶
func (sleeper *ExponentialSleeper) Sleep()
type FileNotExistError ¶
type FileNotExistError struct {
// contains filtered or unexported fields
}
func NewFileNotExistError ¶
func NewFileNotExistError(path string) FileNotExistError
func (FileNotExistError) Error ¶
func (err FileNotExistError) Error() string
type FileTarInterpreter ¶
type FileTarInterpreter struct {
    DirectoryToSave string
    // contains filtered or unexported fields
}
type FilesFilter ¶
type GenericMetaFetcher ¶
type GenericMetaFetcher interface {
Fetch(backupName string, backupFolder storage.Folder) (GenericMetadata, error)
}
type GenericMetaInteractor ¶
type GenericMetaInteractor interface {
    GenericMetaFetcher
    GenericMetaSetter
}
GenericMetaInteractor is a combination of GenericMetaFetcher and GenericMetaSetter. It can be useful when both are needed.
type GenericMetaSetter ¶
type GenericMetadata ¶
type GenericMetadata struct {
    BackupName       string
    UncompressedSize int64
    CompressedSize   int64
    Hostname         string
    StartTime        time.Time
    FinishTime       time.Time

    IsPermanent   bool
    IsIncremental bool

    // need to use separate fetcher
    // to avoid useless sentinel load (in Postgres)
    IncrementDetails IncrementDetailsFetcher

    UserData interface{}
}
GenericMetadata allows obtaining basic information about an existing backup in storage. It is useful when creating functionality that is common to all databases, for example backup-list or backup-mark.
To support the GenericMetadata in some particular database, one should write its own GenericMetaFetcher and GenericMetaSetter.
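A sketch of such a fetcher for a database without incremental backups (import paths assumed; the sentinel field names are illustrative assumptions):

package example

import (
    "time"

    "github.com/wal-g/wal-g/internal"
    "github.com/wal-g/wal-g/pkg/storages/storage"
)

// exampleMetaFetcher fills GenericMetadata from a backup's sentinel file.
type exampleMetaFetcher struct{}

// Compile-time check that the interface is satisfied.
var _ internal.GenericMetaFetcher = exampleMetaFetcher{}

func (exampleMetaFetcher) Fetch(backupName string, backupFolder storage.Folder) (internal.GenericMetadata, error) {
    backup := internal.Backup{Name: backupName, Folder: backupFolder}
    var sentinel struct {
        StartTime  time.Time   `json:"start_time"`  // assumed field names
        FinishTime time.Time   `json:"finish_time"`
        UserData   interface{} `json:"user_data"`
    }
    if err := backup.FetchSentinel(&sentinel); err != nil {
        return internal.GenericMetadata{}, err
    }
    return internal.GenericMetadata{
        BackupName:       backupName,
        StartTime:        sentinel.StartTime,
        FinishTime:       sentinel.FinishTime,
        IsIncremental:    false,
        IncrementDetails: &internal.NopIncrementDetailsFetcher{},
        UserData:         sentinel.UserData,
    }, nil
}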
type IncrementDetails ¶
IncrementDetails is useful for fetching information about the dependencies of an incremental backup.
type IncrementDetailsFetcher ¶
type IncrementDetailsFetcher interface {
Fetch() (isIncremental bool, details IncrementDetails, err error)
}
type InfoLogger ¶
type InfoLogger interface {
Println(v ...interface{})
}
type InvalidConcurrencyValueError ¶
type InvalidConcurrencyValueError struct {
// contains filtered or unexported fields
}
func (InvalidConcurrencyValueError) Error ¶
func (err InvalidConcurrencyValueError) Error() string
type LatestBackupSelector ¶
type LatestBackupSelector struct{}
LatestBackupSelector selects the latest backup from storage.
func NewLatestBackupSelector ¶
func NewLatestBackupSelector() LatestBackupSelector
type LazyCache ¶
type LazyCache[K comparable, V any] struct {
    // contains filtered or unexported fields
}
func NewLazyCache ¶
func NewLazyCache[K comparable, V any](load func(key K) (value V, err error)) *LazyCache[K, V]
func (*LazyCache[K, V]) LoadExisting ¶
type LimitedFolder ¶
func NewLimitedFolder ¶
func NewLimitedFolder(folder storage.Folder, limiter *rate.Limiter) *LimitedFolder
func (*LimitedFolder) GetSubFolder ¶
func (lf *LimitedFolder) GetSubFolder(subFolderRelativePath string) storage.Folder
func (*LimitedFolder) PutObject ¶
func (lf *LimitedFolder) PutObject(name string, content io.Reader) error
func (*LimitedFolder) ReadObject ¶
func (lf *LimitedFolder) ReadObject(objectRelativePath string) (io.ReadCloser, error)
type Logging ¶
type Logging struct {
    InfoLogger  InfoLogger
    ErrorLogger ErrorLogger
}
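A sketch satisfying both logger interfaces with the standard library and feeding the result to HandleBackupList (import path assumed):

package main

import (
    "log"
    "os"

    "github.com/wal-g/wal-g/internal"
)

// stderrLogger satisfies both InfoLogger and ErrorLogger.
type stderrLogger struct{}

func (stderrLogger) Println(v ...interface{}) { log.Println(v...) }
func (stderrLogger) FatalOnError(err error) {
    if err != nil {
        log.Fatal(err)
    }
}

func main() {
    folder, err := internal.ConfigureFolder()
    if err != nil {
        log.Fatal(err)
    }
    logging := internal.Logging{InfoLogger: stderrLogger{}, ErrorLogger: stderrLogger{}}
    internal.HandleBackupList(
        func() ([]internal.BackupTime, error) { return internal.GetBackups(folder) },
        func(backups []internal.BackupTime) { internal.WriteBackupList(backups, os.Stdout) },
        logging,
    )
}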
type MetaConstructor ¶
type MetaConstructor interface {
    Init() error
    Finalize(backupName string) error
    MetaInfo() interface{}
}
MetaConstructor is an interface that helps build meta-info about a backup and generate MetaInfo. See MongoMetaConstructor and RedisMetaConstructor.
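A minimal hypothetical implementation that records wall-clock start/finish times around a backup (not from the package; shown only to illustrate the interface contract):

package example

import "time"

type exampleMeta struct {
    StartTime  time.Time `json:"start_time"`
    FinishTime time.Time `json:"finish_time"`
    BackupName string    `json:"backup_name"`
}

// exampleMetaConstructor implements Init/Finalize/MetaInfo.
type exampleMetaConstructor struct {
    meta exampleMeta
}

func (c *exampleMetaConstructor) Init() error {
    c.meta.StartTime = time.Now()
    return nil
}

func (c *exampleMetaConstructor) Finalize(backupName string) error {
    c.meta.FinishTime = time.Now()
    c.meta.BackupName = backupName
    return nil
}

func (c *exampleMetaConstructor) MetaInfo() interface{} {
    return &c.meta
}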
type NOPTarBall ¶
type NOPTarBall struct {
// contains filtered or unexported fields
}
NOPTarBall mocks a tarball. Used for prefault logic.
func (*NOPTarBall) AddSize ¶
func (tarBall *NOPTarBall) AddSize(i int64)
func (*NOPTarBall) AwaitUploads ¶
func (tarBall *NOPTarBall) AwaitUploads()
func (*NOPTarBall) CloseTar ¶
func (tarBall *NOPTarBall) CloseTar() error
func (*NOPTarBall) Name ¶
func (tarBall *NOPTarBall) Name() string
func (*NOPTarBall) SetUp ¶
func (tarBall *NOPTarBall) SetUp(crypter crypto.Crypter, params ...string)
func (*NOPTarBall) Size ¶
func (tarBall *NOPTarBall) Size() int64
func (*NOPTarBall) TarWriter ¶
func (tarBall *NOPTarBall) TarWriter() *tar.Writer
type NOPTarBallMaker ¶
type NOPTarBallMaker struct {
// contains filtered or unexported fields
}
NOPTarBallMaker creates a new NOPTarBall. Used for testing purposes.
func (*NOPTarBallMaker) Make ¶
func (tarBallMaker *NOPTarBallMaker) Make(inheritState bool) TarBall
Make creates a new NOPTarBall.
type NoBackupsFoundError ¶
type NoBackupsFoundError struct {
// contains filtered or unexported fields
}
func NewNoBackupsFoundError ¶
func NewNoBackupsFoundError() NoBackupsFoundError
func (NoBackupsFoundError) Error ¶
func (err NoBackupsFoundError) Error() string
type NoFilesToExtractError ¶
type NoFilesToExtractError struct {
// contains filtered or unexported fields
}
func (NoFilesToExtractError) Error ¶
func (err NoFilesToExtractError) Error() string
type NopBundleFiles ¶
type NopBundleFiles struct{}
func (*NopBundleFiles) AddFileDescription ¶
func (files *NopBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)
func (*NopBundleFiles) AddFileWithCorruptBlocks ¶
func (*NopBundleFiles) AddSkippedFile ¶
func (files *NopBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
func (*NopBundleFiles) GetUnderlyingMap ¶
func (files *NopBundleFiles) GetUnderlyingMap() *sync.Map
type NopIncrementDetailsFetcher ¶
type NopIncrementDetailsFetcher struct{}
NopIncrementDetailsFetcher is useful for databases without incremental backup support
func (*NopIncrementDetailsFetcher) Fetch ¶
func (idf *NopIncrementDetailsFetcher) Fetch() (bool, IncrementDetails, error)
type NopTarFileSets ¶
type NopTarFileSets struct{}
func NewNopTarFileSets ¶
func NewNopTarFileSets() *NopTarFileSets
func (*NopTarFileSets) AddFile ¶
func (tarFileSets *NopTarFileSets) AddFile(name string, file string)
func (*NopTarFileSets) AddFiles ¶
func (tarFileSets *NopTarFileSets) AddFiles(name string, files []string)
func (*NopTarFileSets) Get ¶
func (tarFileSets *NopTarFileSets) Get() map[string][]string
type OldestNonPermanentSelector ¶
type OldestNonPermanentSelector struct {
// contains filtered or unexported fields
}
OldestNonPermanentSelector finds the oldest non-permanent backup available in storage.
func NewOldestNonPermanentSelector ¶
func NewOldestNonPermanentSelector(metaFetcher GenericMetaFetcher) *OldestNonPermanentSelector
type ProfileStopper ¶
type ProfileStopper interface {
Stop()
}
func Profile ¶
func Profile() (ProfileStopper, error)
type ReaderMaker ¶
type ReaderMaker interface {
    Reader() (io.ReadCloser, error)
    StoragePath() string
    LocalPath() string
    FileType() FileType
    Mode() int64
}
ReaderMaker is the generic interface used by extract. It allows for ease of handling different file formats.
type RegularBundleFiles ¶
func (*RegularBundleFiles) AddFileDescription ¶
func (files *RegularBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)
func (*RegularBundleFiles) AddFileWithCorruptBlocks ¶
func (*RegularBundleFiles) AddSkippedFile ¶
func (files *RegularBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
func (*RegularBundleFiles) GetUnderlyingMap ¶
func (files *RegularBundleFiles) GetUnderlyingMap() *sync.Map
type RegularJSON ¶
type RegularJSON struct{}
type RegularTarBallComposer ¶
type RegularTarBallComposer struct {
// contains filtered or unexported fields
}
func NewRegularTarBallComposer ¶
func NewRegularTarBallComposer(tarBallQueue *TarBallQueue, tarBallFilePacker TarBallFilePacker, files BundleFiles, tarFileSets TarFileSets, crypter crypto.Crypter) *RegularTarBallComposer
func (*RegularTarBallComposer) AddFile ¶
func (c *RegularTarBallComposer) AddFile(info *ComposeFileInfo)
func (*RegularTarBallComposer) FinishComposing ¶
func (c *RegularTarBallComposer) FinishComposing() (TarFileSets, error)
func (*RegularTarBallComposer) GetFiles ¶
func (c *RegularTarBallComposer) GetFiles() BundleFiles
type RegularTarBallComposerMaker ¶
type RegularTarBallComposerMaker struct {
// contains filtered or unexported fields
}
func NewRegularTarBallComposerMaker ¶
func NewRegularTarBallComposerMaker(files BundleFiles, tarFileSets TarFileSets) *RegularTarBallComposerMaker
func (*RegularTarBallComposerMaker) Make ¶
func (maker *RegularTarBallComposerMaker) Make(bundle *Bundle) (TarBallComposer, error)
type RegularTarBallFilePacker ¶
type RegularTarBallFilePacker struct {
// contains filtered or unexported fields
}
func NewRegularTarBallFilePacker ¶
func NewRegularTarBallFilePacker(files BundleFiles) *RegularTarBallFilePacker
func (*RegularTarBallFilePacker) PackFileIntoTar ¶
func (p *RegularTarBallFilePacker) PackFileIntoTar(cfi *ComposeFileInfo, tarBall TarBall) error
type RegularTarFileSets ¶
func NewRegularTarFileSets ¶
func NewRegularTarFileSets() *RegularTarFileSets
func (*RegularTarFileSets) AddFile ¶
func (tarFileSets *RegularTarFileSets) AddFile(name string, file string)
func (*RegularTarFileSets) AddFiles ¶
func (tarFileSets *RegularTarFileSets) AddFiles(name string, files []string)
func (*RegularTarFileSets) Get ¶
func (tarFileSets *RegularTarFileSets) Get() map[string][]string
type SentinelMarshallingError ¶
type SentinelMarshallingError struct {
// contains filtered or unexported fields
}
func NewSentinelMarshallingError ¶
func NewSentinelMarshallingError(sentinelName string, err error) SentinelMarshallingError
func (SentinelMarshallingError) Error ¶
func (err SentinelMarshallingError) Error() string
type SplitStreamUploader ¶
type SplitStreamUploader struct {
*Uploader
// contains filtered or unexported fields
}
SplitStreamUploader is an UploaderProvider implementation that splits an upload stream into blocks of blockSize bytes, distributes them across at most `partitions` streams, and compresses and pushes each stream to storage.
func (*SplitStreamUploader) Clone ¶
func (uploader *SplitStreamUploader) Clone() *SplitStreamUploader
func (*SplitStreamUploader) PushStream ¶
func (uploader *SplitStreamUploader) PushStream(stream io.Reader) (string, error)
TODO : unit tests. PushStream returns the backup_prefix (note: individual partition names are built by adding a '_0000.br' or '_0000_0000.br' suffix).
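A sketch of wiring up a split-stream upload by hand (ConfigureSplitUploader builds the same thing from the relevant settings). The partition count and block size are arbitrary, and 0 for maxFileSize is only a placeholder, since its semantics are not documented in this listing:

```go
package sketch

import (
	"io"

	"github.com/wal-g/wal-g/internal"
)

// splitUpload pushes a stream through a hand-built split uploader
// and returns the backup prefix it was stored under.
func splitUpload(base *internal.Uploader, stream io.Reader) (string, error) {
	// 4 partitions of 1 MiB blocks; both values are illustrative only.
	uploader := internal.NewSplitStreamUploader(base, 4, 1<<20, 0)
	return uploader.PushStream(stream)
}
```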
type StorageAdapter ¶
type StorageAdapter struct {
// contains filtered or unexported fields
}
type StorageReaderMaker ¶
type StorageReaderMaker struct {
Folder storage.Folder
StorageFileType FileType
FileMode int64
// contains filtered or unexported fields
}
StorageReaderMaker creates readers for downloading from storage
func NewRegularFileStorageReaderMarker ¶
func NewRegularFileStorageReaderMarker(folder storage.Folder, storagePath, localPath string, fileMode int64) *StorageReaderMaker
func NewStorageReaderMaker ¶
func NewStorageReaderMaker(folder storage.Folder, relativePath string) *StorageReaderMaker
func (*StorageReaderMaker) FileType ¶
func (readerMaker *StorageReaderMaker) FileType() FileType
func (*StorageReaderMaker) LocalPath ¶
func (readerMaker *StorageReaderMaker) LocalPath() string
func (*StorageReaderMaker) Mode ¶
func (readerMaker *StorageReaderMaker) Mode() int64
func (*StorageReaderMaker) Reader ¶
func (readerMaker *StorageReaderMaker) Reader() (io.ReadCloser, error)
func (*StorageReaderMaker) StoragePath ¶
func (readerMaker *StorageReaderMaker) StoragePath() string
type StorageTarBall ¶
type StorageTarBall struct {
// contains filtered or unexported fields
}
StorageTarBall represents a tar file that is going to be uploaded to storage.
func (*StorageTarBall) AddSize ¶
func (tarBall *StorageTarBall) AddSize(i int64)
AddSize to total Size
func (*StorageTarBall) AwaitUploads ¶
func (tarBall *StorageTarBall) AwaitUploads()
func (*StorageTarBall) CloseTar ¶
func (tarBall *StorageTarBall) CloseTar() error
CloseTar closes the tar writer, flushing any unwritten data to the underlying writer before also closing the underlying writer.
func (*StorageTarBall) Name ¶
func (tarBall *StorageTarBall) Name() string
func (*StorageTarBall) SetUp ¶
func (tarBall *StorageTarBall) SetUp(crypter crypto.Crypter, names ...string)
SetUp creates a new tar writer and starts the upload to storage. The upload blocks until the tar file has finished writing. If no name is given for the file, the default name has the form `part_....tar.[Compressor file extension]`.
func (*StorageTarBall) Size ¶
func (tarBall *StorageTarBall) Size() int64
Size accumulated in this tarball
func (*StorageTarBall) TarWriter ¶
func (tarBall *StorageTarBall) TarWriter() *tar.Writer
type StorageTarBallMaker ¶
type StorageTarBallMaker struct {
// contains filtered or unexported fields
}
StorageTarBallMaker creates tarballs that are uploaded to storage.
func NewStorageTarBallMaker ¶
func NewStorageTarBallMaker(backupName string, uploader *Uploader) *StorageTarBallMaker
func (*StorageTarBallMaker) Make ¶
func (tarBallMaker *StorageTarBallMaker) Make(dedicatedUploader bool) TarBall
Make returns a tarball with required storage fields.
type StreamFetcher ¶
type StreamFetcher = func(backup Backup, writeCloser io.WriteCloser) error
func GetBackupStreamFetcher ¶
func GetBackupStreamFetcher(backup Backup) (StreamFetcher, error)
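A sketch of fetching a backup's stream into a local file; the backup value is assumed to have been obtained elsewhere (for example via a backup selector), and whether the fetcher closes the writer is an assumption:

```go
package sketch

import (
	"os"

	"github.com/wal-g/wal-g/internal"
)

// fetchToFile streams the given backup into dstPath.
func fetchToFile(backup internal.Backup, dstPath string) error {
	fetcher, err := internal.GetBackupStreamFetcher(backup)
	if err != nil {
		return err
	}
	dst, err := os.Create(dstPath) // *os.File satisfies io.WriteCloser
	if err != nil {
		return err
	}
	// Assumption: the fetcher closes the WriteCloser once the stream ends.
	return fetcher(backup, dst)
}
```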
type StreamedJSON ¶
type StreamedJSON struct{}
type TarBall ¶
type TarBall interface {
SetUp(crypter crypto.Crypter, args ...string)
CloseTar() error
Size() int64
AddSize(int64)
TarWriter() *tar.Writer
AwaitUploads()
Name() string
}
A TarBall represents one tar file.
type TarBallComposer ¶
type TarBallComposer interface {
AddFile(info *ComposeFileInfo)
AddHeader(header *tar.Header, fileInfo os.FileInfo) error
SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)
FinishComposing() (TarFileSets, error)
GetFiles() BundleFiles
}
type TarBallComposerMaker ¶
type TarBallComposerMaker interface {
Make(bundle *Bundle) (TarBallComposer, error)
}
TarBallComposerMaker is used to make an instance of TarBallComposer
type TarBallFilePacker ¶
type TarBallFilePacker interface {
PackFileIntoTar(cfi *ComposeFileInfo, tarBall TarBall) error
}
type TarBallMaker ¶
TarBallMaker is used to allow for flexible creation of different TarBalls.
func NewNopTarBallMaker ¶
func NewNopTarBallMaker() TarBallMaker
type TarBallQueue ¶
type TarBallQueue struct {
TarSizeThreshold int64
AllTarballsSize *int64
TarBallMaker TarBallMaker
LastCreatedTarball TarBall
// contains filtered or unexported fields
}
TarBallQueue is used to process multiple tarballs concurrently
func NewTarBallQueue ¶
func NewTarBallQueue(tarSizeThreshold int64, tarBallMaker TarBallMaker) *TarBallQueue
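A hedged sketch of the queue lifecycle; the exact call order is inferred from the method set, and the backup name and 1 GiB threshold are arbitrary:

```go
package sketch

import "github.com/wal-g/wal-g/internal"

func packWithQueue(uploader *internal.Uploader) error {
	maker := internal.NewStorageTarBallMaker("base_X", uploader) // hypothetical backup name
	queue := internal.NewTarBallQueue(1<<30, maker)              // 1 GiB size threshold

	if err := queue.StartQueue(); err != nil {
		return err
	}
	tarBall := queue.Deque()
	// ... write entries through tarBall.TarWriter() ...

	// Hand the tarball back so it is closed and its upload completes.
	if err := queue.FinishTarBall(tarBall); err != nil {
		return err
	}
	return queue.FinishQueue()
}
```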
func (*TarBallQueue) CheckSizeAndEnqueueBack ¶
func (tarQueue *TarBallQueue) CheckSizeAndEnqueueBack(tarBall TarBall) error
func (*TarBallQueue) CloseTarball ¶
func (tarQueue *TarBallQueue) CloseTarball(tarBall TarBall) error
func (*TarBallQueue) Deque ¶
func (tarQueue *TarBallQueue) Deque() TarBall
func (*TarBallQueue) DequeCtx ¶
func (tarQueue *TarBallQueue) DequeCtx(ctx context.Context) (TarBall, error)
DequeCtx returns a TarBall from the queue. If the context finishes before it can do so, it returns the result of ctx.Err().
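For example, to bound how long a caller waits for a tarball to become available (a sketch; tarQueue is assumed to be a started *TarBallQueue):

```go
package sketch

import (
	"context"
	"time"

	"github.com/wal-g/wal-g/internal"
)

func dequeWithTimeout(tarQueue *internal.TarBallQueue) (internal.TarBall, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// If the deadline passes before a tarball is available,
	// DequeCtx returns ctx.Err().
	return tarQueue.DequeCtx(ctx)
}
```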
func (*TarBallQueue) EnqueueBack ¶
func (tarQueue *TarBallQueue) EnqueueBack(tarBall TarBall)
func (*TarBallQueue) FinishQueue ¶
func (tarQueue *TarBallQueue) FinishQueue() error
func (*TarBallQueue) FinishTarBall ¶
func (tarQueue *TarBallQueue) FinishTarBall(tarBall TarBall) error
func (*TarBallQueue) NewTarBall ¶
func (tarQueue *TarBallQueue) NewTarBall(dedicatedUploader bool) TarBall
NewTarBall starts writing a new tarball.
func (*TarBallQueue) StartQueue ¶
func (tarQueue *TarBallQueue) StartQueue() error
type TarFileSets ¶
type TarInterpreter ¶
TarInterpreter behaves differently for different file types.
func NewFileTarInterpreter ¶
func NewFileTarInterpreter(directoryToSave string) TarInterpreter
type TarSizeError ¶
type TarSizeError struct {
// contains filtered or unexported fields
}
type TimedBackup ¶
type UnconfiguredStorageError ¶
type UnconfiguredStorageError struct {
// contains filtered or unexported fields
}
func (UnconfiguredStorageError) Error ¶
func (err UnconfiguredStorageError) Error() string
type UnknownCompressionMethodError ¶
type UnknownCompressionMethodError struct {
// contains filtered or unexported fields
}
func (UnknownCompressionMethodError) Error ¶
func (err UnknownCompressionMethodError) Error() string
type UnknownSerializerTypeError ¶
type UnknownSerializerTypeError struct {
// contains filtered or unexported fields
}
func NewUnknownSerializerTypeError ¶
func NewUnknownSerializerTypeError(serializerType DtoSerializerType) UnknownSerializerTypeError
func (UnknownSerializerTypeError) Error ¶
func (err UnknownSerializerTypeError) Error() string
type UnmarshallingError ¶
type UnmarshallingError struct {
// contains filtered or unexported fields
}
func (UnmarshallingError) Error ¶
func (err UnmarshallingError) Error() string
type UnsetRequiredSettingError ¶
type UnsetRequiredSettingError struct {
// contains filtered or unexported fields
}
func NewUnsetRequiredSettingError ¶
func NewUnsetRequiredSettingError(settingName string) UnsetRequiredSettingError
func (UnsetRequiredSettingError) Error ¶
func (err UnsetRequiredSettingError) Error() string
type UnsupportedFileTypeError ¶
type UnsupportedFileTypeError struct {
// contains filtered or unexported fields
}
UnsupportedFileTypeError is used to signal file types that are unsupported by WAL-G.
func (UnsupportedFileTypeError) Error ¶
func (err UnsupportedFileTypeError) Error() string
type Uploader ¶
type Uploader struct {
UploadingFolder storage.Folder
Compressor compression.Compressor
ArchiveStatusManager asm.ArchiveStatusManager
PGArchiveStatusManager asm.ArchiveStatusManager
Failed atomic.Value
// contains filtered or unexported fields
}
Uploader contains fields associated with uploading tarballs. Multiple tarballs can share one uploader.
func ConfigureUploader ¶
ConfigureUploader connects to storage and creates an uploader. It ensures that a valid session has started; if the session is invalid, it returns an AWS error and `<nil>` values.
func NewUploader ¶
func NewUploader(compressor compression.Compressor, uploadingLocation storage.Folder) *Uploader
FIXME: return UploaderProvider
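A sketch of constructing an uploader by hand and pushing a single file (ConfigureUploader builds one from settings instead). The file path is hypothetical, and the assumption here is that *os.File satisfies ioextensions.NamedReader:

```go
package main

import (
	"log"
	"os"

	"github.com/wal-g/wal-g/internal"
)

func main() {
	compressor, err := internal.ConfigureCompressor()
	if err != nil {
		log.Fatal(err)
	}
	folder, err := internal.ConfigureFolder()
	if err != nil {
		log.Fatal(err)
	}
	uploader := internal.NewUploader(compressor, folder)

	// Hypothetical file; *os.File is assumed to satisfy
	// ioextensions.NamedReader (it has both Name() and Read()).
	file, err := os.Open("/tmp/example-file")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	if err := uploader.UploadFile(file); err != nil {
		log.Fatal(err)
	}
	// Finish waits for all pending uploads; errors go to stderr.
	uploader.Finish()
}
```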
func (*Uploader) ChangeDirectory ¶
func (*Uploader) Compression ¶
func (uploader *Uploader) Compression() compression.Compressor
Compression returns configured compressor
func (*Uploader) DisableSizeTracking ¶
func (uploader *Uploader) DisableSizeTracking()
DisableSizeTracking stops bandwidth tracking
func (*Uploader) Finish ¶
func (uploader *Uploader) Finish()
Finish waits for all waiting parts to be uploaded. If an error occurs, prints alert to stderr.
func (*Uploader) PushStream ¶
TODO : unit tests. PushStream compresses a stream and pushes it.
func (*Uploader) PushStreamToDestination ¶
TODO : unit tests. PushStreamToDestination compresses a stream and pushes it to the specified destination.
func (*Uploader) RawDataSize ¶
RawDataSize returns 0 and an error when size tracking is disabled (see DisableSizeTracking).
func (*Uploader) UploadFile ¶
func (uploader *Uploader) UploadFile(file ioextensions.NamedReader) error
TODO : unit tests. UploadFile compresses a file and uploads it.
func (*Uploader) UploadMultiple ¶
func (uploader *Uploader) UploadMultiple(objects []UploadObject) error
UploadMultiple uploads multiple objects from the start of the slice, returning the first error if any. Note that this operation is not atomic. TODO : unit tests.
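For instance, uploading two metadata blobs in one call (a sketch assuming UploadObject exposes Path and Content fields, which this listing does not show; the object names are hypothetical):

```go
package sketch

import (
	"bytes"

	"github.com/wal-g/wal-g/internal"
)

func uploadSentinels(uploader *internal.Uploader, meta1, meta2 []byte) error {
	// Assumption: UploadObject has Path and Content fields.
	objects := []internal.UploadObject{
		{Path: "metadata_1.json", Content: bytes.NewReader(meta1)},
		{Path: "metadata_2.json", Content: bytes.NewReader(meta2)},
	}
	// Not atomic: a failure may leave earlier objects already uploaded.
	return uploader.UploadMultiple(objects)
}
```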
func (*Uploader) UploadedDataSize ¶
UploadedDataSize returns 0 and an error when size tracking is disabled (see DisableSizeTracking).
type UploaderProvider ¶
type UploaderProvider interface {
Upload(path string, content io.Reader) error
UploadFile(file ioextensions.NamedReader) error
PushStream(stream io.Reader) (string, error)
PushStreamToDestination(stream io.Reader, dstPath string) error
Compression() compression.Compressor
DisableSizeTracking()
UploadedDataSize() (int64, error)
RawDataSize() (int64, error)
ChangeDirectory(relativePath string)
Folder() storage.Folder
}
func ConfigureSplitUploader ¶
func ConfigureSplitUploader() (UploaderProvider, error)
func NewSplitStreamUploader ¶
func NewSplitStreamUploader(uploader *Uploader, partitions int, blockSize int, maxFileSize int) UploaderProvider
type UserDataBackupSelector ¶
type UserDataBackupSelector struct {
// contains filtered or unexported fields
}
UserDataBackupSelector selects the backup that has the provided user data.
func NewUserDataBackupSelector ¶
func NewUserDataBackupSelector(userDataRaw string, metaFetcher GenericMetaFetcher) (UserDataBackupSelector, error)
type WrongTypeError ¶
type WrongTypeError struct {
// contains filtered or unexported fields
}
func NewWrongTypeError ¶
func NewWrongTypeError(desiredType string) WrongTypeError
func (WrongTypeError) Error ¶
func (err WrongTypeError) Error() string
Source Files ¶
- backup.go
- backup_fetch_handler.go
- backup_file_description.go
- backup_list_handler.go
- backup_list_time_format.go
- backup_mark.go
- backup_mark_handler.go
- backup_object.go
- backup_selector.go
- backup_time.go
- backup_util.go
- bundle.go
- bundle_files.go
- compose_rating_evaluator.go
- compress_and_encrypt.go
- config.go
- configure.go
- configure_crypter.go
- delete_handler.go
- delete_util.go
- directory_downloader.go
- directory_uploader.go
- exponential_sleeper.go
- extract.go
- fetch_helper.go
- file_tar_interpreter.go
- files_filter.go
- generic_metadata.go
- lazy_cache.go
- limited_folder.go
- metrics.go
- nop_tarball.go
- profile.go
- reader_maker.go
- regular_tar_ball_composer.go
- sentinel.go
- serializer.go
- sleeper.go
- storage_adapter.go
- storage_reader_maker.go
- storage_tar_ball.go
- storage_tar_ball_maker.go
- stream_fetch_helper.go
- stream_metadata.go
- stream_push_helper.go
- tar_ball.go
- tar_ball_composer.go
- tar_ball_file_packer.go
- tar_ball_maker.go
- tar_ball_queue.go
- tar_file_sets.go
- uploader.go
Directories ¶
Path | Synopsis
---|---
abool | Package abool provides atomic Boolean type for cleaner code and better performance.
databases |
testtools | Package mock_internal is a generated GoMock package.