Documentation ¶
Index ¶
- Constants
- Variables
- func AddConfigFlags(Cmd *cobra.Command)
- func AssertRequiredSettingsSet() error
- func CheckAllowedSettings(config *viper.Viper)
- func CompressAndEncrypt(source io.Reader, compressor compression.Compressor, crypter crypto.Crypter) io.Reader
- func Configure()
- func ConfigureAndRunDefaultWebServer() error
- func ConfigureArchiveStatusManager() (fsutil.DataFolder, error)
- func ConfigureCompressor() (compression.Compressor, error)
- func ConfigureCrypter() crypto.Crypter
- func ConfigureFolder() (storage.Folder, error)
- func ConfigureFolderForSpecificConfig(config *viper.Viper) (storage.Folder, error)
- func ConfigureLogging() error
- func ConfigureSettings(currentType string)
- func DecompressDecryptBytes(dst io.Writer, archiveReader io.ReadCloser, ...) error
- func DecryptAndDecompressTar(writer io.Writer, readerMaker ReaderMaker, crypter crypto.Crypter) error
- func DefaultHandleBackupList(folder storage.Folder, pretty, json bool)
- func DeleteBeforeArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteEverythingArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteRetainAfterArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteRetainArgsValidator(cmd *cobra.Command, args []string) error
- func DeleteTargetArgsValidator(cmd *cobra.Command, args []string) error
- func DownloadAndDecompressStorageFile(folder storage.Folder, fileName string) (io.ReadCloser, error)
- func DownloadFile(folder storage.Folder, filename, ext string, writeCloser io.WriteCloser) error
- func DownloadFileTo(folder storage.Folder, fileName string, dstPath string) error
- func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error
- func ExtractDeleteEverythingModifierFromArgs(args []string) int
- func ExtractDeleteTargetModifierFromArgs(args []string) int
- func FileIsPiped(stream *os.File) bool
- func FolderFromConfig(configFile string) (storage.Folder, error)
- func FolderSize(folder storage.Folder, path string) (int64, error)
- func GetBackupSentinelObjects(folder storage.Folder) ([]storage.Object, error)
- func GetBoolSetting(setting string) (val bool, ok bool, err error)
- func GetBoolSettingDefault(setting string, def bool) (bool, error)
- func GetCommandSetting(variableName string) (*exec.Cmd, error)
- func GetCommandSettingContext(ctx context.Context, variableName string) (*exec.Cmd, error)
- func GetCommandStreamFetcher(cmd *exec.Cmd) func(folder storage.Folder, backup Backup)
- func GetDataFolderPath() string
- func GetDurationSetting(setting string) (time.Duration, error)
- func GetLastDecompressor() (compression.Decompressor, error)
- func GetLatestBackupName(folder storage.Folder) (string, error)
- func GetLogsDstSettings(operationLogsDstEnvVariable string) (dstFolder string, err error)
- func GetMaxConcurrency(concurrencyType string) (int, error)
- func GetMaxDownloadConcurrency() (int, error)
- func GetMaxUploadConcurrency() (int, error)
- func GetMaxUploadDiskConcurrency() (int, error)
- func GetOplogArchiveAfterSize() (int, error)
- func GetOplogPITRDiscoveryIntervalSetting() (*time.Duration, error)
- func GetPermanentBackups(folder storage.Folder, metaFetcher GenericMetaFetcher) map[string]bool
- func GetPgSlotName() (pgSlotName string)
- func GetRelativeArchiveDataFolderPath() string
- func GetRequiredSetting(setting string) (string, error)
- func GetSentinelUserData() interface{}
- func GetSetting(key string) (value string, ok bool)
- func GetStreamName(backupName string, extension string) string
- func HandleBackupFetch(folder storage.Folder, targetBackupSelector BackupSelector, ...)
- func HandleBackupList(getBackupsFunc func() ([]BackupTime, error), ...)
- func HandleBackupMark(uploader *Uploader, backupName string, toPermanent bool, ...)
- func InitConfig()
- func PackFileTo(tarBall TarBall, fileInfoHeader *tar.Header, fileContent io.Reader) (fileSize int64, err error)
- func ParseTS(endTSEnvVar string) (endTS *time.Time, err error)
- func ReadConfigFromFile(config *viper.Viper, configFile string)
- func SentinelNameFromBackup(backupName string) string
- func SetDefaultValues(config *viper.Viper)
- func SetLastDecompressor(decompressor compression.Decompressor) error
- func StreamBackupToCommandStdin(cmd *exec.Cmd, backup Backup) error
- func TryDownloadFile(folder storage.Folder, path string) (walFileReader io.ReadCloser, exists bool, err error)
- func UnmarshalSentinelUserData(userDataStr string) interface{}
- func UnwrapLatestModifier(backupName string, folder storage.Folder) (string, error)
- func UploadSentinel(uploader UploaderProvider, sentinelDto interface{}, backupName string) error
- func WriteAsJSON(data interface{}, output io.Writer, pretty bool) error
- func WriteBackupList(backups []BackupTime, output io.Writer)
- func WritePrettyBackupList(backups []BackupTime, output io.Writer)
- type ArchiveNonExistenceError
- type Backup
- func (backup *Backup) AssureExists() error
- func (backup *Backup) CheckExistence() (bool, error)
- func (backup *Backup) FetchMetadata(metadataDto interface{}) error
- func (backup *Backup) FetchSentinel(sentinelDto interface{}) error
- func (backup *Backup) SentinelExists() (bool, error)
- func (backup *Backup) UploadMetadata(metadataDto interface{}) error
- func (backup *Backup) UploadSentinel(sentinelDto interface{}) error
- type BackupFileDescription
- type BackupFileList
- type BackupHasPermanentBackupInFutureError
- type BackupMarkHandler
- type BackupNameSelector
- type BackupNonExistenceError
- type BackupObject
- type BackupSelector
- type BackupTime
- type CachedDecompressor
- type ComposeRatingEvaluator
- type CompressAndEncryptError
- type CorruptBlocksInfo
- type DecompressionError
- type DefaultBackupObject
- type DefaultComposeRatingEvaluator
- type DeleteHandler
- func (h *DeleteHandler) DeleteBeforeTarget(target BackupObject, confirmed bool) error
- func (h *DeleteHandler) DeleteEverything(confirmed bool)
- func (h *DeleteHandler) DeleteTargets(targets []BackupObject, confirmed bool) error
- func (h *DeleteHandler) FindTargetBeforeName(name string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetBeforeTime(timeLine time.Time, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetByName(bname string) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetain(retentionCount, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetainAfterName(retentionCount int, name string, modifier int) (BackupObject, error)
- func (h *DeleteHandler) FindTargetRetainAfterTime(retentionCount int, timeLine time.Time, modifier int) (BackupObject, error)
- func (h *DeleteHandler) HandleDeleteBefore(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteEverything(args []string, permanentBackups map[string]bool, confirmed bool)
- func (h *DeleteHandler) HandleDeleteRetain(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteRetainAfter(args []string, confirmed bool)
- func (h *DeleteHandler) HandleDeleteTarget(targetSelector BackupSelector, confirmed, findFull bool)
- type DeleteHandlerOption
- type EmptyWriteIgnorer
- type ErrWaiter
- type ErrorLogger
- type ExponentialRetrier
- type GenericMetaFetcher
- type GenericMetaInteractor
- type GenericMetaSetter
- type GenericMetadata
- type IncrementDetails
- type IncrementDetailsFetcher
- type InfoLogger
- type InvalidConcurrencyValueError
- type LatestBackupSelector
- type LazyCache
- func (lazyCache *LazyCache) Load(key interface{}) (value interface{}, exists bool, err error)
- func (lazyCache *LazyCache) LoadExisting(key interface{}) (value interface{}, exists bool)
- func (lazyCache *LazyCache) Range(reduce func(key, value interface{}) bool)
- func (lazyCache *LazyCache) Store(key, value interface{})
- type Logging
- type MetaConstructor
- type NOPTarBall
- func (tarBall *NOPTarBall) AddSize(i int64)
- func (tarBall *NOPTarBall) AwaitUploads()
- func (tarBall *NOPTarBall) CloseTar() error
- func (tarBall *NOPTarBall) Name() string
- func (tarBall *NOPTarBall) SetUp(crypter crypto.Crypter, params ...string)
- func (tarBall *NOPTarBall) Size() int64
- func (tarBall *NOPTarBall) TarWriter() *tar.Writer
- type NOPTarBallMaker
- type NoBackupsFoundError
- type NoFilesToExtractError
- type NopIncrementDetailsFetcher
- type ReaderMaker
- type Sentinel
- type SentinelMarshallingError
- type StorageAdapter
- type StorageReaderMaker
- type StorageTarBall
- func (tarBall *StorageTarBall) AddSize(i int64)
- func (tarBall *StorageTarBall) AwaitUploads()
- func (tarBall *StorageTarBall) CloseTar() error
- func (tarBall *StorageTarBall) Name() string
- func (tarBall *StorageTarBall) SetUp(crypter crypto.Crypter, names ...string)
- func (tarBall *StorageTarBall) Size() int64
- func (tarBall *StorageTarBall) TarWriter() *tar.Writer
- type StorageTarBallMaker
- type TarBall
- type TarBallMaker
- type TarBallQueue
- func (tarQueue *TarBallQueue) CheckSizeAndEnqueueBack(tarBall TarBall) error
- func (tarQueue *TarBallQueue) CloseTarball(tarBall TarBall) error
- func (tarQueue *TarBallQueue) Deque() TarBall
- func (tarQueue *TarBallQueue) DequeCtx(ctx context.Context) (TarBall, error)
- func (tarQueue *TarBallQueue) EnqueueBack(tarBall TarBall)
- func (tarQueue *TarBallQueue) FinishQueue() error
- func (tarQueue *TarBallQueue) FinishTarBall(tarBall TarBall) error
- func (tarQueue *TarBallQueue) NewTarBall(dedicatedUploader bool) TarBall
- func (tarQueue *TarBallQueue) StartQueue() error
- type TarInterpreter
- type UnconfiguredStorageError
- type UnknownCompressionMethodError
- type UnmarshallingError
- type UnsetRequiredSettingError
- type UnsupportedFileTypeError
- type UploadObject
- type Uploader
- func (uploader *Uploader) Clone() *Uploader
- func (uploader *Uploader) Compression() compression.Compressor
- func (uploader *Uploader) DisableSizeTracking()
- func (uploader *Uploader) Finish()
- func (uploader *Uploader) PushStream(stream io.Reader) (string, error)
- func (uploader *Uploader) PushStreamToDestination(stream io.Reader, dstPath string) error
- func (uploader *Uploader) RawDataSize() (int64, error)
- func (uploader *Uploader) Upload(path string, content io.Reader) error
- func (uploader *Uploader) UploadFile(file ioextensions.NamedReader) error
- func (uploader *Uploader) UploadMultiple(objects []UploadObject) error
- func (uploader *Uploader) UploadedDataSize() (int64, error)
- type UploaderProvider
- type UserDataBackupSelector
- type WithSizeReader
- type WrongTypeError
Constants ¶
const ( PG = "PG" SQLSERVER = "SQLSERVER" MYSQL = "MYSQL" REDIS = "REDIS" FDB = "FDB" MONGO = "MONGO" DownloadConcurrencySetting = "WALG_DOWNLOAD_CONCURRENCY" UploadConcurrencySetting = "WALG_UPLOAD_CONCURRENCY" UploadDiskConcurrencySetting = "WALG_UPLOAD_DISK_CONCURRENCY" UploadQueueSetting = "WALG_UPLOAD_QUEUE" SentinelUserDataSetting = "WALG_SENTINEL_USER_DATA" PreventWalOverwriteSetting = "WALG_PREVENT_WAL_OVERWRITE" UploadWalMetadata = "WALG_UPLOAD_WAL_METADATA" DeltaMaxStepsSetting = "WALG_DELTA_MAX_STEPS" DeltaOriginSetting = "WALG_DELTA_ORIGIN" CompressionMethodSetting = "WALG_COMPRESSION_METHOD" DiskRateLimitSetting = "WALG_DISK_RATE_LIMIT" NetworkRateLimitSetting = "WALG_NETWORK_RATE_LIMIT" UseWalDeltaSetting = "WALG_USE_WAL_DELTA" UseReverseUnpackSetting = "WALG_USE_REVERSE_UNPACK" SkipRedundantTarsSetting = "WALG_SKIP_REDUNDANT_TARS" VerifyPageChecksumsSetting = "WALG_VERIFY_PAGE_CHECKSUMS" StoreAllCorruptBlocksSetting = "WALG_STORE_ALL_CORRUPT_BLOCKS" UseRatingComposerSetting = "WALG_USE_RATING_COMPOSER" DeltaFromNameSetting = "WALG_DELTA_FROM_NAME" DeltaFromUserDataSetting = "WALG_DELTA_FROM_USER_DATA" FetchTargetUserDataSetting = "WALG_FETCH_TARGET_USER_DATA" LogLevelSetting = "WALG_LOG_LEVEL" TarSizeThresholdSetting = "WALG_TAR_SIZE_THRESHOLD" CseKmsIDSetting = "WALG_CSE_KMS_ID" CseKmsRegionSetting = "WALG_CSE_KMS_REGION" LibsodiumKeySetting = "WALG_LIBSODIUM_KEY" LibsodiumKeyPathSetting = "WALG_LIBSODIUM_KEY_PATH" GpgKeyIDSetting = "GPG_KEY_ID" PgpKeySetting = "WALG_PGP_KEY" PgpKeyPathSetting = "WALG_PGP_KEY_PATH" PgpKeyPassphraseSetting = "WALG_PGP_KEY_PASSPHRASE" PgDataSetting = "PGDATA" UserSetting = "USER" // TODO : do something with it PgPortSetting = "PGPORT" PgUserSetting = "PGUSER" PgHostSetting = "PGHOST" PgPasswordSetting = "PGPASSWORD" PgDatabaseSetting = "PGDATABASE" PgSslModeSetting = "PGSSLMODE" PgSlotName = "WALG_SLOTNAME" PgWalSize = "WALG_PG_WAL_SIZE" TotalBgUploadedLimit = "TOTAL_BG_UPLOADED_LIMIT" NameStreamCreateCmd = 
"WALG_STREAM_CREATE_COMMAND" NameStreamRestoreCmd = "WALG_STREAM_RESTORE_COMMAND" MaxDelayedSegmentsCount = "WALG_INTEGRITY_MAX_DELAYED_WALS" PrefetchDir = "WALG_PREFETCH_DIR" MongoDBUriSetting = "MONGODB_URI" MongoDBLastWriteUpdateInterval = "MONGODB_LAST_WRITE_UPDATE_INTERVAL" OplogArchiveAfterSize = "OPLOG_ARCHIVE_AFTER_SIZE" OplogArchiveTimeoutInterval = "OPLOG_ARCHIVE_TIMEOUT_INTERVAL" OplogPITRDiscoveryInterval = "OPLOG_PITR_DISCOVERY_INTERVAL" OplogPushStatsEnabled = "OPLOG_PUSH_STATS_ENABLED" OplogPushStatsLoggingInterval = "OPLOG_PUSH_STATS_LOGGING_INTERVAL" OplogPushStatsUpdateInterval = "OPLOG_PUSH_STATS_UPDATE_INTERVAL" OplogPushStatsExposeHTTP = "OPLOG_PUSH_STATS_EXPOSE_HTTP" OplogPushWaitForBecomePrimary = "OPLOG_PUSH_WAIT_FOR_BECOME_PRIMARY" OplogPushPrimaryCheckInterval = "OPLOG_PUSH_PRIMARY_CHECK_INTERVAL" OplogReplayOplogAlwaysUpsert = "OPLOG_REPLAY_OPLOG_ALWAYS_UPSERT" OplogReplayOplogApplicationMode = "OPLOG_REPLAY_OPLOG_APPLICATION_MODE" OplogReplayIgnoreErrorCodes = "OPLOG_REPLAY_IGNORE_ERROR_CODES" MysqlDatasourceNameSetting = "WALG_MYSQL_DATASOURCE_NAME" MysqlSslCaSetting = "WALG_MYSQL_SSL_CA" MysqlBinlogReplayCmd = "WALG_MYSQL_BINLOG_REPLAY_COMMAND" MysqlBinlogDstSetting = "WALG_MYSQL_BINLOG_DST" MysqlBackupPrepareCmd = "WALG_MYSQL_BACKUP_PREPARE_COMMAND" MysqlTakeBinlogsFromMaster = "WALG_MYSQL_TAKE_BINLOGS_FROM_MASTER" GoMaxProcs = "GOMAXPROCS" HTTPListen = "HTTP_LISTEN" HTTPExposePprof = "HTTP_EXPOSE_PPROF" HTTPExposeExpVar = "HTTP_EXPOSE_EXPVAR" SQLServerBlobHostname = "SQLSERVER_BLOB_HOSTNAME" SQLServerBlobCertFile = "SQLSERVER_BLOB_CERT_FILE" SQLServerBlobKeyFile = "SQLSERVER_BLOB_KEY_FILE" SQLServerBlobLockFile = "SQLSERVER_BLOB_LOCK_FILE" SQLServerConnectionString = "SQLSERVER_CONNECTION_STRING" EndpointSourceSetting = "S3_ENDPOINT_SOURCE" EndpointPortSetting = "S3_ENDPOINT_PORT" AwsAccessKeyID = "AWS_ACCESS_KEY_ID" AwsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" YcKmsKeyIDSetting = "YC_CSE_KMS_KEY_ID" YcSaKeyFileSetting = 
"YC_SERVICE_ACCOUNT_KEY_FILE" )
const ( DefaultDataBurstRateLimit = 8 * pgDefaultDatabasePageSize DefaultDataFolderPath = "/tmp" WaleFileHost = "file://localhost" )
const ( NoDeleteModifier = iota FullDeleteModifier FindFullDeleteModifier ForceDeleteModifier ConfirmFlag = "confirm" DeleteShortDescription = "Clears old backups and WALs" DeleteRetainExamples = `` /* 321-byte string literal not displayed */ DeleteBeforeExamples = `` /* 156-byte string literal not displayed */ DeleteEverythingExamples = `` /* 154-byte string literal not displayed */ DeleteTargetExamples = `` //nolint:lll /* 420-byte string literal not displayed */ DeleteEverythingUsageExample = "everything [FORCE]" DeleteRetainUsageExample = "retain [FULL|FIND_FULL] backup_count" DeleteBeforeUsageExample = "before [FIND_FULL] backup_name|timestamp" DeleteTargetUsageExample = "target [FIND_FULL] backup_name | --target-user-data <data>" DeleteTargetUserDataFlag = "target-user-data" DeleteTargetUserDataDescription = "delete storage backup which has the specified user data" )
const LatestString = "LATEST"
const MaxCorruptBlocksInFileDesc int = 10
const MinAllowedConcurrency = 1
const (
StreamPrefix = "stream_"
)
const TarPartitionFolderName = "/tar_partitions/"
Variables ¶
var ( CfgFile string MongoDefaultSettings = map[string]string{ OplogPushStatsLoggingInterval: "30s", OplogPushStatsUpdateInterval: "30s", OplogPushWaitForBecomePrimary: "false", OplogPushPrimaryCheckInterval: "30s", OplogArchiveTimeoutInterval: "60s", OplogArchiveAfterSize: "16777216", MongoDBLastWriteUpdateInterval: "3s", } PGDefaultSettings = map[string]string{ PgWalSize: "16", } AllowedSettings map[string]bool CommonAllowedSettings = map[string]bool{ DownloadConcurrencySetting: true, UploadConcurrencySetting: true, UploadDiskConcurrencySetting: true, UploadQueueSetting: true, SentinelUserDataSetting: true, PreventWalOverwriteSetting: true, UploadWalMetadata: true, DeltaMaxStepsSetting: true, DeltaOriginSetting: true, CompressionMethodSetting: true, DiskRateLimitSetting: true, NetworkRateLimitSetting: true, UseWalDeltaSetting: true, LogLevelSetting: true, TarSizeThresholdSetting: true, "WALG_" + GpgKeyIDSetting: true, "WALE_" + GpgKeyIDSetting: true, PgpKeySetting: true, PgpKeyPathSetting: true, PgpKeyPassphraseSetting: true, LibsodiumKeySetting: true, LibsodiumKeyPathSetting: true, TotalBgUploadedLimit: true, NameStreamCreateCmd: true, NameStreamRestoreCmd: true, UseReverseUnpackSetting: true, SkipRedundantTarsSetting: true, VerifyPageChecksumsSetting: true, StoreAllCorruptBlocksSetting: true, UseRatingComposerSetting: true, MaxDelayedSegmentsCount: true, DeltaFromNameSetting: true, DeltaFromUserDataSetting: true, FetchTargetUserDataSetting: true, "WALG_SWIFT_PREFIX": true, "OS_AUTH_URL": true, "OS_USERNAME": true, "OS_PASSWORD": true, "OS_TENANT_NAME": true, "OS_REGION_NAME": true, "WALG_S3_PREFIX": true, "WALE_S3_PREFIX": true, AwsAccessKeyID: true, AwsSecretAccessKey: true, "AWS_SESSION_TOKEN": true, "AWS_DEFAULT_REGION": true, "AWS_DEFAULT_OUTPUT": true, "AWS_PROFILE": true, "AWS_ROLE_SESSION_NAME": true, "AWS_CA_BUNDLE": true, "AWS_SHARED_CREDENTIALS_FILE": true, "AWS_CONFIG_FILE": true, "AWS_REGION": true, "AWS_ENDPOINT": true, "AWS_S3_FORCE_PATH_STYLE": 
true, "WALG_S3_CA_CERT_FILE": true, "WALG_S3_STORAGE_CLASS": true, "WALG_S3_SSE": true, "WALG_S3_SSE_KMS_ID": true, "WALG_CSE_KMS_ID": true, "WALG_CSE_KMS_REGION": true, "WALG_S3_MAX_PART_SIZE": true, "S3_ENDPOINT_SOURCE": true, "S3_ENDPOINT_PORT": true, "S3_USE_LIST_OBJECTS_V1": true, "WALG_AZ_PREFIX": true, "AZURE_STORAGE_ACCOUNT": true, "AZURE_STORAGE_KEY": true, "AZURE_STORAGE_SAS_TOKEN": true, "AZURE_ENVIRONMENT_NAME": true, "WALG_AZURE_BUFFER_SIZE": true, "WALG_AZURE_MAX_BUFFERS": true, "WALG_GS_PREFIX": true, "GOOGLE_APPLICATION_CREDENTIALS": true, YcSaKeyFileSetting: true, YcKmsKeyIDSetting: true, "WALG_SSH_PREFIX": true, "SSH_PORT": true, "SSH_PASSWORD": true, "SSH_USERNAME": true, "SSH_PRIVATE_KEY_PATH": true, "WALG_FILE_PREFIX": true, GoMaxProcs: true, HTTPListen: true, HTTPExposePprof: true, HTTPExposeExpVar: true, } PGAllowedSettings = map[string]bool{ PgPortSetting: true, PgUserSetting: true, PgHostSetting: true, PgDataSetting: true, PgPasswordSetting: true, PgDatabaseSetting: true, PgSslModeSetting: true, PgSlotName: true, PgWalSize: true, "PGPASSFILE": true, PrefetchDir: true, } MongoAllowedSettings = map[string]bool{ MongoDBUriSetting: true, MongoDBLastWriteUpdateInterval: true, OplogArchiveTimeoutInterval: true, OplogArchiveAfterSize: true, OplogPushStatsEnabled: true, OplogPushStatsLoggingInterval: true, OplogPushStatsUpdateInterval: true, OplogPushStatsExposeHTTP: true, OplogPushWaitForBecomePrimary: true, OplogPushPrimaryCheckInterval: true, OplogPITRDiscoveryInterval: true, } SQLServerAllowedSettings = map[string]bool{ SQLServerBlobHostname: true, SQLServerBlobCertFile: true, SQLServerBlobKeyFile: true, SQLServerBlobLockFile: true, SQLServerConnectionString: true, } MysqlAllowedSettings = map[string]bool{ MysqlDatasourceNameSetting: true, MysqlSslCaSetting: true, MysqlBinlogReplayCmd: true, MysqlBinlogDstSetting: true, MysqlBackupPrepareCmd: true, MysqlTakeBinlogsFromMaster: true, } RequiredSettings = make(map[string]bool) 
HTTPSettingExposeFuncs = map[string]func(webserver.WebServer){ HTTPExposePprof: webserver.EnablePprofEndpoints, HTTPExposeExpVar: webserver.EnableExpVarEndpoints, OplogPushStatsExposeHTTP: nil, } Turbo bool )
var DeprecatedExternalGpgMessage = fmt.Sprintf( `You are using deprecated functionality that uses an external gpg library. It will be removed in next major version. Please set GPG key using environment variables %s or %s. `, PgpKeySetting, PgpKeyPathSetting)
var ErrorSizeTrackingDisabled = fmt.Errorf("size tracking disabled by DisableSizeTracking method")
var MaxExtractRetryWait = 5 * time.Minute
var MinExtractRetryWait = time.Minute
var StorageAdapters = []StorageAdapter{ {"S3_PREFIX", s3.SettingList, s3.ConfigureFolder, nil}, {"FILE_PREFIX", nil, fs.ConfigureFolder, preprocessFilePrefix}, {"GS_PREFIX", gcs.SettingList, gcs.ConfigureFolder, nil}, {"AZ_PREFIX", azure.SettingList, azure.ConfigureFolder, nil}, {"SWIFT_PREFIX", swift.SettingList, swift.ConfigureFolder, nil}, {"SSH_PREFIX", sh.SettingsList, sh.ConfigureFolder, nil}, }
var StringModifiers = []string{"FULL", "FIND_FULL"}
var StringModifiersDeleteEverything = []string{"FORCE"}
Functions ¶
func AddConfigFlags ¶ added in v0.2.23
func AssertRequiredSettingsSet ¶ added in v0.2.23
func AssertRequiredSettingsSet() error
func CheckAllowedSettings ¶ added in v0.2.23
CheckAllowedSettings warns if a viper instance's setting is not allowed
func CompressAndEncrypt ¶ added in v0.2.10
func CompressAndEncrypt(source io.Reader, compressor compression.Compressor, crypter crypto.Crypter) io.Reader
CompressAndEncrypt compresses input to a pipe reader. Output must be used or pipe will block.
func ConfigureAndRunDefaultWebServer ¶ added in v0.2.23
func ConfigureAndRunDefaultWebServer() error
ConfigureAndRunDefaultWebServer configures and runs web server
func ConfigureArchiveStatusManager ¶ added in v0.2.14
func ConfigureArchiveStatusManager() (fsutil.DataFolder, error)
TODO : unit tests
func ConfigureCompressor ¶ added in v0.2.23
func ConfigureCompressor() (compression.Compressor, error)
TODO : unit tests
func ConfigureCrypter ¶ added in v0.2.10
ConfigureCrypter uses environment variables to create and configure a crypter. In case no configuration in environment variables found, return `<nil>` value.
func ConfigureFolder ¶ added in v0.2.8
TODO : unit tests
func ConfigureFolderForSpecificConfig ¶ added in v0.2.23
TODO: when multiple 'keys' are provided in the config, this function will always return only one concrete 'folder'. The chosen folder depends only on the order of 'StorageAdapters'
func ConfigureLogging ¶ added in v0.2.8
func ConfigureLogging() error
func ConfigureSettings ¶ added in v0.2.23
func ConfigureSettings(currentType string)
func DecompressDecryptBytes ¶ added in v0.2.23
func DecompressDecryptBytes(dst io.Writer, archiveReader io.ReadCloser, decompressor compression.Decompressor) error
TODO : unit tests
func DecryptAndDecompressTar ¶
func DecryptAndDecompressTar(writer io.Writer, readerMaker ReaderMaker, crypter crypto.Crypter) error
DecryptAndDecompressTar decrypts file and checks its extension. If it's tar, a decompression is not needed. Otherwise it uses corresponding decompressor. If none found an error will be returned.
func DefaultHandleBackupList ¶ added in v0.2.23
func DeleteBeforeArgsValidator ¶ added in v0.2.10
func DeleteEverythingArgsValidator ¶ added in v0.2.13
func DeleteRetainAfterArgsValidator ¶ added in v0.2.23
func DeleteRetainArgsValidator ¶ added in v0.2.10
func DeleteTargetArgsValidator ¶ added in v0.2.23
func DownloadAndDecompressStorageFile ¶ added in v0.2.23
func DownloadAndDecompressStorageFile(folder storage.Folder, fileName string) (io.ReadCloser, error)
TODO : unit tests
func DownloadFile ¶ added in v0.2.23
DownloadFile downloads, decompresses and decrypts a file
func DownloadFileTo ¶ added in v0.2.23
TODO : unit tests DownloadFileTo downloads a file and writes it to local file
func ExtractAll ¶
func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error
TODO : unit tests ExtractAll Handles all files passed in. Supports `.lzo`, `.lz4`, `.lzma`, and `.tar`. File type `.nop` is used for testing purposes. Each file is extracted in its own goroutine and ExtractAll will wait for all goroutines to finish. Returns the first error encountered.
func ExtractDeleteEverythingModifierFromArgs ¶ added in v0.2.23
func ExtractDeleteTargetModifierFromArgs ¶ added in v0.2.23
func FileIsPiped ¶ added in v0.2.10
FileIsPiped checks if the file is piped
func FolderFromConfig ¶ added in v0.2.23
func FolderSize ¶ added in v0.2.23
func GetBackupSentinelObjects ¶ added in v0.2.23
func GetBoolSetting ¶ added in v0.2.23
func GetBoolSettingDefault ¶ added in v0.2.23
func GetCommandSetting ¶ added in v0.2.23
func GetCommandSettingContext ¶ added in v0.2.23
func GetCommandStreamFetcher ¶ added in v0.2.23
func GetDataFolderPath ¶ added in v0.2.10
func GetDataFolderPath() string
func GetDurationSetting ¶ added in v0.2.23
func GetLastDecompressor ¶ added in v0.2.10
func GetLastDecompressor() (compression.Decompressor, error)
func GetLatestBackupName ¶ added in v0.2.7
TODO : unit tests
func GetLogsDstSettings ¶ added in v0.2.14
TODO : unit tests. GetLogsDstSettings reads the logs destination folder settings from the environment variables
func GetMaxConcurrency ¶ added in v0.2.10
func GetMaxDownloadConcurrency ¶ added in v0.2.10
func GetMaxUploadConcurrency ¶ added in v0.2.10
func GetMaxUploadDiskConcurrency ¶ added in v0.2.10
func GetOplogArchiveAfterSize ¶ added in v0.2.23
func GetOplogPITRDiscoveryIntervalSetting ¶ added in v0.2.23
func GetPermanentBackups ¶ added in v0.2.23
func GetPermanentBackups(folder storage.Folder, metaFetcher GenericMetaFetcher) map[string]bool
func GetPgSlotName ¶ added in v0.2.23
func GetPgSlotName() (pgSlotName string)
GetPgSlotName reads the slot name from the environment
func GetRelativeArchiveDataFolderPath ¶ added in v0.2.23
func GetRelativeArchiveDataFolderPath() string
func GetRequiredSetting ¶ added in v0.2.23
func GetSentinelUserData ¶
func GetSentinelUserData() interface{}
func GetSetting ¶ added in v0.2.10
GetSetting extracts a setting by key if the key is set; returns an empty string otherwise
func GetStreamName ¶ added in v0.2.23
func HandleBackupFetch ¶
func HandleBackupFetch(folder storage.Folder, targetBackupSelector BackupSelector, fetcher func(folder storage.Folder, backup Backup))
TODO : unit tests HandleBackupFetch is invoked to perform wal-g backup-fetch
func HandleBackupList ¶
func HandleBackupList( getBackupsFunc func() ([]BackupTime, error), writeBackupListFunc func([]BackupTime), logging Logging, )
func HandleBackupMark ¶ added in v0.2.12
func HandleBackupMark(uploader *Uploader, backupName string, toPermanent bool, metaInteractor GenericMetaInteractor)
func InitConfig ¶ added in v0.2.10
func InitConfig()
InitConfig reads config file and ENV variables if set.
func PackFileTo ¶
func ReadConfigFromFile ¶ added in v0.2.23
ReadConfigFromFile reads the config into the viper instance
func SentinelNameFromBackup ¶ added in v0.2.23
func SetDefaultValues ¶ added in v0.2.23
SetDefaultValues sets the default settings on the viper instance
func SetLastDecompressor ¶ added in v0.2.10
func SetLastDecompressor(decompressor compression.Decompressor) error
func StreamBackupToCommandStdin ¶ added in v0.2.23
StreamBackupToCommandStdin downloads and decompresses backup stream to cmd stdin.
func TryDownloadFile ¶ added in v0.2.23
func UnmarshalSentinelUserData ¶ added in v0.2.23
func UnmarshalSentinelUserData(userDataStr string) interface{}
func UnwrapLatestModifier ¶ added in v0.2.23
UnwrapLatestModifier checks if LATEST is provided instead of backupName; if so, it replaces it with the name of the latest backup
func UploadSentinel ¶ added in v0.2.7
func UploadSentinel(uploader UploaderProvider, sentinelDto interface{}, backupName string) error
TODO : unit tests
func WriteAsJSON ¶ added in v0.2.23
func WriteBackupList ¶ added in v0.2.10
func WriteBackupList(backups []BackupTime, output io.Writer)
func WritePrettyBackupList ¶ added in v0.2.10
func WritePrettyBackupList(backups []BackupTime, output io.Writer)
Types ¶
type ArchiveNonExistenceError ¶
type ArchiveNonExistenceError struct {
// contains filtered or unexported fields
}
func (ArchiveNonExistenceError) Error ¶
func (err ArchiveNonExistenceError) Error() string
type Backup ¶
type Backup struct { Name string // base backup folder or catchup backup folder Folder storage.Folder }
Backup provides basic functionality to fetch backup-related information from storage
WAL-G stores information about single backup in the following files:
Sentinel file - contains useful information, such as backup start time, backup size, etc. see FetchSentinel, UploadSentinel
Metadata file (only in Postgres) - Postgres sentinel files can be quite large (> 1GB), so the metadata file is useful for the quick fetch of backup-related information. see FetchMetadata, UploadMetadata
func GetBackupByName ¶
func (*Backup) AssureExists ¶ added in v0.2.23
AssureExists is similar to CheckExistence, but returns an error in two cases: 1. Backup does not exist 2. Failed to check if backup exist
func (*Backup) CheckExistence ¶
func (*Backup) FetchMetadata ¶ added in v0.2.23
TODO : unit tests
func (*Backup) FetchSentinel ¶ added in v0.2.4
TODO : unit tests
func (*Backup) SentinelExists ¶ added in v0.2.23
SentinelExists checks that the sentinel file of the specified backup exists.
func (*Backup) UploadMetadata ¶ added in v0.2.23
func (*Backup) UploadSentinel ¶ added in v0.2.23
type BackupFileDescription ¶
type BackupFileDescription struct { IsIncremented bool // should never be both incremented and Skipped IsSkipped bool MTime time.Time CorruptBlocks *CorruptBlocksInfo `json:",omitempty"` UpdatesCount uint64 }
func NewBackupFileDescription ¶
func NewBackupFileDescription(isIncremented, isSkipped bool, modTime time.Time) *BackupFileDescription
func (*BackupFileDescription) SetCorruptBlocks ¶ added in v0.2.23
func (desc *BackupFileDescription) SetCorruptBlocks(corruptBlockNumbers []uint32, storeAllBlocks bool)
type BackupFileList ¶
type BackupFileList map[string]BackupFileDescription
type BackupHasPermanentBackupInFutureError ¶ added in v0.2.12
type BackupHasPermanentBackupInFutureError struct {
// contains filtered or unexported fields
}
type BackupMarkHandler ¶ added in v0.2.23
type BackupMarkHandler struct {
// contains filtered or unexported fields
}
func NewBackupMarkHandler ¶ added in v0.2.23
func NewBackupMarkHandler(metaInteractor GenericMetaInteractor, storageRootFolder storage.Folder) BackupMarkHandler
func (*BackupMarkHandler) GetBackupsToMark ¶ added in v0.2.23
func (h *BackupMarkHandler) GetBackupsToMark(backupName string, toPermanent bool) ([]string, error)
GetBackupsToMark retrieves all previous permanent or impermanent backups, including itself, any previous delta backups and initial full backup, in increasing order beginning from full backup, returning backups ready to be marked
For example, when marking backups from impermanent to permanent, we retrieve all currently impermanent backups and return them as a slice
func (*BackupMarkHandler) MarkBackup ¶ added in v0.2.23
func (h *BackupMarkHandler) MarkBackup(backupName string, toPermanent bool)
MarkBackup marks a backup as permanent or impermanent
type BackupNameSelector ¶ added in v0.2.23
type BackupNameSelector struct {
// contains filtered or unexported fields
}
Select backup by provided backup name
func NewBackupNameSelector ¶ added in v0.2.23
func NewBackupNameSelector(backupName string) (BackupNameSelector, error)
type BackupNonExistenceError ¶
type BackupNonExistenceError struct {
// contains filtered or unexported fields
}
func NewBackupNonExistenceError ¶
func NewBackupNonExistenceError(backupName string) BackupNonExistenceError
func (BackupNonExistenceError) Error ¶
func (err BackupNonExistenceError) Error() string
type BackupObject ¶ added in v0.2.23
type BackupObject interface { storage.Object GetBackupTime() time.Time GetBackupName() string // TODO: move increment info into separate struct (in backup.go) IsFullBackup() bool GetBaseBackupName() string GetIncrementFromName() string }
BackupObject represents the backup sentinel object uploaded on storage
func NewDefaultBackupObject ¶ added in v0.2.23
func NewDefaultBackupObject(object storage.Object) BackupObject
type BackupSelector ¶ added in v0.2.23
Select the name of storage backup chosen according to the internal rules
func NewTargetBackupSelector ¶ added in v0.2.23
func NewTargetBackupSelector(targetUserData, targetName string, metaFetcher GenericMetaFetcher) (BackupSelector, error)
type BackupTime ¶
type BackupTime struct { BackupName string `json:"backup_name"` Time time.Time `json:"time"` WalFileName string `json:"wal_file_name"` }
BackupTime is used to sort backups by latest modified time.
func GetBackupTimeSlices ¶ added in v0.2.23
func GetBackupTimeSlices(backups []storage.Object) []BackupTime
func GetBackups ¶ added in v0.2.23
func GetBackups(folder storage.Folder) (backups []BackupTime, err error)
TODO : unit tests GetBackups receives backup descriptions and sorts them by time
func GetBackupsAndGarbage ¶ added in v0.2.23
func GetBackupsAndGarbage(folder storage.Folder) (backups []BackupTime, garbage []string, err error)
TODO : unit tests
type CachedDecompressor ¶ added in v0.2.10
type CachedDecompressor struct {
FileExtension string
}
CachedDecompressor stores the file extension that identifies the decompressor
type ComposeRatingEvaluator ¶ added in v0.2.23
type CompressAndEncryptError ¶ added in v0.2.10
type CompressAndEncryptError struct {
// contains filtered or unexported fields
}
CompressAndEncryptError is used to catch specific errors from CompressAndEncrypt when uploading to Storage. Will not retry upload if this error occurs.
func (CompressAndEncryptError) Error ¶ added in v0.2.10
func (err CompressAndEncryptError) Error() string
type CorruptBlocksInfo ¶ added in v0.2.23
type DecompressionError ¶ added in v0.2.23
type DecompressionError struct {
// contains filtered or unexported fields
}
type DefaultBackupObject ¶ added in v0.2.23
func (DefaultBackupObject) GetBackupName ¶ added in v0.2.23
func (o DefaultBackupObject) GetBackupName() string
func (DefaultBackupObject) GetBackupTime ¶ added in v0.2.23
func (o DefaultBackupObject) GetBackupTime() time.Time
func (DefaultBackupObject) GetBaseBackupName ¶ added in v0.2.23
func (o DefaultBackupObject) GetBaseBackupName() string
func (DefaultBackupObject) GetIncrementFromName ¶ added in v0.2.23
func (o DefaultBackupObject) GetIncrementFromName() string
func (DefaultBackupObject) IsFullBackup ¶ added in v0.2.23
func (o DefaultBackupObject) IsFullBackup() bool
type DefaultComposeRatingEvaluator ¶ added in v0.2.23
type DefaultComposeRatingEvaluator struct {
// contains filtered or unexported fields
}
func NewDefaultComposeRatingEvaluator ¶ added in v0.2.23
func NewDefaultComposeRatingEvaluator(incrementFromFiles BackupFileList) *DefaultComposeRatingEvaluator
type DeleteHandler ¶ added in v0.2.23
func NewDeleteHandler ¶ added in v0.2.23
func NewDeleteHandler( folder storage.Folder, backups []BackupObject, less func(object1, object2 storage.Object) bool, options ...DeleteHandlerOption, ) *DeleteHandler
func (*DeleteHandler) DeleteBeforeTarget ¶ added in v0.2.23
func (h *DeleteHandler) DeleteBeforeTarget(target BackupObject, confirmed bool) error
func (*DeleteHandler) DeleteEverything ¶ added in v0.2.23
func (h *DeleteHandler) DeleteEverything(confirmed bool)
func (*DeleteHandler) DeleteTargets ¶ added in v0.2.23
func (h *DeleteHandler) DeleteTargets(targets []BackupObject, confirmed bool) error
func (*DeleteHandler) FindTargetBeforeName ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetBeforeName(name string, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetBeforeTime ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetBeforeTime(timeLine time.Time, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetByName ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetByName(bname string) (BackupObject, error)
func (*DeleteHandler) FindTargetRetain ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetRetain(retentionCount, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetRetainAfterName ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetRetainAfterName( retentionCount int, name string, modifier int) (BackupObject, error)
func (*DeleteHandler) FindTargetRetainAfterTime ¶ added in v0.2.23
func (h *DeleteHandler) FindTargetRetainAfterTime(retentionCount int, timeLine time.Time, modifier int, ) (BackupObject, error)
func (*DeleteHandler) HandleDeleteBefore ¶ added in v0.2.23
func (h *DeleteHandler) HandleDeleteBefore(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteEverything ¶ added in v0.2.23
func (h *DeleteHandler) HandleDeleteEverything(args []string, permanentBackups map[string]bool, confirmed bool)
func (*DeleteHandler) HandleDeleteRetain ¶ added in v0.2.23
func (h *DeleteHandler) HandleDeleteRetain(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteRetainAfter ¶ added in v0.2.23
func (h *DeleteHandler) HandleDeleteRetainAfter(args []string, confirmed bool)
func (*DeleteHandler) HandleDeleteTarget ¶ added in v0.2.23
func (h *DeleteHandler) HandleDeleteTarget(targetSelector BackupSelector, confirmed, findFull bool)
type DeleteHandlerOption ¶ added in v0.2.23
type DeleteHandlerOption func(h *DeleteHandler)
func IsPermanentFunc ¶ added in v0.2.23
func IsPermanentFunc(isPermanent func(storage.Object) bool) DeleteHandlerOption
type EmptyWriteIgnorer ¶
type EmptyWriteIgnorer struct {
io.WriteCloser
}
EmptyWriteIgnorer handles 0 byte write in LZ4 package to stop pipe reader/writer from blocking.
type ErrorLogger ¶ added in v0.2.23
type ErrorLogger interface {
FatalOnError(err error)
}
type ExponentialRetrier ¶
type ExponentialRetrier struct {
// contains filtered or unexported fields
}
func NewExponentialRetrier ¶
func NewExponentialRetrier(startSleepDuration, sleepDurationBound time.Duration) *ExponentialRetrier
func (*ExponentialRetrier) Retry ¶ added in v0.2.23
func (retrier *ExponentialRetrier) Retry()
type GenericMetaFetcher ¶ added in v0.2.23
type GenericMetaFetcher interface {
Fetch(backupName string, backupFolder storage.Folder) (GenericMetadata, error)
}
type GenericMetaInteractor ¶ added in v0.2.23
type GenericMetaInteractor interface { GenericMetaFetcher GenericMetaSetter }
GenericMetaInteractor is a combination of GenericMetaFetcher and GenericMetaSetter. It can be useful when both are needed.
type GenericMetaSetter ¶ added in v0.2.23
type GenericMetadata ¶ added in v0.2.23
type GenericMetadata struct { BackupName string UncompressedSize int64 CompressedSize int64 Hostname string StartTime time.Time FinishTime time.Time IsPermanent bool IsIncremental bool // need to use separate fetcher // to avoid useless sentinel load (in Postgres) IncrementDetails IncrementDetailsFetcher UserData interface{} }
GenericMetadata makes it possible to obtain some basic information about an existing backup in storage. It is useful when creating functionality that is common to all databases, for example backup-list or backup-mark.
To support the GenericMetadata in some particular database, one should write its own GenericMetaFetcher and GenericMetaSetter.
type IncrementDetails ¶ added in v0.2.23
IncrementDetails is useful to fetch information about dependencies of some incremental backup
type IncrementDetailsFetcher ¶ added in v0.2.23
type IncrementDetailsFetcher interface {
Fetch() (isIncremental bool, details IncrementDetails, err error)
}
type InfoLogger ¶ added in v0.2.23
type InfoLogger interface {
Println(v ...interface{})
}
type InvalidConcurrencyValueError ¶ added in v0.2.10
type InvalidConcurrencyValueError struct {
// contains filtered or unexported fields
}
func (InvalidConcurrencyValueError) Error ¶ added in v0.2.10
func (err InvalidConcurrencyValueError) Error() string
type LatestBackupSelector ¶ added in v0.2.23
type LatestBackupSelector struct { }
Select the latest backup from storage
func NewLatestBackupSelector ¶ added in v0.2.23
func NewLatestBackupSelector() LatestBackupSelector
type LazyCache ¶
type LazyCache struct {
// contains filtered or unexported fields
}
func NewLazyCache ¶
func (*LazyCache) LoadExisting ¶
type Logging ¶ added in v0.2.23
type Logging struct { InfoLogger InfoLogger ErrorLogger ErrorLogger }
type MetaConstructor ¶ added in v0.2.23
type MetaConstructor interface { Init() error Finalize(backupName string) error MetaInfo() interface{} }
MetaConstructor - interface that helps with building meta-info about a backup and generating MetaInfo see MongoMetaConstructor see RedisMetaConstructor
type NOPTarBall ¶
type NOPTarBall struct {
// contains filtered or unexported fields
}
NOPTarBall mocks a tarball. Used for prefault logic.
func (*NOPTarBall) AddSize ¶
func (tarBall *NOPTarBall) AddSize(i int64)
func (*NOPTarBall) AwaitUploads ¶
func (tarBall *NOPTarBall) AwaitUploads()
func (*NOPTarBall) CloseTar ¶
func (tarBall *NOPTarBall) CloseTar() error
func (*NOPTarBall) Name ¶ added in v0.2.23
func (tarBall *NOPTarBall) Name() string
func (*NOPTarBall) SetUp ¶
func (tarBall *NOPTarBall) SetUp(crypter crypto.Crypter, params ...string)
func (*NOPTarBall) Size ¶
func (tarBall *NOPTarBall) Size() int64
func (*NOPTarBall) TarWriter ¶
func (tarBall *NOPTarBall) TarWriter() *tar.Writer
type NOPTarBallMaker ¶
type NOPTarBallMaker struct {
// contains filtered or unexported fields
}
NOPTarBallMaker creates a new NOPTarBall. Used for testing purposes.
func (*NOPTarBallMaker) Make ¶
func (tarBallMaker *NOPTarBallMaker) Make(inheritState bool) TarBall
Make creates a new NOPTarBall.
type NoBackupsFoundError ¶
type NoBackupsFoundError struct {
// contains filtered or unexported fields
}
func NewNoBackupsFoundError ¶
func NewNoBackupsFoundError() NoBackupsFoundError
func (NoBackupsFoundError) Error ¶
func (err NoBackupsFoundError) Error() string
type NoFilesToExtractError ¶
type NoFilesToExtractError struct {
// contains filtered or unexported fields
}
func (NoFilesToExtractError) Error ¶
func (err NoFilesToExtractError) Error() string
type NopIncrementDetailsFetcher ¶ added in v0.2.23
type NopIncrementDetailsFetcher struct{}
NopIncrementDetailsFetcher is useful for databases without incremental backup support
func (*NopIncrementDetailsFetcher) Fetch ¶ added in v0.2.23
func (idf *NopIncrementDetailsFetcher) Fetch() (bool, IncrementDetails, error)
type ReaderMaker ¶
type ReaderMaker interface { Reader() (io.ReadCloser, error) Path() string }
ReaderMaker is the generic interface used by extract. It allows for ease of handling different file formats.
type SentinelMarshallingError ¶ added in v0.2.7
type SentinelMarshallingError struct {
// contains filtered or unexported fields
}
region errors
func NewSentinelMarshallingError ¶ added in v0.2.7
func NewSentinelMarshallingError(sentinelName string, err error) SentinelMarshallingError
func (SentinelMarshallingError) Error ¶ added in v0.2.7
func (err SentinelMarshallingError) Error() string
type StorageAdapter ¶ added in v0.2.7
type StorageAdapter struct {
// contains filtered or unexported fields
}
type StorageReaderMaker ¶
StorageReaderMaker creates readers for downloading from storage
func NewStorageReaderMaker ¶
func NewStorageReaderMaker(folder storage.Folder, relativePath string) *StorageReaderMaker
func (*StorageReaderMaker) Path ¶
func (readerMaker *StorageReaderMaker) Path() string
func (*StorageReaderMaker) Reader ¶
func (readerMaker *StorageReaderMaker) Reader() (io.ReadCloser, error)
type StorageTarBall ¶
type StorageTarBall struct {
// contains filtered or unexported fields
}
StorageTarBall represents a tar file that is going to be uploaded to storage.
func (*StorageTarBall) AddSize ¶
func (tarBall *StorageTarBall) AddSize(i int64)
AddSize to total Size
func (*StorageTarBall) AwaitUploads ¶
func (tarBall *StorageTarBall) AwaitUploads()
func (*StorageTarBall) CloseTar ¶
func (tarBall *StorageTarBall) CloseTar() error
CloseTar closes the tar writer, flushing any unwritten data to the underlying writer before also closing the underlying writer.
func (*StorageTarBall) Name ¶ added in v0.2.23
func (tarBall *StorageTarBall) Name() string
func (*StorageTarBall) SetUp ¶
func (tarBall *StorageTarBall) SetUp(crypter crypto.Crypter, names ...string)
SetUp creates a new tar writer and starts upload to storage. Upload will block until the tar file is finished writing. If a name for the file is not given, default name is of the form `part_....tar.[Compressor file extension]`.
func (*StorageTarBall) Size ¶
func (tarBall *StorageTarBall) Size() int64
Size accumulated in this tarball
func (*StorageTarBall) TarWriter ¶
func (tarBall *StorageTarBall) TarWriter() *tar.Writer
type StorageTarBallMaker ¶
type StorageTarBallMaker struct {
// contains filtered or unexported fields
}
StorageTarBallMaker creates tarballs that are uploaded to storage.
func NewStorageTarBallMaker ¶
func NewStorageTarBallMaker(backupName string, uploader *Uploader) *StorageTarBallMaker
func (*StorageTarBallMaker) Make ¶
func (tarBallMaker *StorageTarBallMaker) Make(dedicatedUploader bool) TarBall
Make returns a tarball with required storage fields.
type TarBall ¶
type TarBall interface { SetUp(crypter crypto.Crypter, args ...string) CloseTar() error Size() int64 AddSize(int64) TarWriter() *tar.Writer AwaitUploads() Name() string }
A TarBall represents one tar file.
type TarBallMaker ¶
TarBallMaker is used to allow for flexible creation of different TarBalls.
func NewNopTarBallMaker ¶
func NewNopTarBallMaker() TarBallMaker
type TarBallQueue ¶ added in v0.2.23
type TarBallQueue struct { TarSizeThreshold int64 AllTarballsSize *int64 TarBallMaker TarBallMaker LastCreatedTarball TarBall // contains filtered or unexported fields }
TarBallQueue is used to process multiple tarballs concurrently
func NewTarBallQueue ¶ added in v0.2.23
func NewTarBallQueue(tarSizeThreshold int64, tarBallMaker TarBallMaker) *TarBallQueue
func (*TarBallQueue) CheckSizeAndEnqueueBack ¶ added in v0.2.23
func (tarQueue *TarBallQueue) CheckSizeAndEnqueueBack(tarBall TarBall) error
func (*TarBallQueue) CloseTarball ¶ added in v0.2.23
func (tarQueue *TarBallQueue) CloseTarball(tarBall TarBall) error
func (*TarBallQueue) Deque ¶ added in v0.2.23
func (tarQueue *TarBallQueue) Deque() TarBall
func (*TarBallQueue) DequeCtx ¶ added in v0.2.23
func (tarQueue *TarBallQueue) DequeCtx(ctx context.Context) (TarBall, error)
DequeCtx returns a TarBall from the queue. If the context finishes before it can do so, it returns the result of ctx.Err().
func (*TarBallQueue) EnqueueBack ¶ added in v0.2.23
func (tarQueue *TarBallQueue) EnqueueBack(tarBall TarBall)
func (*TarBallQueue) FinishQueue ¶ added in v0.2.23
func (tarQueue *TarBallQueue) FinishQueue() error
func (*TarBallQueue) FinishTarBall ¶ added in v0.2.23
func (tarQueue *TarBallQueue) FinishTarBall(tarBall TarBall) error
func (*TarBallQueue) NewTarBall ¶ added in v0.2.23
func (tarQueue *TarBallQueue) NewTarBall(dedicatedUploader bool) TarBall
NewTarBall starts writing new tarball
func (*TarBallQueue) StartQueue ¶ added in v0.2.23
func (tarQueue *TarBallQueue) StartQueue() error
type TarInterpreter ¶
TarInterpreter behaves differently for different file types.
type UnconfiguredStorageError ¶ added in v0.2.8
type UnconfiguredStorageError struct {
// contains filtered or unexported fields
}
func (UnconfiguredStorageError) Error ¶ added in v0.2.8
func (err UnconfiguredStorageError) Error() string
type UnknownCompressionMethodError ¶
type UnknownCompressionMethodError struct {
// contains filtered or unexported fields
}
func (UnknownCompressionMethodError) Error ¶
func (err UnknownCompressionMethodError) Error() string
type UnmarshallingError ¶ added in v0.2.10
type UnmarshallingError struct {
// contains filtered or unexported fields
}
func (UnmarshallingError) Error ¶ added in v0.2.10
func (err UnmarshallingError) Error() string
type UnsetRequiredSettingError ¶ added in v0.2.10
type UnsetRequiredSettingError struct {
// contains filtered or unexported fields
}
func NewUnsetRequiredSettingError ¶ added in v0.2.10
func NewUnsetRequiredSettingError(settingName string) UnsetRequiredSettingError
func (UnsetRequiredSettingError) Error ¶ added in v0.2.10
func (err UnsetRequiredSettingError) Error() string
type UnsupportedFileTypeError ¶
type UnsupportedFileTypeError struct {
// contains filtered or unexported fields
}
UnsupportedFileTypeError is used to signal file types that are unsupported by WAL-G.
func (UnsupportedFileTypeError) Error ¶ added in v0.2.8
func (err UnsupportedFileTypeError) Error() string
type UploadObject ¶ added in v0.2.10
UploadObject
type Uploader ¶
type Uploader struct { UploadingFolder storage.Folder Compressor compression.Compressor ArchiveStatusManager asm.ArchiveStatusManager Failed atomic.Value // contains filtered or unexported fields }
Uploader contains fields associated with uploading tarballs. Multiple tarballs can share one uploader.
func ConfigureUploader ¶ added in v0.2.8
ConfigureUploader connects to storage and creates an uploader. It makes sure that a valid session has started; if invalid, returns AWS error and `<nil>` values.
func ConfigureUploaderWithoutCompressMethod ¶ added in v0.2.23
func NewUploader ¶
func NewUploader( compressor compression.Compressor, uploadingLocation storage.Folder, ) *Uploader
func (*Uploader) Compression ¶ added in v0.2.23
func (uploader *Uploader) Compression() compression.Compressor
Compression returns configured compressor
func (*Uploader) DisableSizeTracking ¶ added in v0.2.23
func (uploader *Uploader) DisableSizeTracking()
DisableSizeTracking stops bandwidth tracking
func (*Uploader) Finish ¶ added in v0.2.23
func (uploader *Uploader) Finish()
Finish waits for all waiting parts to be uploaded. If an error occurs, prints alert to stderr.
func (*Uploader) PushStream ¶ added in v0.2.10
TODO : unit tests PushStream compresses a stream and pushes it
func (*Uploader) PushStreamToDestination ¶ added in v0.2.10
TODO : unit tests PushStreamToDestination compresses a stream and pushes it to the specified destination
func (*Uploader) RawDataSize ¶ added in v0.2.23
RawDataSize returns 0 and error when SizeTracking disabled (see DisableSizeTracking)
func (*Uploader) UploadFile ¶
func (uploader *Uploader) UploadFile(file ioextensions.NamedReader) error
TODO : unit tests UploadFile compresses a file and uploads it.
func (*Uploader) UploadMultiple ¶ added in v0.2.10
func (uploader *Uploader) UploadMultiple(objects []UploadObject) error
UploadMultiple uploads multiple objects from the start of the slice, returning the first error if any. Note that this operation is not atomic TODO : unit tests
func (*Uploader) UploadedDataSize ¶ added in v0.2.23
UploadedDataSize returns 0 and error when SizeTracking disabled (see DisableSizeTracking)
type UploaderProvider ¶ added in v0.2.23
type UploaderProvider interface { Upload(path string, content io.Reader) error UploadFile(file ioextensions.NamedReader) error PushStream(stream io.Reader) (string, error) PushStreamToDestination(stream io.Reader, dstPath string) error Compression() compression.Compressor DisableSizeTracking() UploadedDataSize() (int64, error) RawDataSize() (int64, error) }
type UserDataBackupSelector ¶ added in v0.2.23
type UserDataBackupSelector struct {
// contains filtered or unexported fields
}
Select backup which has the provided user data
func NewUserDataBackupSelector ¶ added in v0.2.23
func NewUserDataBackupSelector(userDataRaw string, metaFetcher GenericMetaFetcher) UserDataBackupSelector
type WithSizeReader ¶ added in v0.2.13
type WithSizeReader struct {
// contains filtered or unexported fields
}
func NewWithSizeReader ¶ added in v0.2.23
func NewWithSizeReader(underlying io.Reader, readSize *int64) *WithSizeReader
type WrongTypeError ¶
type WrongTypeError struct {
// contains filtered or unexported fields
}
func NewWrongTypeError ¶
func NewWrongTypeError(desiredType string) WrongTypeError
func (WrongTypeError) Error ¶
func (err WrongTypeError) Error() string
Source Files ¶
- backup.go
- backup_fetch_handler.go
- backup_file_description.go
- backup_list_handler.go
- backup_mark.go
- backup_mark_handler.go
- backup_object.go
- backup_selector.go
- backup_time.go
- backup_util.go
- compose_rating_evaluator.go
- compress_and_encrypt.go
- config.go
- configure.go
- configure_crypter.go
- delete_handler.go
- exponential_retrier.go
- extract.go
- fetch_helper.go
- generic_metadata.go
- lazy_cache.go
- nop_tarball.go
- reader_maker.go
- sentinel.go
- storage_adapter.go
- storage_reader_maker.go
- storage_tar_ball.go
- storage_tar_ball_maker.go
- stream_fetch_helper.go
- stream_push_helper.go
- tar_ball.go
- tar_ball_maker.go
- tar_ball_queue.go
- uploader.go
- with_size_reader.go
Directories ¶
Path | Synopsis |
---|---|
Package abool provides atomic Boolean type for cleaner code and better performance.
|
Package abool provides atomic Boolean type for cleaner code and better performance. |
databases
|
|
testtools
Package mock_internal is a generated GoMock package.
|
Package mock_internal is a generated GoMock package. |