Documentation ¶
Index ¶
- Constants
- Variables
- func ApplyKVFilesWithBatchMethod(ctx context.Context, logIter LogIter, batchCount int, batchSize uint64, ...) error
- func ApplyKVFilesWithSingleMethod(ctx context.Context, files LogIter, ...) error
- func LoadMigrations(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.Migration]
- func RestoreMetaKVFilesWithBatchMethod(ctx context.Context, defaultFiles []*backuppb.DataFileInfo, ...) error
- func SortMetaKVFiles(files []*backuppb.DataFileInfo) []*backuppb.DataFileInfo
- type BuildTableMappingManagerConfig
- type CompactedFileSplitStrategy
- type DDLMetaGroup
- type FileIndex
- type FileIndexIter
- type FilesInRegion
- type FilesInTable
- type FullBackupStorageConfig
- type GroupIndex
- type GroupIndexIter
- type KvEntryWithTS
- type Log
- type LogClient
- func (rc *LogClient) BuildTableMappingManager(ctx context.Context, cfg *BuildTableMappingManagerConfig) (*stream.TableMappingManager, error)
- func (rc *LogClient) CleanUpKVFiles(ctx context.Context) error
- func (rc *LogClient) Close(ctx context.Context)
- func (rc *LogClient) CurrentTS() uint64
- func (rc *LogClient) FailpointDoChecksumForLogRestore(ctx context.Context, kvClient kv.Client, pdClient pd.Client, ...) (finalErr error)
- func (rc *LogClient) GenGlobalID(ctx context.Context) (int64, error)
- func (rc *LogClient) GenGlobalIDs(ctx context.Context, n int) ([]int64, error)
- func (rc *LogClient) GetClusterID(ctx context.Context) uint64
- func (rc *LogClient) GetDomain() *domain.Domain
- func (rc *LogClient) GetGCRows() []*stream.PreDelRangeQuery
- func (rc *LogClient) GetMigrations(ctx context.Context) ([]*backuppb.Migration, error)
- func (rc *LogClient) Init(ctx context.Context, g glue.Glue, store kv.Storage) error
- func (rc *LogClient) InitCheckpointMetadataForCompactedSstRestore(ctx context.Context) (map[string]struct{}, error)
- func (rc *LogClient) InitCheckpointMetadataForLogRestore(ctx context.Context, startTS, restoredTS uint64, gcRatio string, ...) (string, error)
- func (rc *LogClient) InitClients(ctx context.Context, backend *backuppb.StorageBackend, ...) error
- func (rc *LogClient) InsertGCRows(ctx context.Context) error
- func (rc *LogClient) InstallLogFileManager(ctx context.Context, startTS, restoreTS uint64, metadataDownloadBatchSize uint, ...) error
- func (rc *LogClient) IterMetaKVToBuildAndSaveIdMap(ctx context.Context, tableMappingManager *stream.TableMappingManager, ...) error
- func (rc *LogClient) RecordDeleteRange(sql *stream.PreDelRangeQuery)
- func (rc *LogClient) RepairIngestIndex(ctx context.Context, ingestRecorder *ingestrec.IngestRecorder, g glue.Glue) error
- func (rc *LogClient) RestoreAndRewriteMetaKVFiles(ctx context.Context, files []*backuppb.DataFileInfo, ...) error
- func (rc *LogClient) RestoreBatchMetaKVFiles(ctx context.Context, files []*backuppb.DataFileInfo, ...) ([]*KvEntryWithTS, error)
- func (rc *LogClient) RestoreCompactedSstFiles(ctx context.Context, ...) error
- func (rc *LogClient) RestoreKVFiles(ctx context.Context, rules map[int64]*restoreutils.RewriteRules, ...) error
- func (rc *LogClient) RunGCRowsLoader(ctx context.Context)
- func (rc *LogClient) SetCrypter(crypter *backuppb.CipherInfo)
- func (rc *LogClient) SetCurrentTS(ts uint64) error
- func (rc *LogClient) SetRawKVBatchClient(ctx context.Context, pdAddrs []string, security config.Security) error
- func (rc *LogClient) SetStorage(ctx context.Context, backend *backuppb.StorageBackend, ...) error
- func (rc *LogClient) SetUpstreamClusterID(upstreamClusterID uint64)
- func (rc *LogClient) UpdateSchemaVersion(ctx context.Context) error
- func (rc *LogClient) WrapCompactedFilesIterWithSplitHelper(ctx context.Context, ...) (iter.TryNextor[*backuppb.LogFileSubcompaction], error)
- func (rc *LogClient) WrapLogFilesIterWithSplitHelper(ctx context.Context, logIter LogIter, execCtx sqlexec.RestrictedSQLExecutor, ...) (LogIter, error)
- type LogDataFileInfo
- type LogFileImporter
- type LogFileManager
- func (rc *LogFileManager) BuildMigrations(migs []*backuppb.Migration)
- func (rc *LogFileManager) FilterDataFiles(m MetaNameIter) LogIter
- func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter
- func (rc *LogFileManager) GetCompactionIter(ctx context.Context) iter.TryNextor[*backuppb.LogFileSubcompaction]
- func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context) ([]Log, error)
- func (rc *LogFileManager) LoadDMLFiles(ctx context.Context) (LogIter, error)
- func (rc *LogFileManager) ReadAllEntries(ctx context.Context, file Log, filterTS uint64) ([]*KvEntryWithTS, []*KvEntryWithTS, error)
- func (rc *LogFileManager) ShiftTS() uint64
- func (rc *LogFileManager) ShouldFilterOut(d *backuppb.DataFileInfo) bool
- type LogFileManagerInit
- type LogFilesSkipMap
- type LogFilesSkipMapExt
- type LogIter
- type LogRestoreManager
- type LogSplitStrategy
- type Meta
- type MetaGroupIter
- type MetaIter
- type MetaMigrationsIter
- type MetaName
- type MetaNameIter
- type MetaWithMigrations
- type PhysicalMigrationsIter
- type PhysicalWithMigrations
- type RPCResult
- type RangeController
- type RegionFunc
- type RetryStrategy
- type SstRestoreManager
- type SubCompactionIter
- type WithMigrations
- type WithMigrationsBuilder
Constants ¶
const MetaKVBatchSize = 64 * 1024 * 1024
const PITRIdMapBlockSize int = 524288
const UnsafePITRLogRestoreStartBeforeAnyUpstreamUserDDL = "UNSAFE_PITR_LOG_RESTORE_START_BEFORE_ANY_UPSTREAM_USER_DDL"
Variables ¶
var TotalEntryCount int64
Functions ¶
func LoadMigrations ¶
func LoadMigrations(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.Migration]
func RestoreMetaKVFilesWithBatchMethod ¶
func RestoreMetaKVFilesWithBatchMethod(
    ctx context.Context,
    defaultFiles []*backuppb.DataFileInfo,
    writeFiles []*backuppb.DataFileInfo,
    schemasReplace *stream.SchemasReplace,
    updateStats func(kvCount uint64, size uint64),
    progressInc func(),
    restoreBatch func(
        ctx context.Context,
        files []*backuppb.DataFileInfo,
        schemasReplace *stream.SchemasReplace,
        kvEntries []*KvEntryWithTS,
        filterTS uint64,
        updateStats func(kvCount uint64, size uint64),
        progressInc func(),
        cf string,
    ) ([]*KvEntryWithTS, error),
) error
func SortMetaKVFiles ¶
func SortMetaKVFiles(files []*backuppb.DataFileInfo) []*backuppb.DataFileInfo
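For orientation, here is a minimal sketch (not taken from the package itself) of how the two functions above might be combined with the LogClient method RestoreBatchMetaKVFiles. The import paths, the prepared schemasReplace, and the no-op progress callbacks are assumptions:

// Assumed import paths, following the TiDB repository layout (not verified here):
import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/stream"
)

// restoreMetaKV sorts the meta KV files and replays them in batches.
func restoreMetaKV(
    ctx context.Context,
    rc *logclient.LogClient,
    schemasReplace *stream.SchemasReplace,
    defaultFiles, writeFiles []*backuppb.DataFileInfo,
) error {
    // SortMetaKVFiles puts the files into the order the batched replay expects.
    defaultFiles = logclient.SortMetaKVFiles(defaultFiles)
    writeFiles = logclient.SortMetaKVFiles(writeFiles)

    updateStats := func(kvCount, size uint64) { /* record restored KV count and bytes */ }
    progressInc := func() { /* advance a progress bar */ }

    // rc.RestoreBatchMetaKVFiles matches the restoreBatch callback signature above.
    return logclient.RestoreMetaKVFilesWithBatchMethod(
        ctx, defaultFiles, writeFiles, schemasReplace,
        updateStats, progressInc, rc.RestoreBatchMetaKVFiles,
    )
}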
Types ¶
type BuildTableMappingManagerConfig ¶
type BuildTableMappingManagerConfig struct {
    // required
    CurrentIdMapSaved bool
    TableFilter       filter.Filter

    // optional
    FullBackupStorage *FullBackupStorageConfig
    CipherInfo        *backuppb.CipherInfo
    Files             []*backuppb.DataFileInfo
}
type CompactedFileSplitStrategy ¶
type CompactedFileSplitStrategy struct {
    *split.BaseSplitStrategy
    // contains filtered or unexported fields
}
func NewCompactedFileSplitStrategy ¶
func NewCompactedFileSplitStrategy(
    rules map[int64]*restoreutils.RewriteRules,
    checkpointsSet map[string]struct{},
    updateStatsFn func(uint64, uint64),
) *CompactedFileSplitStrategy
func (*CompactedFileSplitStrategy) Accumulate ¶
func (cs *CompactedFileSplitStrategy) Accumulate(subCompaction *backuppb.LogFileSubcompaction)
func (*CompactedFileSplitStrategy) ShouldSkip ¶
func (cs *CompactedFileSplitStrategy) ShouldSkip(subCompaction *backuppb.LogFileSubcompaction) bool
func (*CompactedFileSplitStrategy) ShouldSplit ¶
func (cs *CompactedFileSplitStrategy) ShouldSplit() bool
type DDLMetaGroup ¶
type DDLMetaGroup struct {
    Path      string
    FileMetas []*backuppb.DataFileInfo
}
type FileIndex ¶
type FileIndex = iter.Indexed[*backuppb.DataFileInfo]
FileIndex is the type of a logical data file carrying its index within the physical data file.
type FileIndexIter ¶
FileIndexIter is the type of iterator over logical data files carrying their indexes within the physical data file.
type FilesInRegion ¶
type FilesInRegion struct {
// contains filtered or unexported fields
}
type FilesInTable ¶
type FilesInTable struct {
// contains filtered or unexported fields
}
type FullBackupStorageConfig ¶
type FullBackupStorageConfig struct {
    Backend *backuppb.StorageBackend
    Opts    *storage.ExternalStorageOptions
}
type GroupIndex ¶
type GroupIndex = iter.Indexed[*backuppb.DataFileGroup]
GroupIndex is the type of a physical data file carrying its index within the metadata file.
type GroupIndexIter ¶
type GroupIndexIter = iter.TryNextor[GroupIndex]
GroupIndexIter is the type of iterator over physical data files carrying their indexes within the metadata file.
type KvEntryWithTS ¶
KvEntryWithTS is a KV entry together with its ts, where the ts is decoded from the entry.
type Log ¶
type Log = *backuppb.DataFileInfo
Log is the metadata of one file recording KV sequences.
type LogClient ¶
type LogClient struct {
    *LogFileManager
    // contains filtered or unexported fields
}
func NewRestoreClient ¶
func NewRestoreClient(
    pdClient pd.Client,
    pdHTTPCli pdhttp.Client,
    tlsConf *tls.Config,
    keepaliveConf keepalive.ClientParameters,
) *LogClient
NewRestoreClient returns a new LogClient.
func (*LogClient) BuildTableMappingManager ¶
func (rc *LogClient) BuildTableMappingManager( ctx context.Context, cfg *BuildTableMappingManagerConfig, ) (*stream.TableMappingManager, error)
BuildTableMappingManager builds the table mapping manager. It either reads the full backup storage to fetch the full backup table info and initialize the manager, reads the id map produced by a previous task, or loads the mapping saved by the last run of the same task.
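As a hedged sketch of driving this from a full backup: the caller-supplied backend, table filter, and file slice below are placeholders, and the import paths (including the table-filter package) are assumptions based on the TiDB repository layout:

import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/stream"
    filter "github.com/pingcap/tidb/pkg/util/table-filter"
)

// buildMapping initializes the table mapping manager from the full backup storage.
func buildMapping(
    ctx context.Context,
    rc *logclient.LogClient,
    fullBackend *backuppb.StorageBackend,
    tableFilter filter.Filter,
    files []*backuppb.DataFileInfo,
) (*stream.TableMappingManager, error) {
    cfg := &logclient.BuildTableMappingManagerConfig{
        // No id map has been saved yet, so read the full backup instead.
        CurrentIdMapSaved: false,
        TableFilter:       tableFilter,
        FullBackupStorage: &logclient.FullBackupStorageConfig{Backend: fullBackend},
        Files:             files,
    }
    return rc.BuildTableMappingManager(ctx, cfg)
}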
func (*LogClient) CleanUpKVFiles ¶
func (*LogClient) FailpointDoChecksumForLogRestore ¶
func (rc *LogClient) FailpointDoChecksumForLogRestore(
    ctx context.Context,
    kvClient kv.Client,
    pdClient pd.Client,
    rewriteRules map[int64]*restoreutils.RewriteRules,
) (finalErr error)
FailpointDoChecksumForLogRestore is called by a failpoint and is only used in tests. It prints the checksum result into the log, and the auto-test script records it to compare against another cluster's checksum.
func (*LogClient) GenGlobalID ¶
GenGlobalID generates a global id in a transactional way.
func (*LogClient) GenGlobalIDs ¶
GenGlobalIDs generates several global ids in a transactional way.
func (*LogClient) GetClusterID ¶
GetClusterID gets the cluster id from the downstream cluster.
func (*LogClient) GetGCRows ¶
func (rc *LogClient) GetGCRows() []*stream.PreDelRangeQuery
GetGCRows is only used in unit tests.
func (*LogClient) GetMigrations ¶
func (*LogClient) InitCheckpointMetadataForCompactedSstRestore ¶
func (*LogClient) InitCheckpointMetadataForLogRestore ¶
func (rc *LogClient) InitCheckpointMetadataForLogRestore(
    ctx context.Context,
    startTS, restoredTS uint64,
    gcRatio string,
    tiflashRecorder *tiflashrec.TiFlashRecorder,
) (string, error)
func (*LogClient) InitClients ¶
func (*LogClient) InsertGCRows ¶
InsertGCRows inserts the queries into the table `gc_delete_range`.
func (*LogClient) InstallLogFileManager ¶
func (*LogClient) IterMetaKVToBuildAndSaveIdMap ¶
func (rc *LogClient) IterMetaKVToBuildAndSaveIdMap(
    ctx context.Context,
    tableMappingManager *stream.TableMappingManager,
    files []*backuppb.DataFileInfo,
) error
IterMetaKVToBuildAndSaveIdMap iterates over the meta KV, builds the id mapping, and saves it to storage.
func (*LogClient) RecordDeleteRange ¶
func (rc *LogClient) RecordDeleteRange(sql *stream.PreDelRangeQuery)
func (*LogClient) RepairIngestIndex ¶
func (rc *LogClient) RepairIngestIndex(ctx context.Context, ingestRecorder *ingestrec.IngestRecorder, g glue.Glue) error
RepairIngestIndex drops the indexes from IngestRecorder and re-adds them.
func (*LogClient) RestoreAndRewriteMetaKVFiles ¶
func (rc *LogClient) RestoreAndRewriteMetaKVFiles(
    ctx context.Context,
    files []*backuppb.DataFileInfo,
    schemasReplace *stream.SchemasReplace,
    updateStats func(kvCount uint64, size uint64),
    progressInc func(),
) error
RestoreAndRewriteMetaKVFiles tries to restore the meta KV-event files from the stream backup.
func (*LogClient) RestoreBatchMetaKVFiles ¶
func (rc *LogClient) RestoreBatchMetaKVFiles(
    ctx context.Context,
    files []*backuppb.DataFileInfo,
    schemasReplace *stream.SchemasReplace,
    kvEntries []*KvEntryWithTS,
    filterTS uint64,
    updateStats func(kvCount uint64, size uint64),
    progressInc func(),
    cf string,
) ([]*KvEntryWithTS, error)
func (*LogClient) RestoreCompactedSstFiles ¶
func (rc *LogClient) RestoreCompactedSstFiles(
    ctx context.Context,
    compactionsIter iter.TryNextor[*backuppb.LogFileSubcompaction],
    rules map[int64]*restoreutils.RewriteRules,
    importModeSwitcher *restore.ImportModeSwitcher,
    onProgress func(int64),
) error
func (*LogClient) RestoreKVFiles ¶
func (rc *LogClient) RestoreKVFiles(
    ctx context.Context,
    rules map[int64]*restoreutils.RewriteRules,
    logIter LogIter,
    pitrBatchCount uint32,
    pitrBatchSize uint32,
    updateStats func(kvCount uint64, size uint64),
    onProgress func(cnt int64),
    cipherInfo *backuppb.CipherInfo,
    masterKeys []*encryptionpb.MasterKey,
) error
func (*LogClient) RunGCRowsLoader ¶
RunGCRowsLoader uses a channel to save the delete-range queries, making them thread-safe.
func (*LogClient) SetCrypter ¶
func (rc *LogClient) SetCrypter(crypter *backuppb.CipherInfo)
func (*LogClient) SetCurrentTS ¶
func (*LogClient) SetRawKVBatchClient ¶
func (*LogClient) SetStorage ¶
func (rc *LogClient) SetStorage(ctx context.Context, backend *backuppb.StorageBackend, opts *storage.ExternalStorageOptions) error
func (*LogClient) SetUpstreamClusterID ¶
func (*LogClient) UpdateSchemaVersion ¶
UpdateSchemaVersion updates the schema version in a transactional way.
func (*LogClient) WrapCompactedFilesIterWithSplitHelper ¶
func (rc *LogClient) WrapCompactedFilesIterWithSplitHelper(
    ctx context.Context,
    compactedIter iter.TryNextor[*backuppb.LogFileSubcompaction],
    rules map[int64]*restoreutils.RewriteRules,
    checkpointSets map[string]struct{},
    updateStatsFn func(uint64, uint64),
    splitSize uint64,
    splitKeys int64,
) (iter.TryNextor[*backuppb.LogFileSubcompaction], error)
WrapCompactedFilesIterWithSplitHelper applies a splitting strategy to the compacted files iterator. It uses a region splitter to handle the splitting logic based on the provided rules and checkpoint sets.
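A sketch of the compacted-SST path, assuming the caller has prepared the rewrite rules, the checkpoint set from InitCheckpointMetadataForCompactedSstRestore, an import-mode switcher, and the split thresholds (import paths are assumed):

import (
    "context"

    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/restore"
    restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils"
)

// restoreCompacted wraps the compaction iterator with the split helper, then restores the SSTs.
func restoreCompacted(
    ctx context.Context,
    rc *logclient.LogClient,
    rules map[int64]*restoreutils.RewriteRules,
    checkpointSets map[string]struct{},
    switcher *restore.ImportModeSwitcher,
    splitSize uint64,
    splitKeys int64,
) error {
    compactedIter := rc.GetCompactionIter(ctx)

    updateStatsFn := func(kvCount, size uint64) { /* accumulate progress */ }
    wrapped, err := rc.WrapCompactedFilesIterWithSplitHelper(
        ctx, compactedIter, rules, checkpointSets, updateStatsFn, splitSize, splitKeys,
    )
    if err != nil {
        return err
    }
    return rc.RestoreCompactedSstFiles(ctx, wrapped, rules, switcher, func(int64) { /* progress */ })
}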
func (*LogClient) WrapLogFilesIterWithSplitHelper ¶
func (rc *LogClient) WrapLogFilesIterWithSplitHelper(
    ctx context.Context,
    logIter LogIter,
    execCtx sqlexec.RestrictedSQLExecutor,
    rules map[int64]*restoreutils.RewriteRules,
    updateStatsFn func(uint64, uint64),
    splitSize uint64,
    splitKeys int64,
) (LogIter, error)
WrapLogFilesIterWithSplitHelper applies a splitting strategy to the log files iterator. It uses a region splitter to handle the splitting logic based on the provided rules.
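The log KV path might be wired up as in the sketch below; the split thresholds and batch sizes are illustrative placeholders only, and the import paths are assumptions:

import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/kvproto/pkg/encryptionpb"
    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils"
    "github.com/pingcap/tidb/pkg/util/sqlexec"
)

// restoreLogKVs loads the DML files, wraps them with the split helper, and restores them.
func restoreLogKVs(
    ctx context.Context,
    rc *logclient.LogClient,
    execCtx sqlexec.RestrictedSQLExecutor,
    rules map[int64]*restoreutils.RewriteRules,
    cipher *backuppb.CipherInfo,
    masterKeys []*encryptionpb.MasterKey,
) error {
    logIter, err := rc.LoadDMLFiles(ctx)
    if err != nil {
        return err
    }
    updateStats := func(kvCount, size uint64) { /* progress accounting */ }
    logIter, err = rc.WrapLogFilesIterWithSplitHelper(
        ctx, logIter, execCtx, rules, updateStats,
        96*1024*1024, // splitSize, placeholder
        960_000,      // splitKeys, placeholder
    )
    if err != nil {
        return err
    }
    return rc.RestoreKVFiles(
        ctx, rules, logIter,
        128,          // pitrBatchCount, placeholder
        32*1024*1024, // pitrBatchSize, placeholder
        updateStats, func(cnt int64) { /* progress */ },
        cipher, masterKeys,
    )
}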
type LogDataFileInfo ¶
type LogDataFileInfo struct {
    *backuppb.DataFileInfo
    MetaDataGroupName   string
    OffsetInMetaGroup   int
    OffsetInMergedGroup int
}
type LogFileImporter ¶
type LogFileImporter struct {
// contains filtered or unexported fields
}
func NewLogFileImporter ¶
func NewLogFileImporter(
    metaClient split.SplitClient,
    importClient importclient.ImporterClient,
    backend *backuppb.StorageBackend,
) *LogFileImporter
NewLogFileImporter returns a new LogFileImporter.
func (*LogFileImporter) ClearFiles ¶
func (*LogFileImporter) Close ¶
func (importer *LogFileImporter) Close() error
func (*LogFileImporter) ImportKVFiles ¶
func (importer *LogFileImporter) ImportKVFiles(
    ctx context.Context,
    files []*LogDataFileInfo,
    rule *restoreutils.RewriteRules,
    shiftStartTS uint64,
    startTS uint64,
    restoreTS uint64,
    supportBatch bool,
    cipherInfo *backuppb.CipherInfo,
    masterKeys []*encryptionpb.MasterKey,
) error
ImportKVFiles restores the kv events.
type LogFileManager ¶
type LogFileManager struct {
// contains filtered or unexported fields
}
LogFileManager is the manager for the log files of a certain restoration. It supports reading and filtering files from the log backup archive with a fixed start TS / restore TS.
func CreateLogFileManager ¶
func CreateLogFileManager(ctx context.Context, init LogFileManagerInit) (*LogFileManager, error)
CreateLogFileManager creates a log file manager using the specified config. Generally the config cannot be changed during its lifetime.
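A minimal construction sketch, assuming the migrations builder, migrations, and encryption manager are prepared by the caller and that the import paths (including the encryption package) follow the TiDB repository layout:

import (
    "context"

    "github.com/pingcap/tidb/br/pkg/encryption"
    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/storage"
)

// newLogFileManager builds a manager over the log backup archive in extStorage.
func newLogFileManager(
    ctx context.Context,
    startTS, restoreTS uint64,
    extStorage storage.ExternalStorage,
    builder *logclient.WithMigrationsBuilder,
    migrations *logclient.WithMigrations,
    encManager *encryption.Manager,
) (*logclient.LogFileManager, error) {
    init := logclient.LogFileManagerInit{
        StartTS:                   startTS,
        RestoreTS:                 restoreTS,
        Storage:                   extStorage,
        MigrationsBuilder:         builder,
        Migrations:                migrations,
        MetadataDownloadBatchSize: 128, // placeholder batch size
        EncryptionManager:         encManager,
    }
    return logclient.CreateLogFileManager(ctx, init)
}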
func (*LogFileManager) BuildMigrations ¶
func (rc *LogFileManager) BuildMigrations(migs []*backuppb.Migration)
func (*LogFileManager) FilterDataFiles ¶
func (rc *LogFileManager) FilterDataFiles(m MetaNameIter) LogIter
func (*LogFileManager) FilterMetaFiles ¶
func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter
func (*LogFileManager) GetCompactionIter ¶
func (rc *LogFileManager) GetCompactionIter(ctx context.Context) iter.TryNextor[*backuppb.LogFileSubcompaction]
GetCompactionIter fetches the compactions that may contain files with TS less than the restore TS.
func (*LogFileManager) LoadDDLFilesAndCountDMLFiles ¶
func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context) ([]Log, error)
LoadDDLFilesAndCountDMLFiles loads all DDL files that need to be restored in the restoration. This function returns all DDL files directly because we need to sort all of them.
func (*LogFileManager) LoadDMLFiles ¶
func (rc *LogFileManager) LoadDMLFiles(ctx context.Context) (LogIter, error)
LoadDMLFiles loads all DML files that need to be restored in the restoration. This function returns a stream, because there are usually many DML files to restore.
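A sketch of draining that stream; it assumes the iter.TryNextor interface exposes TryNext(ctx) returning a result with Item, Err, and Finished fields, and that the package is imported as logclient:

import (
    "context"

    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
)

// countDMLFiles drains the DML file stream and counts the files that survive filtering.
func countDMLFiles(ctx context.Context, mgr *logclient.LogFileManager) (int, error) {
    files, err := mgr.LoadDMLFiles(ctx)
    if err != nil {
        return 0, err
    }
    count := 0
    for {
        res := files.TryNext(ctx)
        if res.Err != nil {
            return count, res.Err
        }
        if res.Finished {
            return count, nil
        }
        _ = res.Item // a *LogDataFileInfo describing one logical data file
        count++
    }
}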
func (*LogFileManager) ReadAllEntries ¶
func (rc *LogFileManager) ReadAllEntries( ctx context.Context, file Log, filterTS uint64, ) ([]*KvEntryWithTS, []*KvEntryWithTS, error)
ReadAllEntries loads the content of a log file and filters out the entries that are not needed.
func (*LogFileManager) ShiftTS ¶
func (rc *LogFileManager) ShiftTS() uint64
func (*LogFileManager) ShouldFilterOut ¶
func (rc *LogFileManager) ShouldFilterOut(d *backuppb.DataFileInfo) bool
ShouldFilterOut checks whether a file should be filtered out via the current client.
type LogFileManagerInit ¶
type LogFileManagerInit struct {
    StartTS                   uint64
    RestoreTS                 uint64
    Storage                   storage.ExternalStorage
    MigrationsBuilder         *WithMigrationsBuilder
    Migrations                *WithMigrations
    MetadataDownloadBatchSize uint
    EncryptionManager         *encryption.Manager
}
LogFileManagerInit is the config needed for initializing the log file manager.
type LogFilesSkipMap ¶
type LogFilesSkipMap struct {
// contains filtered or unexported fields
}
func NewLogFilesSkipMap ¶
func NewLogFilesSkipMap() *LogFilesSkipMap
func (*LogFilesSkipMap) Insert ¶
func (m *LogFilesSkipMap) Insert(metaKey string, groupOff, fileOff int)
type LogFilesSkipMapExt ¶
type LogFilesSkipMapExt struct {
// contains filtered or unexported fields
}
func NewLogFilesSkipMapExt ¶
func NewLogFilesSkipMapExt() *LogFilesSkipMapExt
func (*LogFilesSkipMapExt) Insert ¶
func (m *LogFilesSkipMapExt) Insert(metaKey string, groupOff, fileOff int)
func (*LogFilesSkipMapExt) NeedSkip ¶
func (m *LogFilesSkipMapExt) NeedSkip(metaKey string, groupOff, fileOff int) bool
func (*LogFilesSkipMapExt) SkipGroup ¶
func (m *LogFilesSkipMapExt) SkipGroup(metaKey string, groupOff int)
func (*LogFilesSkipMapExt) SkipMeta ¶
func (m *LogFilesSkipMapExt) SkipMeta(metaKey string)
type LogIter ¶
type LogIter = iter.TryNextor[*LogDataFileInfo]
LogIter is the type of iterator over each log file's meta information.
func WrapLogFilesIterWithCheckpointFailpoint ¶
func WrapLogFilesIterWithCheckpointFailpoint(
    v failpoint.Value,
    logIter LogIter,
    rules map[int64]*restoreutils.RewriteRules,
) (LogIter, error)
type LogRestoreManager ¶
type LogRestoreManager struct {
// contains filtered or unexported fields
}
LogRestoreManager is a comprehensive wrapper that encapsulates all logic related to log restoration, including concurrency management, checkpoint handling, and file importing for efficient log processing.
func NewLogRestoreManager ¶
func NewLogRestoreManager(
    ctx context.Context,
    fileImporter *LogFileImporter,
    poolSize uint,
    createCheckpointSessionFn func() (glue.Session, error),
) (*LogRestoreManager, error)
func (*LogRestoreManager) Close ¶
func (l *LogRestoreManager) Close(ctx context.Context)
type LogSplitStrategy ¶
type LogSplitStrategy struct {
    *split.BaseSplitStrategy
    // contains filtered or unexported fields
}
func NewLogSplitStrategy ¶
func NewLogSplitStrategy(
    ctx context.Context,
    useCheckpoint bool,
    execCtx sqlexec.RestrictedSQLExecutor,
    rules map[int64]*restoreutils.RewriteRules,
    updateStatsFn func(uint64, uint64),
) (*LogSplitStrategy, error)
func (*LogSplitStrategy) Accumulate ¶
func (ls *LogSplitStrategy) Accumulate(file *LogDataFileInfo)
func (*LogSplitStrategy) ShouldSkip ¶
func (ls *LogSplitStrategy) ShouldSkip(file *LogDataFileInfo) bool
func (*LogSplitStrategy) ShouldSplit ¶
func (ls *LogSplitStrategy) ShouldSplit() bool
type MetaGroupIter ¶
type MetaGroupIter = iter.TryNextor[DDLMetaGroup]
MetaGroupIter is the iterator of flushes of metadata.
type MetaMigrationsIter ¶
type MetaMigrationsIter = iter.TryNextor[*MetaWithMigrations]
type MetaNameIter ¶
MetaNameIter is the type of iterator over metadata files' content together with their names.
type MetaWithMigrations ¶
type MetaWithMigrations struct {
// contains filtered or unexported fields
}
func (*MetaWithMigrations) Physicals ¶
func (mwm *MetaWithMigrations) Physicals(groupIndexIter GroupIndexIter) PhysicalMigrationsIter
type PhysicalMigrationsIter ¶
type PhysicalMigrationsIter = iter.TryNextor[*PhysicalWithMigrations]
type PhysicalWithMigrations ¶
type PhysicalWithMigrations struct {
// contains filtered or unexported fields
}
func (*PhysicalWithMigrations) Logicals ¶
func (pwm *PhysicalWithMigrations) Logicals(fileIndexIter FileIndexIter) FileIndexIter
type RPCResult ¶
RPCResult is the result after executing some RPCs to TiKV.
func RPCResultFromError ¶
func RPCResultFromPBError ¶
func RPCResultFromPBError(err *import_sstpb.Error) RPCResult
func RPCResultOK ¶
func RPCResultOK() RPCResult
func (*RPCResult) StrategyForRetry ¶
func (r *RPCResult) StrategyForRetry() RetryStrategy
func (*RPCResult) StrategyForRetryGoError ¶
func (r *RPCResult) StrategyForRetryGoError() RetryStrategy
func (*RPCResult) StrategyForRetryStoreError ¶
func (r *RPCResult) StrategyForRetryStoreError() RetryStrategy
type RangeController ¶
type RangeController struct {
// contains filtered or unexported fields
}
RangeController manages the execution of operations over a range of regions. It provides functionality to scan regions within a specified key range and apply a given function to each region, handling errors and retries automatically.
func CreateRangeController ¶
func CreateRangeController(start, end []byte, metaClient split.SplitClient, retryStatus *utils.RetryState) RangeController
CreateRangeController creates a controller that could be used to scan regions in a range and apply a function over these regions. You can then call `ApplyFuncToRange` to apply some function.
func (*RangeController) ApplyFuncToRange ¶
func (o *RangeController) ApplyFuncToRange(ctx context.Context, f RegionFunc) error
ApplyFuncToRange applies the `regionFunc` to all regions between `o.start` and `o.end`. It retries errors according to the RPC result.
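A hedged sketch of running a per-region operation over a key range; doImport is a placeholder for the real per-region RPC, the utils.InitialRetryState helper and RPCResultFromError's error argument are assumptions, and the import paths follow the TiDB repository layout:

import (
    "context"
    "time"

    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/restore/split"
    "github.com/pingcap/tidb/br/pkg/utils"
)

// importRange applies a caller-supplied per-region operation to every region
// overlapping [start, end), retrying according to each RPCResult.
func importRange(
    ctx context.Context,
    start, end []byte,
    metaClient split.SplitClient,
    doImport func(ctx context.Context, r *split.RegionInfo) error, // placeholder for the real RPC
) error {
    // utils.InitialRetryState is assumed to build a RetryState from the given
    // attempt count and backoff bounds; tune these for the workload.
    retryState := utils.InitialRetryState(32, time.Second, 30*time.Second)
    ctl := logclient.CreateRangeController(start, end, metaClient, &retryState)

    return ctl.ApplyFuncToRange(ctx, func(ctx context.Context, r *split.RegionInfo) logclient.RPCResult {
        if err := doImport(ctx, r); err != nil {
            // RPCResultFromError is assumed to wrap a plain Go error.
            return logclient.RPCResultFromError(err)
        }
        return logclient.RPCResultOK()
    })
}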
type RegionFunc ¶
type RegionFunc func(ctx context.Context, r *split.RegionInfo) RPCResult
type RetryStrategy ¶
type RetryStrategy int
const (
    StrategyGiveUp RetryStrategy = iota
    StrategyFromThisRegion
    StrategyFromStart
)
type SstRestoreManager ¶
type SstRestoreManager struct {
// contains filtered or unexported fields
}
SstRestoreManager is a comprehensive wrapper that encapsulates all logic related to SST restoration, including concurrency management, checkpoint handling, and file importing (splitting) for efficient log processing.
func NewSstRestoreManager ¶
func NewSstRestoreManager(
    ctx context.Context,
    snapFileImporter *snapclient.SnapFileImporter,
    concurrencyPerStore uint,
    storeCount uint,
    createCheckpointSessionFn func() (glue.Session, error),
) (*SstRestoreManager, error)
func (*SstRestoreManager) Close ¶
func (s *SstRestoreManager) Close(ctx context.Context)
type SubCompactionIter ¶
type SubCompactionIter iter.TryNextor[*backuppb.LogFileSubcompaction]
func Subcompactions ¶
func Subcompactions(ctx context.Context, prefix string, s storage.ExternalStorage) SubCompactionIter
type WithMigrations ¶
type WithMigrations struct {
// contains filtered or unexported fields
}
func (*WithMigrations) Compactions ¶
func (wm *WithMigrations) Compactions(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.LogFileSubcompaction]
func (*WithMigrations) Metas ¶
func (wm *WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter
type WithMigrationsBuilder ¶
type WithMigrationsBuilder struct {
// contains filtered or unexported fields
}
func (*WithMigrationsBuilder) Build ¶
func (builder *WithMigrationsBuilder) Build(migs []*backuppb.Migration) WithMigrations
Build creates the wrapper from the given migrations.
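A sketch of loading the stored migrations and wiring them into a manager via Build and BuildMigrations; iter.CollectAll is assumed to drain a TryNextor into a slice-valued result, and the import paths are assumptions:

import (
    "context"

    logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
    "github.com/pingcap/tidb/br/pkg/storage"
    "github.com/pingcap/tidb/br/pkg/utils/iter"
)

// loadAndBuildMigrations collects the stored migrations and wires them into the manager.
func loadAndBuildMigrations(
    ctx context.Context,
    s storage.ExternalStorage,
    builder *logclient.WithMigrationsBuilder,
    mgr *logclient.LogFileManager,
) (logclient.WithMigrations, error) {
    res := iter.CollectAll(ctx, logclient.LoadMigrations(ctx, s))
    if res.Err != nil {
        return logclient.WithMigrations{}, res.Err
    }
    migs := res.Item // []*backuppb.Migration
    mgr.BuildMigrations(migs)
    return builder.Build(migs), nil
}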
func (*WithMigrationsBuilder) SetShiftStartTS ¶
func (builder *WithMigrationsBuilder) SetShiftStartTS(ts uint64)