Documentation ¶
Index ¶
- Constants
- func AttachFilesToRanges(files []*backup.File, ranges []rtree.Range) []rtree.Range
- func EstimateRangeSize(files []*backup.File) int
- func Exhaust(ec <-chan error) []error
- func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*model.Job)
- func GetSSTMetaFromFile(id []byte, file *backup.File, region *metapb.Region, ...) import_sstpb.SSTMeta
- func GetSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte
- func GoValidateFileRanges(ctx context.Context, tableStream <-chan CreatedTable, ...) <-chan TableWithRange
- func MapTableToFiles(files []*backup.File) map[int64][]*backup.File
- func NewBackoffer(attempt int, delayTime, maxDelayTime time.Duration) utils.Backoffer
- func ParseQuoteName(name string) (string, string)
- func SortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range, error)
- func SplitRanges(ctx context.Context, client *Client, ranges []rtree.Range, ...) error
- func ValidateFileRanges(files []*backup.File, rewriteRules *RewriteRules) ([]rtree.Range, error)
- func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error
- func ZapTables(tables []CreatedTable) zapcore.Field
- type BatchSender
- type Batcher
- func (b *Batcher) Add(tbs TableWithRange)
- func (b *Batcher) Close()
- func (b *Batcher) DisableAutoCommit()
- func (b *Batcher) EnableAutoCommit(ctx context.Context, delay time.Duration)
- func (b *Batcher) Len() int
- func (b *Batcher) Send(ctx context.Context)
- func (b *Batcher) SetThreshold(newThreshold int)
- type Client
- func (rc *Client) Close()
- func (rc *Client) CreateDatabase(ctx context.Context, db *model.DBInfo) error
- func (rc *Client) CreateTables(dom *domain.Domain, tables []*utils.Table, newTS uint64) (*RewriteRules, []*model.TableInfo, error)
- func (rc *Client) EnableOnline()
- func (rc *Client) EnableSkipCreateSQL()
- func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error
- func (rc *Client) GetDDLJobs() []*model.Job
- func (rc *Client) GetDatabase(name string) *utils.Database
- func (rc *Client) GetDatabases() []*utils.Database
- func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error)
- func (rc *Client) GetFilesInTxnRange() ([]*backup.File, error)
- func (rc *Client) GetPDClient() pd.Client
- func (rc *Client) GetPlacementRules(ctx context.Context, pdAddrs []string) ([]placement.Rule, error)
- func (rc *Client) GetTLSConfig() *tls.Config
- func (rc *Client) GetTS(ctx context.Context) (uint64, error)
- func (rc *Client) GetTableSchema(dom *domain.Domain, dbName model.CIStr, tableName model.CIStr) (*model.TableInfo, error)
- func (rc *Client) GoCreateTables(ctx context.Context, dom *domain.Domain, tables []*utils.Table, newTS uint64, ...) <-chan CreatedTable
- func (rc *Client) GoValidateChecksum(ctx context.Context, tableStream <-chan CreatedTable, kvClient kv.Client, ...) <-chan struct{}
- func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error
- func (rc *Client) IsIncremental() bool
- func (rc *Client) IsOnline() bool
- func (rc *Client) IsRawKvMode() bool
- func (rc *Client) IsSkipCreateSQL() bool
- func (rc *Client) LoadRestoreStores(ctx context.Context) error
- func (rc *Client) ResetPlacementRules(ctx context.Context, tables []*model.TableInfo) error
- func (rc *Client) ResetRestoreLabels(ctx context.Context) error
- func (rc *Client) ResetTS(ctx context.Context, pdAddrs []string) error
- func (rc *Client) RestoreFiles(ctx context.Context, files []*backup.File, rewriteRules *RewriteRules, ...) (err error)
- func (rc *Client) RestoreRaw(ctx context.Context, startKey []byte, endKey []byte, files []*backup.File, ...) error
- func (rc *Client) RestoreTxn(ctx context.Context, files []*backup.File, updateCh glue.Progress) error
- func (rc *Client) SetConcurrency(c uint)
- func (rc *Client) SetRateLimit(rateLimit uint64)
- func (rc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error
- func (rc *Client) SetSwitchModeInterval(interval time.Duration)
- func (rc *Client) SetupPlacementRules(ctx context.Context, tables []*model.TableInfo) error
- func (rc *Client) SwitchToImportMode(ctx context.Context)
- func (rc *Client) SwitchToNormalMode(ctx context.Context) error
- func (rc *Client) WaitPlacementSchedule(ctx context.Context, tables []*model.TableInfo) error
- type ContextManager
- type CreatedTable
- type DB
- type DrainResult
- type FileImporter
- type ImporterClient
- type LogClient
- func (l *LogClient) Ingest(ctx context.Context, meta *sst.SSTMeta, region *RegionInfo) (*sst.IngestResponse, error)
- func (l *LogClient) NeedRestoreDDL(fileName string) (bool, error)
- func (l *LogClient) NeedRestoreRowChange(fileName string) (bool, error)
- func (l *LogClient) ResetTSRange(startTS uint64, endTS uint64)
- func (l *LogClient) RestoreLogData(ctx context.Context, dom *domain.Domain) error
- type LogMeta
- type OnSplitFunc
- type RegionInfo
- type RegionSplitter
- type RewriteRules
- type SendType
- type SplitClient
- type TableSink
- type TableWithRange
Constants ¶
const (
    SplitRetryTimes       = 32
    SplitRetryInterval    = 50 * time.Millisecond
    SplitMaxRetryInterval = time.Second

    SplitCheckMaxRetryTimes = 64
    SplitCheckInterval      = 8 * time.Millisecond
    SplitMaxCheckInterval   = time.Second

    ScatterWaitMaxRetryTimes = 64
    ScatterWaitInterval      = 50 * time.Millisecond
    ScatterMaxWaitInterval   = time.Second
    ScatterWaitUpperInterval = 180 * time.Second

    RejectStoreCheckRetryTimes  = 64
    RejectStoreCheckInterval    = 100 * time.Millisecond
    RejectStoreMaxCheckInterval = 2 * time.Second
)
Constants for split retry machinery.
Variables ¶
This section is empty.
Functions ¶
func AttachFilesToRanges ¶
AttachFilesToRanges attaches files to ranges. It panics if ranges overlap or a file has no matching range. nolint:staticcheck
func EstimateRangeSize ¶
EstimateRangeSize estimates the total number of ranges represented by the files.
func FilterDDLJobs ¶
FilterDDLJobs filters DDL jobs.
func GetSSTMetaFromFile ¶
func GetSSTMetaFromFile( id []byte, file *backup.File, region *metapb.Region, regionRule *import_sstpb.RewriteRule, ) import_sstpb.SSTMeta
GetSSTMetaFromFile compares the keys in the file, the region, and the rewrite rules, then returns an SST meta. The range of the returned SST meta is [regionRule.NewKeyPrefix, append(regionRule.NewKeyPrefix, 0xff)].
func GetSplitKeys ¶
func GetSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte
GetSplitKeys checks whether the regions should be split by the new prefixes of the rewrite rules and the end keys of the ranges, and groups the split keys by region ID.
func GoValidateFileRanges ¶
func GoValidateFileRanges( ctx context.Context, tableStream <-chan CreatedTable, fileOfTable map[int64][]*backup.File, errCh chan<- error, ) <-chan TableWithRange
GoValidateFileRanges validates files against a stream of tables and yields tables with their key ranges.
func MapTableToFiles ¶
MapTableToFiles makes a map from table ID to the table's backup files. Be aware that one file can hold data for one and only one table.
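For example, a minimal sketch of grouping a backup's files by table ID (the import paths below are assumptions based on this package's identifiers):

package example

import (
    "fmt"

    "github.com/pingcap/kvproto/pkg/backup"

    "github.com/pingcap/br/pkg/restore" // assumed import path of this package
)

func printFilesPerTable(files []*backup.File) {
    fileOfTable := restore.MapTableToFiles(files)
    for tableID, tableFiles := range fileOfTable {
        fmt.Printf("table %d has %d backup files\n", tableID, len(tableFiles))
    }
}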
func NewBackoffer ¶
NewBackoffer creates a new controller regulating a truncated exponential backoff.
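A small sketch of building a backoffer tuned like the split retry constants above (the utils import path is an assumption):

package example

import (
    "github.com/pingcap/br/pkg/restore" // assumed import path
    "github.com/pingcap/br/pkg/utils"   // assumed import path
)

// newSplitBackoffer builds a backoffer using the split retry constants.
func newSplitBackoffer() utils.Backoffer {
    return restore.NewBackoffer(
        restore.SplitRetryTimes,       // number of attempts
        restore.SplitRetryInterval,    // initial delay
        restore.SplitMaxRetryInterval, // delay cap
    )
}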
func ParseQuoteName ¶
ParseQuoteName parses a quoted `db`.`table` name and splits it into its database and table parts.
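For instance (a sketch; it assumes the returned strings are the unquoted database and table names):

package example

import (
    "fmt"

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func splitName() {
    db, table := restore.ParseQuoteName("`test`.`orders`")
    fmt.Println(db, table) // expected: test orders
}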
func SortRanges ¶
SortRanges checks whether the ranges overlap and sorts them.
func SplitRanges ¶
func SplitRanges( ctx context.Context, client *Client, ranges []rtree.Range, rewriteRules *RewriteRules, updateCh glue.Progress, ) error
SplitRanges splits regions by 1. the data ranges after rewrite and 2. the rewrite rules.
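A sketch of the usual pre-ingest sequence, sorting the ranges and then splitting regions (import paths are assumptions):

package example

import (
    "context"

    "github.com/pingcap/br/pkg/glue"    // assumed import path
    "github.com/pingcap/br/pkg/restore" // assumed import path
    "github.com/pingcap/br/pkg/rtree"   // assumed import path
)

func sortAndSplit(
    ctx context.Context,
    client *restore.Client,
    ranges []rtree.Range,
    rules *restore.RewriteRules,
    updateCh glue.Progress,
) error {
    // Sort (and overlap-check) the ranges first, then split regions by them.
    sorted, err := restore.SortRanges(ranges, rules)
    if err != nil {
        return err
    }
    return restore.SplitRanges(ctx, client, sorted, rules, updateCh)
}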
func ValidateFileRanges ¶
ValidateFileRanges checks and returns the ranges of the files.
func ValidateFileRewriteRule ¶
func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error
ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file.
func ZapTables ¶
func ZapTables(tables []CreatedTable) zapcore.Field
ZapTables makes a zap field of the tables for debugging, including the table names.
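For example (a sketch; it assumes the pingcap/log package is used for structured logging, as elsewhere in BR):

package example

import (
    "github.com/pingcap/log"

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func logRestoredTables(tables []restore.CreatedTable) {
    // ZapTables packs the table names into a single structured log field.
    log.Info("tables fully restored", restore.ZapTables(tables))
}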
Types ¶
type BatchSender ¶
type BatchSender interface {
    // PutSink sets the sink of this sender. Users of this interface must
    // call it at least once before the first call to RestoreBatch.
    PutSink(sink TableSink)
    // RestoreBatch will send the restore request.
    RestoreBatch(ranges DrainResult)
    Close()
}
BatchSender is the abstraction of how the batcher sends a batch.
func NewTiKVSender ¶
func NewTiKVSender( ctx context.Context, cli *Client, updateCh glue.Progress, ) (BatchSender, error)
NewTiKVSender makes a sender that sends restore requests to TiKV.
type Batcher ¶
type Batcher struct {
// contains filtered or unexported fields
}
Batcher collects ranges to restore and sends batched split/ingest requests.
func NewBatcher ¶
func NewBatcher( ctx context.Context, sender BatchSender, manager ContextManager, errCh chan<- error, ) (*Batcher, <-chan CreatedTable)
NewBatcher creates a new batcher from a sender and a context manager. The sender defines how to restore a batch (i.e. where to send, or 'push down', the task). The context manager defines the lifetime of restoring tables (i.e. how to enter and exit 'restore' mode). The batcher works in the background, sending a batch once per second or when the batch size reaches its limit, and emits fully-restored tables to the returned output channel.
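A sketch of wiring a batcher together with a TiKV sender and a BR context manager (the threshold and interval values are illustrative, and the import paths are assumptions):

package example

import (
    "context"
    "time"

    "github.com/pingcap/br/pkg/glue"    // assumed import path
    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func startBatcher(
    ctx context.Context,
    cli *restore.Client,
    updateCh glue.Progress,
    errCh chan error,
) (<-chan restore.CreatedTable, error) {
    sender, err := restore.NewTiKVSender(ctx, cli, updateCh)
    if err != nil {
        return nil, err
    }
    manager := restore.NewBRContextManager(cli)
    batcher, outCh := restore.NewBatcher(ctx, sender, manager, errCh)
    batcher.SetThreshold(128)                  // send once 128 ranges are pending (illustrative)
    batcher.EnableAutoCommit(ctx, time.Second) // also flush once per second
    return outCh, nil
}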
func (*Batcher) Close ¶
func (b *Batcher) Close()
Close closes the batcher, sends all pending requests, and closes updateCh.
func (*Batcher) DisableAutoCommit ¶
func (b *Batcher) DisableAutoCommit()
DisableAutoCommit blocks the current goroutine until the worker can gracefully stop, and then disables auto commit.
func (*Batcher) EnableAutoCommit ¶
EnableAutoCommit enables the batcher to commit batches periodically even when the batch size isn't big enough. This is a separate function so that auto commit can be disabled in some cases.
func (*Batcher) Send ¶
Send sends all pending requests in the batcher. It returns the tables sent FULLY in the current batch.
func (*Batcher) SetThreshold ¶
SetThreshold sets the batch size threshold at which a batch needs to be sent. Note that this function isn't goroutine-safe yet; set the threshold before anything starts (e.g. EnableAutoCommit).
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
Client sends requests to restore files.
func NewRestoreClient ¶
func NewRestoreClient( g glue.Glue, pdClient pd.Client, store kv.Storage, tlsConf *tls.Config, keepaliveConf keepalive.ClientParameters, ) (*Client, error)
NewRestoreClient returns a new RestoreClient.
func (*Client) CreateDatabase ¶
CreateDatabase creates a database.
func (*Client) CreateTables ¶
func (rc *Client) CreateTables( dom *domain.Domain, tables []*utils.Table, newTS uint64, ) (*RewriteRules, []*model.TableInfo, error)
CreateTables creates multiple tables, and returns their rewrite rules.
func (*Client) EnableOnline ¶
func (rc *Client) EnableOnline()
EnableOnline sets the mode of restore to online.
func (*Client) EnableSkipCreateSQL ¶
func (rc *Client) EnableSkipCreateSQL()
EnableSkipCreateSQL turns on skipping the creation of schemas and tables.
func (*Client) GetDDLJobs ¶
GetDDLJobs returns the DDL jobs.
func (*Client) GetDatabase ¶
GetDatabase returns a database by name.
func (*Client) GetDatabases ¶
GetDatabases returns all databases.
func (*Client) GetFilesInRawRange ¶
func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error)
GetFilesInRawRange gets all files that are in or intersect with the given range.
func (*Client) GetFilesInTxnRange ¶
GetFilesInTxnRange gets all files that are in or intersect with the given range.
func (*Client) GetPDClient ¶
GetPDClient returns the PD client.
func (*Client) GetPlacementRules ¶
func (rc *Client) GetPlacementRules(ctx context.Context, pdAddrs []string) ([]placement.Rule, error)
GetPlacementRules returns the current placement rules.
func (*Client) GetTLSConfig ¶
GetTLSConfig returns the TLS config.
func (*Client) GetTableSchema ¶
func (rc *Client) GetTableSchema( dom *domain.Domain, dbName model.CIStr, tableName model.CIStr, ) (*model.TableInfo, error)
GetTableSchema returns the schema of a table from TiDB.
func (*Client) GoCreateTables ¶
func (rc *Client) GoCreateTables( ctx context.Context, dom *domain.Domain, tables []*utils.Table, newTS uint64, dbPool []*DB, errCh chan<- error, ) <-chan CreatedTable
GoCreateTables creates tables and generates their information. This function uses as many workers as there are sessions in dbPool; leave dbPool nil to send DDLs sequentially.
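A sketch of chaining the table-creation stage into the range-validation stage (import paths and the use of a nil dbPool are assumptions):

package example

import (
    "context"

    "github.com/pingcap/kvproto/pkg/backup"
    "github.com/pingcap/tidb/domain"

    "github.com/pingcap/br/pkg/restore" // assumed import path
    "github.com/pingcap/br/pkg/utils"   // assumed import path
)

func buildTableStream(
    ctx context.Context,
    rc *restore.Client,
    dom *domain.Domain,
    tables []*utils.Table,
    newTS uint64,
    files []*backup.File,
    errCh chan error,
) <-chan restore.TableWithRange {
    // Stage 1: create target tables; nil dbPool means DDLs are sent sequentially.
    tableStream := rc.GoCreateTables(ctx, dom, tables, newTS, nil, errCh)
    // Stage 2: attach each table's backup files and validate their ranges.
    fileOfTable := restore.MapTableToFiles(files)
    return restore.GoValidateFileRanges(ctx, tableStream, fileOfTable, errCh)
}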
func (*Client) GoValidateChecksum ¶
func (rc *Client) GoValidateChecksum( ctx context.Context, tableStream <-chan CreatedTable, kvClient kv.Client, errCh chan<- error, updateCh glue.Progress, concurrency uint, ) <-chan struct{}
GoValidateChecksum forks a goroutine to validate checksums after restore. It returns a channel that fires a struct{} when everything is done.
func (*Client) InitBackupMeta ¶
func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error
InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient.
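A sketch of initializing a client from a loaded backup meta and inspecting what it parsed (import paths are assumptions; the incremental check is illustrative):

package example

import (
    "fmt"

    "github.com/pingcap/kvproto/pkg/backup"

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func loadMeta(rc *restore.Client, meta *backup.BackupMeta, backend *backup.StorageBackend) error {
    if err := rc.InitBackupMeta(meta, backend); err != nil {
        return err
    }
    fmt.Printf("backup contains %d databases\n", len(rc.GetDatabases()))
    if rc.IsIncremental() {
        // Incremental backups also carry DDL jobs to replay (assumption).
        fmt.Printf("with %d DDL jobs\n", len(rc.GetDDLJobs()))
    }
    return nil
}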
func (*Client) IsIncremental ¶
IsIncremental returns whether this backup is incremental.
func (*Client) IsRawKvMode ¶
IsRawKvMode checks whether the backup data is in raw KV format, in which case transactional recovery is forbidden.
func (*Client) IsSkipCreateSQL ¶
IsSkipCreateSQL returns whether to skip creating schemas and tables during restore.
func (*Client) LoadRestoreStores ¶
LoadRestoreStores loads the stores used to restore data.
func (*Client) ResetPlacementRules ¶
ResetPlacementRules removes placement rules for tables.
func (*Client) ResetRestoreLabels ¶
ResetRestoreLabels removes the exclusive labels of the restore stores.
func (*Client) RestoreFiles ¶
func (rc *Client) RestoreFiles( ctx context.Context, files []*backup.File, rewriteRules *RewriteRules, updateCh glue.Progress, ) (err error)
RestoreFiles tries to restore the files.
func (*Client) RestoreRaw ¶
func (rc *Client) RestoreRaw( ctx context.Context, startKey []byte, endKey []byte, files []*backup.File, updateCh glue.Progress, ) error
RestoreRaw tries to restore raw keys in the specified range.
func (*Client) RestoreTxn ¶
func (rc *Client) RestoreTxn( ctx context.Context, files []*backup.File, updateCh glue.Progress, ) error
RestoreTxn tries to restore txn keys in the specified range.
func (*Client) SetConcurrency ¶
SetConcurrency sets the concurrency used for restoring databases, tables, and files.
func (*Client) SetRateLimit ¶
SetRateLimit sets the rate limit.
func (*Client) SetStorage ¶
func (rc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error
SetStorage sets the ExternalStorage for the client.
func (*Client) SetSwitchModeInterval ¶
SetSwitchModeInterval sets the switch-mode interval for the client.
func (*Client) SetupPlacementRules ¶
SetupPlacementRules sets rules for the tables' regions.
func (*Client) SwitchToImportMode ¶
SwitchToImportMode switches the TiKV cluster to import mode.
func (*Client) SwitchToNormalMode ¶
SwitchToNormalMode switches the TiKV cluster to normal mode.
type ContextManager ¶
type ContextManager interface {
    // Enter makes some tables 'enter' this context (i.e. prepare for restore).
    Enter(ctx context.Context, tables []CreatedTable) error
    // Leave makes some tables 'leave' this context (i.e. restore is done, do some post-work).
    Leave(ctx context.Context, tables []CreatedTable) error
    // Close closes the context manager. It is called when the manager is
    // 'killed' and should do some cleanup.
    Close(ctx context.Context)
}
ContextManager manages a TiKV 'context' for restore. Batcher calls Enter when some tables are about to be restored in a batch, so preparatory work (e.g. setting placement rules for online restore) can be done here.
func NewBRContextManager ¶
func NewBRContextManager(client *Client) ContextManager
NewBRContextManager makes a BR context manager, which sets placement rules for online restore on Enter (see <splitPrepareWork>) and unsets them on Leave.
type CreatedTable ¶
type CreatedTable struct {
    RewriteRule *RewriteRules
    Table       *model.TableInfo
    OldTable    *utils.Table
}
CreatedTable is a table created during the restore process, but not yet filled with data.
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
DB is a TiDB instance, not thread-safe.
func MakeDBPool ¶
MakeDBPool makes a session pool of the specified size using sessionFactory.
func (*DB) CreateDatabase ¶
CreateDatabase executes a CREATE DATABASE SQL.
func (*DB) CreateTable ¶
CreateTable executes a CREATE TABLE SQL.
type DrainResult ¶
type DrainResult struct {
    // TablesToSend are tables that will be sent in this batch.
    TablesToSend []CreatedTable
    // BlankTablesAfterSend are tables that will be fully restored after this batch is sent.
    BlankTablesAfterSend []CreatedTable
    RewriteRules         *RewriteRules
    Ranges               []rtree.Range
}
DrainResult is a collection of ranges and their metadata.
func (DrainResult) Files ¶
func (result DrainResult) Files() []*backup.File
Files returns all files of this drain result.
type FileImporter ¶
type FileImporter struct {
// contains filtered or unexported fields
}
FileImporter is used to import a file to TiKV.
func NewFileImporter ¶
func NewFileImporter( metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, isRawKvMode bool, rateLimit uint64, ) FileImporter
NewFileImporter returns a new FileImporter.
func (*FileImporter) Import ¶
func (importer *FileImporter) Import( ctx context.Context, file *backup.File, rewriteRules *RewriteRules, ) error
Import tries to import a file. All rules must contain encoded keys.
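For example, a sketch that imports a batch of files one by one (import paths are assumptions):

package example

import (
    "context"

    "github.com/pingcap/kvproto/pkg/backup"

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func importAll(
    ctx context.Context,
    importer restore.FileImporter,
    files []*backup.File,
    rules *restore.RewriteRules,
) error {
    for _, f := range files {
        // As noted above, all rewrite rules must contain encoded keys here.
        if err := importer.Import(ctx, f, rules); err != nil {
            return err
        }
    }
    return nil
}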
func (*FileImporter) SetRawRange ¶
func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error
SetRawRange sets the range to be restored in raw kv mode.
type ImporterClient ¶
type ImporterClient interface {
    DownloadSST(
        ctx context.Context,
        storeID uint64,
        req *import_sstpb.DownloadRequest,
    ) (*import_sstpb.DownloadResponse, error)

    IngestSST(
        ctx context.Context,
        storeID uint64,
        req *import_sstpb.IngestRequest,
    ) (*import_sstpb.IngestResponse, error)

    SetDownloadSpeedLimit(
        ctx context.Context,
        storeID uint64,
        req *import_sstpb.SetDownloadSpeedLimitRequest,
    ) (*import_sstpb.SetDownloadSpeedLimitResponse, error)

    GetImportClient(
        ctx context.Context,
        storeID uint64,
    ) (import_sstpb.ImportSSTClient, error)
}
ImporterClient is used to import a file to TiKV.
func NewImportClient ¶
func NewImportClient(metaClient SplitClient, tlsConf *tls.Config, keepaliveConf keepalive.ClientParameters) ImporterClient
NewImportClient returns a new ImporterClient.
type LogClient ¶
type LogClient struct {
// contains filtered or unexported fields
}
LogClient sends requests to restore files.
func NewLogRestoreClient ¶
func NewLogRestoreClient( ctx context.Context, restoreClient *Client, startTS uint64, endTS uint64, tableFilter filter.Filter, concurrency uint, batchFlushPairs int, batchFlushSize int64, batchWriteKVPairs int, ) (*LogClient, error)
NewLogRestoreClient returns a new LogRestoreClient.
func (*LogClient) Ingest ¶
func (l *LogClient) Ingest(ctx context.Context, meta *sst.SSTMeta, region *RegionInfo) (*sst.IngestResponse, error)
Ingest ingests an SST to TiKV.
func (*LogClient) NeedRestoreDDL ¶
NeedRestoreDDL determines whether to collect a DDL file based on its TS range.
func (*LogClient) NeedRestoreRowChange ¶
NeedRestoreRowChange determines whether to collect a row-change file based on its TS range.
func (*LogClient) ResetTSRange ¶
ResetTSRange is used for tests.
type LogMeta ¶
type LogMeta struct {
    Names            map[int64]string `json:"names"`
    GlobalResolvedTS uint64           `json:"global_resolved_ts"`
}
LogMeta represents the log.meta generated by CDC log backup.
type RegionInfo ¶
RegionInfo includes a region and the leader of the region.
func NeedSplit ¶
func NeedSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo
NeedSplit checks whether a key needs to be split; if so, it returns the region to split.
func PaginateScanRegion ¶
func PaginateScanRegion( ctx context.Context, client SplitClient, startKey, endKey []byte, limit int, ) ([]*RegionInfo, error)
PaginateScanRegion scans regions with a pagination limit and returns all regions at once, which reduces the maximum gRPC message size.
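For example (a sketch; the page size of 128 is illustrative and the import path is an assumption):

package example

import (
    "context"
    "fmt"

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func countRegions(ctx context.Context, cli restore.SplitClient, startKey, endKey []byte) error {
    // Scan all regions overlapping [startKey, endKey), 128 regions per request.
    regions, err := restore.PaginateScanRegion(ctx, cli, startKey, endKey, 128)
    if err != nil {
        return err
    }
    fmt.Printf("%d regions in range\n", len(regions))
    return nil
}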
func (*RegionInfo) ContainsInterior ¶
func (region *RegionInfo) ContainsInterior(key []byte) bool
ContainsInterior returns whether the region contains the given key, and also that the key does not fall on the boundary (start key) of the region.
type RegionSplitter ¶
type RegionSplitter struct {
// contains filtered or unexported fields
}
RegionSplitter is an executor of region splits driven by rules.
func NewRegionSplitter ¶
func NewRegionSplitter(client SplitClient) *RegionSplitter
NewRegionSplitter returns a new RegionSplitter.
func (*RegionSplitter) Split ¶
func (rs *RegionSplitter) Split( ctx context.Context, ranges []rtree.Range, rewriteRules *RewriteRules, onSplit OnSplitFunc, ) error
Split executes a region split. It splits regions by the rewrite rules, then by the end key of each range. tableRules includes the prefix of a table, since some ranges may have a prefix with a record sequence or an index sequence. Note: all ranges and rewrite rules must have raw keys.
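A sketch of driving a split (it assumes OnSplitFunc is a callback that receives the keys used for each split; import paths are also assumptions):

package example

import (
    "context"

    "github.com/pingcap/br/pkg/restore" // assumed import path
    "github.com/pingcap/br/pkg/rtree"   // assumed import path
)

func splitByRanges(
    ctx context.Context,
    cli restore.SplitClient,
    ranges []rtree.Range,
    rules *restore.RewriteRules,
) error {
    splitter := restore.NewRegionSplitter(cli)
    onSplit := func(keys [][]byte) {
        // e.g. advance a progress bar by len(keys)
    }
    return splitter.Split(ctx, ranges, rules, onSplit)
}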
type RewriteRules ¶
type RewriteRules struct {
    Table []*import_sstpb.RewriteRule
    Data  []*import_sstpb.RewriteRule
}
RewriteRules contains rules for rewriting keys of tables.
func EmptyRewriteRule ¶
func EmptyRewriteRule() *RewriteRules
EmptyRewriteRule makes a new, empty rewrite rule.
func GetRewriteRules ¶
func GetRewriteRules( newTable *model.TableInfo, oldTable *model.TableInfo, newTimeStamp uint64, ) *RewriteRules
GetRewriteRules returns the rewrite rule of the new table and the old table.
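A sketch of collecting rewrite rules for several new/old table pairs into one set (the parser/model import path and the tablePair helper are assumptions):

package example

import (
    "github.com/pingcap/parser/model" // assumed import path

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

// tablePair is a hypothetical helper pairing a restored table with its original.
type tablePair struct {
    New, Old *model.TableInfo
}

func buildRules(pairs []tablePair, newTS uint64) *restore.RewriteRules {
    all := restore.EmptyRewriteRule()
    for _, p := range pairs {
        // Each table contributes its own table/data rewrite rules.
        all.Append(*restore.GetRewriteRules(p.New, p.Old, newTS))
    }
    return all
}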
func (*RewriteRules) Append ¶
func (r *RewriteRules) Append(other RewriteRules)
Append appends its argument to this set of rewrite rules.
type SendType ¶
type SendType int
SendType is the 'type' of a send. When we issue a 'send' command to the worker, we may want to flush all pending ranges (when auto commit is enabled), or only clean up overflowing ranges (when just adding a table to the batcher).
const (
    // SendUntilLessThanBatch will make the batcher send batch until
    // its remaining range is less than its batchSizeThreshold.
    SendUntilLessThanBatch SendType = iota
    // SendAll will make the batcher send all pending ranges.
    SendAll
    // SendAllThenClose will make the batcher send all pending ranges and then close itself.
    SendAllThenClose
)
type SplitClient ¶
type SplitClient interface {
    // GetStore gets a store by a store id.
    GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error)
    // GetRegion gets a region which includes a specified key.
    GetRegion(ctx context.Context, key []byte) (*RegionInfo, error)
    // GetRegionByID gets a region by a region id.
    GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error)
    // SplitRegion splits a region from a key; if the key is not included in the region, it returns nil.
    // Note: the key should not be encoded.
    SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error)
    // BatchSplitRegions splits a region from a batch of keys.
    // Note: the keys should not be encoded.
    BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error)
    // BatchSplitRegionsWithOrigin splits a region from a batch of keys and returns the original region and the new regions.
    BatchSplitRegionsWithOrigin(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) (*RegionInfo, []*RegionInfo, error)
    // ScatterRegion scatters a specified region.
    ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error
    // GetOperator gets the status of the operator of the specified region.
    GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error)
    // ScanRegions gets a list of regions, starting from the region that contains key.
    // Limit limits the maximum number of regions returned.
    ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error)
    // GetPlacementRule loads a placement rule from PD.
    GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error)
    // SetPlacementRule inserts or updates a placement rule in PD.
    SetPlacementRule(ctx context.Context, rule placement.Rule) error
    // DeletePlacementRule removes a placement rule from PD.
    DeletePlacementRule(ctx context.Context, groupID, ruleID string) error
    // SetStoresLabel adds or updates the specified label of stores. If labelValue
    // is empty, it clears the label.
    SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error
}
SplitClient is an external client used by RegionSplitter.
func NewSplitClient ¶
func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient
NewSplitClient returns a client used by RegionSplitter.
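For example (a sketch; the PD client import path is an assumption based on BR's dependencies of that era):

package example

import (
    "context"
    "crypto/tls"

    pd "github.com/pingcap/pd/v4/client" // assumed import path

    "github.com/pingcap/br/pkg/restore" // assumed import path
)

func lookupRegion(ctx context.Context, pdCli pd.Client, tlsConf *tls.Config, key []byte) (*restore.RegionInfo, error) {
    splitCli := restore.NewSplitClient(pdCli, tlsConf)
    // Find the region that currently contains the given key.
    return splitCli.GetRegion(ctx, key)
}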
type TableSink ¶
type TableSink interface {
    EmitTables(tables ...CreatedTable)
    EmitError(error)
    Close()
}
TableSink is the 'sink' of data restored by a sender.
type TableWithRange ¶
type TableWithRange struct {
    CreatedTable

    Range []rtree.Range
}
TableWithRange is a CreatedTable that has been bound to some key ranges.