restore

package
v5.1.5+incompatible

Warning: this package is not in the latest version of its module.
Published: Dec 14, 2022 License: Apache-2.0 Imports: 70 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// DefaultMergeRegionSizeBytes is the default region split size, 96MB.
	// See https://github.com/tikv/tikv/blob/v4.0.8/components/raftstore/src/coprocessor/config.rs#L35-L38
	DefaultMergeRegionSizeBytes uint64 = 96 * units.MiB

	// DefaultMergeRegionKeyCount is the default region key count, 960000.
	DefaultMergeRegionKeyCount uint64 = 960000
)
View Source
const (
	SplitRetryTimes       = 32
	SplitRetryInterval    = 50 * time.Millisecond
	SplitMaxRetryInterval = time.Second

	SplitCheckMaxRetryTimes = 64
	SplitCheckInterval      = 8 * time.Millisecond
	SplitMaxCheckInterval   = time.Second

	ScatterWaitMaxRetryTimes = 64
	ScatterWaitInterval      = 50 * time.Millisecond
	ScatterMaxWaitInterval   = time.Second
	ScatterWaitUpperInterval = 180 * time.Second

	ScanRegionPaginationLimit = 128

	RejectStoreCheckRetryTimes  = 64
	RejectStoreCheckInterval    = 100 * time.Millisecond
	RejectStoreMaxCheckInterval = 2 * time.Second
)

Constants for split retry machinery.

Variables

This section is empty.

Functions

func DDLJobBlockListRule

func DDLJobBlockListRule(ddlJob *model.Job) bool

DDLJobBlockListRule is a rule that filters out DDL jobs whose type is in the block list.

func EstimateRangeSize

func EstimateRangeSize(files []*backuppb.File) int

EstimateRangeSize estimates the total range count of the given files.

func Exhaust

func Exhaust(ec <-chan error) []error

Exhaust drains all remaining errors in the channel into a slice of errors.
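
A minimal usage sketch (the import path and the error channel here are assumptions, not part of the documented API):

package main

import (
	"errors"
	"fmt"

	"github.com/pingcap/br/pkg/restore" // import path assumed for BR v5.1
)

func main() {
	// Buffered channel that restore workers would report errors into.
	errCh := make(chan error, 8)
	errCh <- errors.New("example: epoch not match")

	// Drain whatever errors are currently pending into a slice.
	if errs := restore.Exhaust(errCh); len(errs) > 0 {
		fmt.Printf("restore finished with %d error(s): %v\n", len(errs), errs)
	}
}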

func FilterDDLJobByRules

func FilterDDLJobByRules(srcDDLJobs []*model.Job, rules ...DDLJobFilterRule) (dstDDLJobs []*model.Job)

FilterDDLJobByRules filters out any job in srcDDLJobs for which at least one of the rules returns true.
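
For example, a sketch combining the built-in DDLJobBlockListRule with a custom rule; the schema name and import paths are illustrative assumptions:

import (
	"github.com/pingcap/parser/model" // assumed home of model.Job in v5.1

	"github.com/pingcap/br/pkg/restore" // import path assumed
)

// keepUserJobs drops jobs of blocked types and, additionally, any job on the
// hypothetical `test` schema. A job is dropped as soon as one rule returns true.
func keepUserJobs(jobs []*model.Job) []*model.Job {
	skipTestSchema := func(job *model.Job) bool {
		return job.SchemaName == "test"
	}
	return restore.FilterDDLJobByRules(jobs, restore.DDLJobBlockListRule, skipTestSchema)
}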

func FilterDDLJobs

func FilterDDLJobs(allDDLJobs []*model.Job, tables []*metautil.Table) (ddlJobs []*model.Job)

FilterDDLJobs filters the DDL jobs.

func GetSSTMetaFromFile

func GetSSTMetaFromFile(
	id []byte,
	file *backuppb.File,
	region *metapb.Region,
	regionRule *import_sstpb.RewriteRule,
) import_sstpb.SSTMeta

GetSSTMetaFromFile compares the keys in the file, the region, and the rewrite rules, then returns a new SSTMeta. The range of the returned SSTMeta is [regionRule.NewKeyPrefix, append(regionRule.NewKeyPrefix, 0xff)].

func GoValidateFileRanges

func GoValidateFileRanges(
	ctx context.Context,
	tableStream <-chan CreatedTable,
	fileOfTable map[int64][]*backuppb.File,
	splitSizeBytes, splitKeyCount uint64,
	errCh chan<- error,
) <-chan TableWithRange

GoValidateFileRanges validates files against a stream of tables and yields tables with their key ranges.

func MapTableToFiles

func MapTableToFiles(files []*backuppb.File) map[int64][]*backuppb.File

MapTableToFiles builds a map from table ID to its backup files. Be aware that a single file can hold data of one and only one table.
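
A sketch that combines MapTableToFiles with EstimateRangeSize to gauge per-table range counts (import paths assumed):

import (
	backuppb "github.com/pingcap/kvproto/pkg/backup" // assumed alias

	"github.com/pingcap/br/pkg/restore" // import path assumed
)

// rangesPerTable groups backup files by table ID and estimates how many
// ranges each table's files cover.
func rangesPerTable(files []*backuppb.File) map[int64]int {
	counts := make(map[int64]int)
	for tableID, tableFiles := range restore.MapTableToFiles(files) {
		counts[tableID] = restore.EstimateRangeSize(tableFiles)
	}
	return counts
}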

func ParseQuoteName

func ParseQuoteName(name string) (db, table string)

ParseQuoteName parses a quoted `db`.`table` name and splits it into its database and table parts.
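
For example:

db, table := restore.ParseQuoteName("`mydb`.`mytable`")
// db == "mydb", table == "mytable"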

func SortRanges

func SortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range, error)

SortRanges checks whether the ranges overlap and sorts them.

func SplitRanges

func SplitRanges(
	ctx context.Context,
	client *Client,
	ranges []rtree.Range,
	rewriteRules *RewriteRules,
	updateCh glue.Progress,
) error

SplitRanges splits regions by: 1. the data ranges after rewrite; 2. the rewrite rules.

func ValidateFileRewriteRule

func ValidateFileRewriteRule(file *backuppb.File, rewriteRules *RewriteRules) error

ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file.

func ZapTables

func ZapTables(tables []CreatedTable) zapcore.Field

ZapTables makes a zap field of the tables for debugging, including table names.

Types

type BatchSender

type BatchSender interface {
	// PutSink sets the sink of this sender. Users of this interface promise
	// to call this function at least once before the first call to `RestoreBatch`.
	PutSink(sink TableSink)
	// RestoreBatch will send the restore request.
	RestoreBatch(ranges DrainResult)
	Close()
}

BatchSender is the abstraction of how the batcher sends a batch.

func NewTiKVSender

func NewTiKVSender(
	ctx context.Context,
	cli *Client,
	updateCh glue.Progress,
) (BatchSender, error)

NewTiKVSender makes a sender that sends restore requests to TiKV.

type Batcher

type Batcher struct {
	// contains filtered or unexported fields
}

Batcher collects ranges to restore and sends batched split/ingest requests.

func NewBatcher

func NewBatcher(
	ctx context.Context,
	sender BatchSender,
	manager ContextManager,
	errCh chan<- error,
) (*Batcher, <-chan CreatedTable)

NewBatcher creates a new batcher from a sender and a context manager. The sender defines how to 'restore' a batch (i.e., where the task is sent or 'pushed down'). The context manager defines the 'lifetime' of restoring tables (i.e., how to enter 'restore' mode and how to exit it). The batcher works in the background, sending a batch every second or whenever the batch size reaches its limit, and emits fully-restored tables to the returned output channel.
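
A wiring sketch of the batcher lifecycle; everything outside the documented API (names, import paths, the threshold value) is an assumption:

import (
	"context"
	"time"

	"github.com/pingcap/br/pkg/glue"    // assumed path of glue.Progress
	"github.com/pingcap/br/pkg/restore" // assumed package import path
)

// runBatcher wires a TiKV sender and a BR context manager into a batcher,
// then feeds it tables from tableStream. The returned channel yields
// fully-restored tables.
func runBatcher(
	ctx context.Context,
	rc *restore.Client,
	updateCh glue.Progress,
	tableStream <-chan restore.TableWithRange,
	errCh chan error,
) (<-chan restore.CreatedTable, error) {
	sender, err := restore.NewTiKVSender(ctx, rc, updateCh)
	if err != nil {
		return nil, err
	}
	manager := restore.NewBRContextManager(rc)

	batcher, outCh := restore.NewBatcher(ctx, sender, manager, errCh)
	batcher.SetThreshold(128)                  // set before anything starts
	batcher.EnableAutoCommit(ctx, time.Second) // flush at least once per second

	go func() {
		defer batcher.Close() // sends everything pending, closes the output side
		for t := range tableStream {
			batcher.Add(t)
		}
	}()
	return outCh, nil
}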

func (*Batcher) Add

func (b *Batcher) Add(tbs TableWithRange)

Add adds a task to the Batcher.

func (*Batcher) Close

func (b *Batcher) Close()

Close closes the batcher, sending all pending requests and closing updateCh.

func (*Batcher) DisableAutoCommit

func (b *Batcher) DisableAutoCommit()

DisableAutoCommit blocks the current goroutine until the worker can gracefully stop, and then disables auto commit.

func (*Batcher) EnableAutoCommit

func (b *Batcher) EnableAutoCommit(ctx context.Context, delay time.Duration)

EnableAutoCommit makes the batcher commit batches periodically even when the batch size hasn't reached the threshold. This function exists so that auto commit can be disabled in some cases.

func (*Batcher) Len

func (b *Batcher) Len() int

Len calculates the current size of this batcher.

func (*Batcher) Send

func (b *Batcher) Send(ctx context.Context)

Send sends all pending requests in the batcher. Tables that become FULLY sent in the current batch are emitted to the output channel.

func (*Batcher) SetThreshold

func (b *Batcher) SetThreshold(newThreshold int)

SetThreshold sets the batch size threshold at which a batch is sent. Note that this function isn't goroutine-safe yet; set the threshold before anything starts (e.g., before EnableAutoCommit).

type Client

type Client struct {
	// contains filtered or unexported fields
}

Client sends requests to restore files.

func NewRestoreClient

func NewRestoreClient(
	g glue.Glue,
	pdClient pd.Client,
	store kv.Storage,
	tlsConf *tls.Config,
	keepaliveConf keepalive.ClientParameters,
) (*Client, error)

NewRestoreClient returns a new RestoreClient.

func (*Client) Close

func (rc *Client) Close()

Close closes the client.

func (*Client) CreateDatabase

func (rc *Client) CreateDatabase(ctx context.Context, db *model.DBInfo) error

CreateDatabase creates a database.

func (*Client) CreateTables

func (rc *Client) CreateTables(
	dom *domain.Domain,
	tables []*metautil.Table,
	newTS uint64,
) (*RewriteRules, []*model.TableInfo, error)

CreateTables creates multiple tables, and returns their rewrite rules.

func (*Client) EnableOnline

func (rc *Client) EnableOnline()

EnableOnline sets the mode of restore to online.

func (*Client) EnableSkipCreateSQL

func (rc *Client) EnableSkipCreateSQL()

EnableSkipCreateSQL sets the switch to skip creating schemas and tables.

func (*Client) ExecDDLs

func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error

ExecDDLs executes the queries of the DDL jobs.

func (*Client) GetDDLJobs

func (rc *Client) GetDDLJobs() []*model.Job

GetDDLJobs returns the DDL jobs.

func (*Client) GetDatabase

func (rc *Client) GetDatabase(name string) *utils.Database

GetDatabase returns a database by name.

func (*Client) GetDatabases

func (rc *Client) GetDatabases() []*utils.Database

GetDatabases returns all databases.

func (*Client) GetFilesInRawRange

func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backuppb.File, error)

GetFilesInRawRange gets all files that are in the given range or intersect with it.

func (*Client) GetPDClient

func (rc *Client) GetPDClient() pd.Client

GetPDClient returns a pd client.

func (*Client) GetPlacementRules

func (rc *Client) GetPlacementRules(ctx context.Context, pdAddrs []string) ([]placement.Rule, error)

GetPlacementRules returns the current placement rules.

func (*Client) GetTLSConfig

func (rc *Client) GetTLSConfig() *tls.Config

GetTLSConfig returns the TLS config.

func (*Client) GetTS

func (rc *Client) GetTS(ctx context.Context) (uint64, error)

GetTS gets a new timestamp from PD.

func (*Client) GetTableSchema

func (rc *Client) GetTableSchema(
	dom *domain.Domain,
	dbName model.CIStr,
	tableName model.CIStr,
) (*model.TableInfo, error)

GetTableSchema returns the schema of a table from TiDB.

func (*Client) GoCreateTables

func (rc *Client) GoCreateTables(
	ctx context.Context,
	dom *domain.Domain,
	tables []*metautil.Table,
	newTS uint64,
	dbPool []*DB,
	errCh chan<- error,
) <-chan CreatedTable

GoCreateTables creates tables and generates their information. This function uses as many workers as there are sessions in dbPool; leave dbPool nil to send DDLs sequentially.

func (*Client) GoValidateChecksum

func (rc *Client) GoValidateChecksum(
	ctx context.Context,
	tableStream <-chan CreatedTable,
	kvClient kv.Client,
	errCh chan<- error,
	updateCh glue.Progress,
	concurrency uint,
) <-chan struct{}

GoValidateChecksum forks a goroutine to validate checksums after restore. It returns a channel that fires a struct{} when everything is done.
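
These stream-style helpers compose into a channel pipeline: GoCreateTables emits created tables, GoValidateFileRanges attaches key ranges, a Batcher restores them, and GoValidateChecksum verifies the results. A rough sketch of the chain, with the batcher step elided and import paths assumed:

import (
	"context"

	backuppb "github.com/pingcap/kvproto/pkg/backup" // assumed alias
	"github.com/pingcap/tidb/domain"
	"github.com/pingcap/tidb/kv"

	"github.com/pingcap/br/pkg/glue" // assumed paths for BR v5.1
	"github.com/pingcap/br/pkg/metautil"
	"github.com/pingcap/br/pkg/restore"
)

// pipelineSketch chains the stream-style helpers. restoredTables is assumed
// to be the output channel of a Batcher fed by rangeStream (batcher elided).
func pipelineSketch(
	ctx context.Context,
	rc *restore.Client,
	dom *domain.Domain,
	tables []*metautil.Table,
	fileOfTable map[int64][]*backuppb.File,
	restoredTables <-chan restore.CreatedTable,
	kvClient kv.Client,
	updateCh glue.Progress,
	newTS uint64,
	errCh chan error,
) <-chan struct{} {
	tableStream := rc.GoCreateTables(ctx, dom, tables, newTS, nil /* nil dbPool: sequential DDL */, errCh)
	rangeStream := restore.GoValidateFileRanges(ctx, tableStream, fileOfTable,
		restore.DefaultMergeRegionSizeBytes, restore.DefaultMergeRegionKeyCount, errCh)
	_ = rangeStream // ...would feed a Batcher whose output is restoredTables...
	return rc.GoValidateChecksum(ctx, restoredTables, kvClient, errCh, updateCh, 4)
}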

func (*Client) InitBackupMeta

func (rc *Client) InitBackupMeta(c context.Context, backupMeta *backuppb.BackupMeta, backend *backuppb.StorageBackend, externalStorage storage.ExternalStorage, reader *metautil.MetaReader) error

InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient.

func (*Client) IsIncremental

func (rc *Client) IsIncremental() bool

IsIncremental returns whether this backup is incremental.

func (*Client) IsOnline

func (rc *Client) IsOnline() bool

IsOnline tells whether it's an online restore.

func (*Client) IsRawKvMode

func (rc *Client) IsRawKvMode() bool

IsRawKvMode checks whether the backup data is in raw KV format, in which case transactional recovery is forbidden.

func (*Client) IsSkipCreateSQL

func (rc *Client) IsSkipCreateSQL() bool

IsSkipCreateSQL returns whether creating schemas and tables should be skipped during restore.

func (*Client) LoadRestoreStores

func (rc *Client) LoadRestoreStores(ctx context.Context) error

LoadRestoreStores loads the stores used to restore data.

func (*Client) PreCheckTableClusterIndex

func (rc *Client) PreCheckTableClusterIndex(
	tables []*metautil.Table,
	ddlJobs []*model.Job,
	dom *domain.Domain,
) error

PreCheckTableClusterIndex checks whether the backup tables and the existing tables have different cluster index options.

func (*Client) PreCheckTableTiFlashReplica

func (rc *Client) PreCheckTableTiFlashReplica(
	ctx context.Context,
	tables []*metautil.Table,
) error

PreCheckTableTiFlashReplica checks whether the number of TiFlash replicas is no more than the number of TiFlash nodes.

func (*Client) ResetPlacementRules

func (rc *Client) ResetPlacementRules(ctx context.Context, tables []*model.TableInfo) error

ResetPlacementRules removes placement rules for tables.

func (*Client) ResetRestoreLabels

func (rc *Client) ResetRestoreLabels(ctx context.Context) error

ResetRestoreLabels removes the exclusive labels of the restore stores.

func (*Client) ResetTS

func (rc *Client) ResetTS(ctx context.Context, pdAddrs []string) error

ResetTS resets the timestamp of PD to a bigger value.

func (*Client) RestoreFiles

func (rc *Client) RestoreFiles(
	ctx context.Context,
	files []*backuppb.File,
	rewriteRules *RewriteRules,
	updateCh glue.Progress,
) (err error)

RestoreFiles tries to restore the files.

func (*Client) RestoreRaw

func (rc *Client) RestoreRaw(
	ctx context.Context, startKey []byte, endKey []byte, files []*backuppb.File, updateCh glue.Progress,
) error

RestoreRaw tries to restore raw keys in the specified range.

func (*Client) RestoreSystemSchemas

func (rc *Client) RestoreSystemSchemas(ctx context.Context, f filter.Filter)

RestoreSystemSchemas restores the system schema (i.e., the `mysql` schema). For details, see https://github.com/pingcap/br/issues/679#issuecomment-762592254.

func (*Client) SetConcurrency

func (rc *Client) SetConcurrency(c uint)

SetConcurrency sets the concurrency for restoring databases, tables, and files.

func (*Client) SetRateLimit

func (rc *Client) SetRateLimit(rateLimit uint64)

SetRateLimit sets the rate limit.

func (*Client) SetStorage

func (rc *Client) SetStorage(ctx context.Context, backend *backuppb.StorageBackend, opts *storage.ExternalStorageOptions) error

SetStorage sets the ExternalStorage for the client.

func (*Client) SetSwitchModeInterval

func (rc *Client) SetSwitchModeInterval(interval time.Duration)

SetSwitchModeInterval sets the switch-mode interval for the client.

func (*Client) SetupPlacementRules

func (rc *Client) SetupPlacementRules(ctx context.Context, tables []*model.TableInfo) error

SetupPlacementRules sets rules for the tables' regions.

func (*Client) SwitchToImportMode

func (rc *Client) SwitchToImportMode(ctx context.Context)

SwitchToImportMode switches the TiKV cluster to import mode.

func (*Client) SwitchToNormalMode

func (rc *Client) SwitchToNormalMode(ctx context.Context) error

SwitchToNormalMode switches the TiKV cluster back to normal mode.
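
A typical pattern, sketched under assumptions (this helper is not part of the package): hold import mode for the duration of RestoreFiles and always switch back:

import (
	"context"
	"log"

	backuppb "github.com/pingcap/kvproto/pkg/backup" // assumed alias

	"github.com/pingcap/br/pkg/glue" // assumed paths for BR v5.1
	"github.com/pingcap/br/pkg/restore"
)

// restoreUnderImportMode restores files with TiKV in import mode and
// switches the cluster back to normal mode on every exit path.
func restoreUnderImportMode(
	ctx context.Context,
	rc *restore.Client,
	files []*backuppb.File,
	rules *restore.RewriteRules,
	updateCh glue.Progress,
) error {
	rc.SwitchToImportMode(ctx)
	defer func() {
		if err := rc.SwitchToNormalMode(ctx); err != nil {
			log.Printf("switch back to normal mode: %v", err)
		}
	}()
	return rc.RestoreFiles(ctx, files, rules, updateCh)
}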

func (*Client) WaitPlacementSchedule

func (rc *Client) WaitPlacementSchedule(ctx context.Context, tables []*model.TableInfo) error

WaitPlacementSchedule waits for PD to move the tables' regions to the restore stores.
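
For online restore these helpers read as a sequence. A hedged sketch of the flow, assuming EnableOnline has been called and the import paths shown in the comments:

import (
	"context"

	"github.com/pingcap/parser/model" // assumed home of model.TableInfo in v5.1

	"github.com/pingcap/br/pkg/restore" // assumed package import path
)

// prepareOnlineRestore moves the tables' regions onto the restore stores and
// returns a cleanup function that undoes the placement rules and labels.
func prepareOnlineRestore(
	ctx context.Context,
	rc *restore.Client,
	tables []*model.TableInfo,
) (func() error, error) {
	if err := rc.LoadRestoreStores(ctx); err != nil {
		return nil, err
	}
	if err := rc.SetupPlacementRules(ctx, tables); err != nil {
		return nil, err
	}
	if err := rc.WaitPlacementSchedule(ctx, tables); err != nil {
		return nil, err
	}
	cleanup := func() error {
		if err := rc.ResetPlacementRules(ctx, tables); err != nil {
			return err
		}
		return rc.ResetRestoreLabels(ctx)
	}
	return cleanup, nil
}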

type ContextManager

type ContextManager interface {
	// Enter makes some tables 'enter' this context (i.e., prepares them for restore).
	Enter(ctx context.Context, tables []CreatedTable) error
	// Leave makes some tables 'leave' this context (i.e., restore is done; do some post-work).
	Leave(ctx context.Context, tables []CreatedTable) error
	// Close closes the context manager. It is called when the manager is 'killed'
	// and should do some cleanup.
	Close(ctx context.Context)
}

ContextManager is the interface for managing a TiKV 'context' for restore. Batcher calls Enter when some tables are about to be restored in a batch, so preparation work can be done there (e.g., setting placement rules for online restore).

func NewBRContextManager

func NewBRContextManager(client *Client) ContextManager

NewBRContextManager makes a BR context manager, that is, one that sets placement rules for online restore on Enter (see <splitPrepareWork>) and unsets them on Leave.

type CreatedTable

type CreatedTable struct {
	RewriteRule *RewriteRules
	Table       *model.TableInfo
	OldTable    *metautil.Table
}

CreatedTable is a table created during the restore process, but not yet filled with data.

type DB

type DB struct {
	// contains filtered or unexported fields
}

DB is a TiDB instance, not thread-safe.

func MakeDBPool

func MakeDBPool(size uint, dbFactory func() (*DB, error)) ([]*DB, error)

MakeDBPool makes a session pool of the specified size using dbFactory.

func NewDB

func NewDB(g glue.Glue, store kv.Storage) (*DB, error)

NewDB returns a new DB.

func (*DB) Close

func (db *DB) Close()

Close closes the connection.

func (*DB) CreateDatabase

func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error

CreateDatabase executes a CREATE DATABASE SQL.

func (*DB) CreateTable

func (db *DB) CreateTable(ctx context.Context, table *metautil.Table) error

CreateTable executes a CREATE TABLE SQL.

func (*DB) ExecDDL

func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error

ExecDDL executes the query of a DDL job.

type DDLJobFilterRule

type DDLJobFilterRule func(ddlJob *model.Job) bool

DDLJobFilterRule judges whether a DDL job should be ignored.

type DrainResult

type DrainResult struct {
	// TablesToSend are the tables to be sent in this batch.
	TablesToSend []CreatedTable
	// BlankTablesAfterSend are the tables that will be fully restored after this batch is sent.
	BlankTablesAfterSend []CreatedTable
	RewriteRules         *RewriteRules
	Ranges               []rtree.Range
}

DrainResult is a collection of ranges and their metadata.

func (DrainResult) Files

func (result DrainResult) Files() []*backuppb.File

Files returns all files of this drain result.

type FileImporter

type FileImporter struct {
	// contains filtered or unexported fields
}

FileImporter is used to import a file into TiKV.

func NewFileImporter

func NewFileImporter(
	metaClient SplitClient,
	importClient ImporterClient,
	backend *backuppb.StorageBackend,
	isRawKvMode bool,
	rateLimit uint64,
) FileImporter

NewFileImporter returns a new FileImporter.

func (*FileImporter) CheckMultiIngestSupport

func (importer *FileImporter) CheckMultiIngestSupport(ctx context.Context, pdClient pd.Client) error

CheckMultiIngestSupport checks whether all stores support multi-ingest.

func (*FileImporter) Import

func (importer *FileImporter) Import(
	ctx context.Context,
	files []*backuppb.File,
	rewriteRules *RewriteRules,
) error

Import tries to import a file. All rules must contain encoded keys.

func (*FileImporter) SetRawRange

func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error

SetRawRange sets the range to be restored in raw kv mode.

type ImporterClient

type ImporterClient interface {
	DownloadSST(
		ctx context.Context,
		storeID uint64,
		req *import_sstpb.DownloadRequest,
	) (*import_sstpb.DownloadResponse, error)

	IngestSST(
		ctx context.Context,
		storeID uint64,
		req *import_sstpb.IngestRequest,
	) (*import_sstpb.IngestResponse, error)
	MultiIngest(
		ctx context.Context,
		storeID uint64,
		req *import_sstpb.MultiIngestRequest,
	) (*import_sstpb.IngestResponse, error)

	SetDownloadSpeedLimit(
		ctx context.Context,
		storeID uint64,
		req *import_sstpb.SetDownloadSpeedLimitRequest,
	) (*import_sstpb.SetDownloadSpeedLimitResponse, error)

	GetImportClient(
		ctx context.Context,
		storeID uint64,
	) (import_sstpb.ImportSSTClient, error)

	SupportMultiIngest(ctx context.Context, stores []uint64) (bool, error)
}

ImporterClient is used to import a file to TiKV.

func NewImportClient

func NewImportClient(metaClient SplitClient, tlsConf *tls.Config, keepaliveConf keepalive.ClientParameters) ImporterClient

NewImportClient returns a new ImporterClient.

type Ingester

type Ingester struct {
	// TS is the commit ts appended to keys in TiKV.
	TS uint64

	WorkerPool *utils.WorkerPool
	// contains filtered or unexported fields
}

Ingester writes and ingests KV pairs into TiKV. It is used by both BR log restore and the Lightning local backend.

func NewIngester

func NewIngester(
	splitCli SplitClient, cfg concurrencyCfg, commitTS uint64, tlsConf *tls.Config,
) *Ingester

NewIngester creates an Ingester.

type LogClient

type LogClient struct {
	// contains filtered or unexported fields
}

LogClient sends requests to restore files.

func NewLogRestoreClient

func NewLogRestoreClient(
	ctx context.Context,
	restoreClient *Client,
	startTS uint64,
	endTS uint64,
	tableFilter filter.Filter,
	concurrency uint,
	batchFlushPairs int,
	batchFlushSize int64,
	batchWriteKVPairs int,
) (*LogClient, error)

NewLogRestoreClient returns a new LogRestoreClient.

func (*LogClient) NeedRestoreDDL

func (l *LogClient) NeedRestoreDDL(fileName string) (bool, error)

NeedRestoreDDL determines whether to collect this DDL file based on its ts range.

func (*LogClient) NeedRestoreRowChange

func (l *LogClient) NeedRestoreRowChange(fileName string) (bool, error)

NeedRestoreRowChange determines whether to collect this row-change file based on its ts range.

func (*LogClient) ResetTSRange

func (l *LogClient) ResetTSRange(startTS uint64, endTS uint64)

ResetTSRange is used for tests.

func (*LogClient) RestoreLogData

func (l *LogClient) RestoreLogData(ctx context.Context, dom *domain.Domain) error

RestoreLogData restores the specified log data from storage.

type LogMeta

type LogMeta struct {
	Names            map[int64]string `json:"names"`
	GlobalResolvedTS uint64           `json:"global_resolved_ts"`
}

LogMeta represents the log.meta generated by CDC log backup.

type MergeRangesStat

type MergeRangesStat struct {
	TotalFiles           int
	TotalWriteCFFile     int
	TotalDefaultCFFile   int
	TotalRegions         int
	RegionKeysAvg        int
	RegionBytesAvg       int
	MergedRegions        int
	MergedRegionKeysAvg  int
	MergedRegionBytesAvg int
}

MergeRangesStat holds statistics for the MergeRanges.

func MergeFileRanges

func MergeFileRanges(
	files []*backuppb.File, splitSizeBytes, splitKeyCount uint64,
) ([]rtree.Range, *MergeRangesStat, error)

MergeFileRanges returns the ranges of the files, merged based on splitSizeBytes and splitKeyCount.

By merging small ranges, it speeds up restoring a backup that contains many small ranges (regions), as it reduces the number of split-region and scatter-region operations.
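
For instance, a sketch that merges with the package defaults and reports the statistics (files would come from the backup meta; import paths assumed):

import (
	"fmt"

	backuppb "github.com/pingcap/kvproto/pkg/backup" // assumed alias

	"github.com/pingcap/br/pkg/restore" // assumed paths for BR v5.1
	"github.com/pingcap/br/pkg/rtree"
)

// mergeWithDefaults merges file ranges using the package defaults and
// reports how much merging compressed the range set.
func mergeWithDefaults(files []*backuppb.File) ([]rtree.Range, error) {
	ranges, stat, err := restore.MergeFileRanges(
		files, restore.DefaultMergeRegionSizeBytes, restore.DefaultMergeRegionKeyCount)
	if err != nil {
		return nil, err
	}
	fmt.Printf("merged %d files (%d regions) into %d ranges\n",
		stat.TotalFiles, stat.TotalRegions, len(ranges))
	return ranges, nil
}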

type OnSplitFunc

type OnSplitFunc func(key [][]byte)

OnSplitFunc is called before splitting a range.

type Range

type Range struct {
	Start []byte
	End   []byte
}

Range records the start and end key of localStoreDir.DB, so we can write it to TiKV in a streaming manner.

type RegionInfo

type RegionInfo struct {
	Region *metapb.Region
	Leader *metapb.Peer
}

RegionInfo includes a region and the leader of the region.

func NeedSplit

func NeedSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo

NeedSplit checks whether a key needs a split; if so, it returns the region to be split.

func PaginateScanRegion

func PaginateScanRegion(
	ctx context.Context, client SplitClient, startKey, endKey []byte, limit int,
) ([]*RegionInfo, error)

PaginateScanRegion scans regions with pagination (at most limit regions per request) and returns all of them at once. Pagination reduces the maximum gRPC message size.
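
A sketch that scans a key span with the package's pagination constant (the split client and keys are assumed to exist):

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/restore" // assumed package import path
)

// listRegions scans [startKey, endKey) page by page and prints each region.
func listRegions(ctx context.Context, client restore.SplitClient, startKey, endKey []byte) error {
	regions, err := restore.PaginateScanRegion(
		ctx, client, startKey, endKey, restore.ScanRegionPaginationLimit)
	if err != nil {
		return err
	}
	for _, r := range regions {
		if r.Leader != nil { // a region may momentarily have no known leader
			fmt.Printf("region %d, leader on store %d\n", r.Region.Id, r.Leader.StoreId)
		}
	}
	return nil
}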

func (*RegionInfo) ContainsInterior

func (region *RegionInfo) ContainsInterior(key []byte) bool

ContainsInterior returns whether the region contains the given key, and also that the key does not fall on the boundary (start key) of the region.

type RegionSplitter

type RegionSplitter struct {
	// contains filtered or unexported fields
}

RegionSplitter is an executor of region splits by rules.

func NewRegionSplitter

func NewRegionSplitter(client SplitClient) *RegionSplitter

NewRegionSplitter returns a new RegionSplitter.

func (*RegionSplitter) Split

func (rs *RegionSplitter) Split(
	ctx context.Context,
	ranges []rtree.Range,
	rewriteRules *RewriteRules,
	onSplit OnSplitFunc,
) error

Split executes a region split. It first splits regions by the rewrite rules, then splits regions by the end key of each range. tableRules include the prefix of a table, since some ranges may have a prefix with a record sequence or an index sequence. Note: all ranges and rewrite rules must have raw keys.
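
A hedged sketch that runs a split over merged ranges and counts split keys via the OnSplitFunc callback (import paths assumed):

import (
	"context"
	"fmt"

	"github.com/pingcap/br/pkg/restore" // assumed paths for BR v5.1
	"github.com/pingcap/br/pkg/rtree"
)

// splitAndCount runs a split over merged ranges and counts the split keys
// through the OnSplitFunc callback.
func splitAndCount(
	ctx context.Context,
	client restore.SplitClient,
	ranges []rtree.Range,
	rules *restore.RewriteRules,
) error {
	splitter := restore.NewRegionSplitter(client)
	var splitKeys int
	err := splitter.Split(ctx, ranges, rules, func(keys [][]byte) {
		// Invoked before each split with the keys about to be split on.
		splitKeys += len(keys)
	})
	if err != nil {
		return err
	}
	fmt.Printf("split on %d key(s)\n", splitKeys)
	return nil
}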

type RewriteRules

type RewriteRules struct {
	Table []*import_sstpb.RewriteRule
	Data  []*import_sstpb.RewriteRule
}

RewriteRules contains rules for rewriting keys of tables.

func EmptyRewriteRule

func EmptyRewriteRule() *RewriteRules

EmptyRewriteRule makes a new, empty rewrite rule.

func GetRewriteRules

func GetRewriteRules(
	newTable, oldTable *model.TableInfo, newTimeStamp uint64,
) *RewriteRules

GetRewriteRules returns the rewrite rule of the new table and the old table.

func (*RewriteRules) Append

func (r *RewriteRules) Append(other RewriteRules)

Append appends its argument to these rewrite rules.

type SendType

type SendType int

SendType is the 'type' of a send. When we issue a 'send' command to the worker, we may want to flush all pending ranges (when auto commit is enabled), or just clean up the overflowing ranges (when merely adding a table to the batcher).

const (
	// SendUntilLessThanBatch will make the batcher send batch until
	// its remaining range is less than its batchSizeThreshold.
	SendUntilLessThanBatch SendType = iota
	// SendAll will make the batcher send all pending ranges.
	SendAll
	// SendAllThenClose will make the batcher send all pending ranges and then close itself.
	SendAllThenClose
)

type SplitClient

type SplitClient interface {
	// GetStore gets a store by a store id.
	GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error)
	// GetRegion gets a region which includes a specified key.
	GetRegion(ctx context.Context, key []byte) (*RegionInfo, error)
	// GetRegionByID gets a region by a region id.
	GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error)
	// SplitRegion splits a region at a key. If the key is not included in the
	// region, it returns nil.
	// Note: the key should not be encoded.
	SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error)
	// BatchSplitRegions splits a region at a batch of keys.
	// Note: the keys should not be encoded.
	BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error)
	// BatchSplitRegionsWithOrigin splits a region at a batch of keys and returns
	// the original region and the newly split regions.
	BatchSplitRegionsWithOrigin(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) (*RegionInfo, []*RegionInfo, error)
	// ScatterRegion scatters a specified region.
	ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error
	// GetOperator gets the status of operator of the specified region.
	GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error)
	// ScanRegions gets a list of regions, starting from the region that contains key.
	// limit caps the maximum number of regions returned.
	ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error)
	// GetPlacementRule loads a placement rule from PD.
	GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error)
	// SetPlacementRule inserts or updates a placement rule in PD.
	SetPlacementRule(ctx context.Context, rule placement.Rule) error
	// DeletePlacementRule removes a placement rule from PD.
	DeletePlacementRule(ctx context.Context, groupID, ruleID string) error
	// SetStoresLabel adds or updates the specified label of stores. If labelValue
	// is empty, it clears the label.
	SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error
}

SplitClient is an external client used by RegionSplitter.

func NewSplitClient

func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient

NewSplitClient returns a client used by RegionSplitter.

type TableSink

type TableSink interface {
	EmitTables(tables ...CreatedTable)
	EmitError(error)
	Close()
}

TableSink is the 'sink' of data restored by a sender.

type TableWithRange

type TableWithRange struct {
	CreatedTable

	Range []rtree.Range
}

TableWithRange is a CreatedTable that has been bound to some key ranges.

type UniqueTableName

type UniqueTableName struct {
	DB    string
	Table string
}

UniqueTableName identifies a unique table.
