Documentation ¶
Index ¶
- Constants
- Variables
- func AddDelRangeJobInternal(ctx context.Context, wrapper DelRangeExecWrapper, job *model.Job) error
- func AddHistoryDDLJob(sess *sess.Session, t *meta.Meta, job *model.Job, updateRawArgs bool) error
- func AddIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)
- func AllocateColumnID(tblInfo *model.TableInfo) int64
- func AllocateIndexID(tblInfo *model.TableInfo) int64
- func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
- func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
- func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error)
- func BuildElements(changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) []*meta.Element
- func BuildHiddenColumnInfo(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, ...) ([]*model.ColumnInfo, error)
- func BuildIndexInfo(ctx sessionctx.Context, allTableColumns []*model.ColumnInfo, ...) (*model.IndexInfo, error)
- func BuildSessionTemporaryTableInfo(ctx sessionctx.Context, is infoschema.InfoSchema, s *ast.CreateTableStmt, ...) (*model.TableInfo, error)
- func BuildTableInfo(ctx sessionctx.Context, tableName model.CIStr, cols []*table.Column, ...) (tbInfo *model.TableInfo, err error)
- func BuildTableInfoFromAST(s *ast.CreateTableStmt) (*model.TableInfo, error)
- func BuildTableInfoWithLike(ctx sessionctx.Context, ident ast.Ident, referTblInfo *model.TableInfo, ...) (*model.TableInfo, error)
- func BuildTableInfoWithStmt(ctx sessionctx.Context, s *ast.CreateTableStmt, dbCharset, dbCollate string, ...) (*model.TableInfo, error)
- func BuildViewInfo(_ sessionctx.Context, s *ast.CreateViewStmt) (*model.ViewInfo, error)
- func CancelJobs(se sessionctx.Context, ids []int64) (errs []error, err error)
- func CancelJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error
- func CheckDropTablePartition(meta *model.TableInfo, partLowerNames []string) error
- func CheckIsDropPrimaryKey(indexName model.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error)
- func CheckPKOnGeneratedColumn(tblInfo *model.TableInfo, ...) (*model.ColumnInfo, error)
- func CheckPlacementPolicyNotInUseFromInfoSchema(is infoschema.InfoSchema, policy *model.PolicyInfo) error
- func CheckPlacementPolicyNotInUseFromMeta(t *meta.Meta, policy *model.PolicyInfo) error
- func CheckTableInfoValidWithStmt(ctx sessionctx.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) (err error)
- func CleanupDDLReorgHandles(job *model.Job, s *sess.Session)
- func ConvertBetweenCharAndVarchar(oldCol, newCol byte) bool
- func CreateNewColumn(ctx sessionctx.Context, schema *model.DBInfo, spec *ast.AlterTableSpec, ...) (*table.Column, error)
- func DisableTiFlashPoll(d any)
- func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)
- func EnableTiFlashPoll(d any)
- func ExtractTblInfos(is infoschema.InfoSchema, oldIdent, newIdent ast.Ident, isAlterTable bool, ...) ([]*model.DBInfo, int64, error)
- func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName
- func FindRelatedIndexesToChange(tblInfo *model.TableInfo, colName model.CIStr) []changingIndex
- func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTableType, tbInfo *model.TableInfo, ...) error
- func GetAllDDLJobs(se sessionctx.Context) ([]*model.Job, error)
- func GetAllHistoryDDLJobs(m *meta.Meta) ([]*model.Job, error)
- func GetCharsetAndCollateInTableOption(sessVars *variable.SessionVars, startIdx int, options []*ast.TableOption) (chs, coll string, err error)
- func GetColumnForeignKeyInfo(colName string, fkInfos []*model.FKInfo) *model.FKInfo
- func GetCustomizedHook(s string) (func(do DomainReloader) Callback, error)
- func GetDefaultCollation(sessVars *variable.SessionVars, cs string) (string, error)
- func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, ...) (bool, error)
- func GetFlashbackKeyRanges(sess sessionctx.Context, flashbackTS uint64) ([]kv.KeyRange, error)
- func GetHistoryJobByID(sess sessionctx.Context, id int64) (*model.Job, error)
- func GetLastHistoryDDLJobsIterator(m *meta.Meta) (meta.LastJobIterator, error)
- func GetLastNHistoryDDLJobs(t *meta.Meta, maxNumJobs int) ([]*model.Job, error)
- func GetModifiableColumnJob(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, ...) (*model.Job, error)
- func GetName4AnonymousIndex(t table.Table, colName model.CIStr, idxName model.CIStr) model.CIStr
- func GetOriginDefaultValueForModifyColumn(sessCtx sessionctx.Context, changingCol, oldCol *model.ColumnInfo) (any, error)
- func GetRangeEndKey(ctx *JobContext, store kv.Storage, priority int, keyPrefix kv.Key, ...) (kv.Key, error)
- func GetRangePlacementPolicyName(ctx context.Context, rangeBundleID string) (string, error)
- func GetTableDataKeyRanges(nonFlashbackTableIDs []int64) []kv.KeyRange
- func GetTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error)
- func GetWaitTimeWhenErrorOccurred() time.Duration
- func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo
- func IsAutoRandomColumnID(tblInfo *model.TableInfo, colID int64) bool
- func IsColumnDroppableWithCheckConstraint(col model.CIStr, tblInfo *model.TableInfo) error
- func IsColumnRenameableWithCheckConstraint(col model.CIStr, tblInfo *model.TableInfo) error
- func IsElemsChangedToModifyColumn(oldElems, newElems []string) bool
- func IterAllDDLJobs(ctx sessionctx.Context, txn kv.Transaction, ...) error
- func IterHistoryDDLJobs(txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error
- func JobNeedGC(job *model.Job) bool
- func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus)
- func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error)
- func MockTableInfo(ctx sessionctx.Context, stmt *ast.CreateTableStmt, tableID int64) (*model.TableInfo, error)
- func NeedToOverwriteColCharset(options []*ast.TableOption) bool
- func NewAddIndexIngestPipeline(ctx *OperatorCtx, store kv.Storage, sessPool opSessPool, ...) (*operator.AsyncPipeline, error)
- func NewBackfillingSchedulerExt(d DDL) (scheduler.Extension, error)
- func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta
- func NewMockSchemaSyncer() syncer.SchemaSyncer
- func NewMockStateSyncer() syncer.StateSyncer
- func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler
- func NewWriteIndexToExternalStoragePipeline(ctx *OperatorCtx, store kv.Storage, extStoreURI string, sessPool opSessPool, ...) (*operator.AsyncPipeline, error)
- func OverwriteCollationWithBinaryFlag(sessVars *variable.SessionVars, colDef *ast.ColumnDef, chs, coll string) (newChs string, newColl string)
- func PauseAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
- func PauseJobs(se sessionctx.Context, ids []int64) ([]error, error)
- func PauseJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func PollAvailableTableProgress(schemas infoschema.InfoSchema, _ sessionctx.Context, ...)
- func ProcessColumnCharsetAndCollation(sctx sessionctx.Context, col *table.Column, newCol *table.Column, ...) error
- func ProcessModifyColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error
- func RemoveDependentHiddenColumns(tblInfo *model.TableInfo, idxInfo *model.IndexInfo)
- func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error)
- func ResolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error)
- func ResolveCharsetCollation(sessVars *variable.SessionVars, charsetOpts ...ast.CharsetOpt) (chs string, coll string, err error)
- func ResumeAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
- func ResumeJobs(se sessionctx.Context, ids []int64) ([]error, error)
- func ResumeJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
- func ScanHistoryDDLJobs(m *meta.Meta, startJobID int64, limit int) ([]*model.Job, error)
- func SendFlashbackToVersionRPC(ctx context.Context, s tikv.Storage, version uint64, startTS, commitTS uint64, ...) (rangetask.TaskStat, error)
- func SendPrepareFlashbackToVersionRPC(ctx context.Context, s tikv.Storage, flashbackTS, startTS uint64, ...) (rangetask.TaskStat, error)
- func SetBackfillTaskChanSizeForTest(n int)
- func SetBatchInsertDeleteRangeSize(i int)
- func SetDefaultValue(ctx sessionctx.Context, col *table.Column, option *ast.ColumnOption) (hasDefaultValue bool, err error)
- func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, ...) error
- func SetDirectResourceGroupBackgroundOption(resourceGroupSettings *model.ResourceGroupSettings, ...) error
- func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGroupSettings, typ ast.RunawayOptionType, ...) error
- func SetDirectResourceGroupSettings(groupInfo *model.ResourceGroupInfo, opt *ast.ResourceGroupOption) error
- func SetIdxColNameOffset(idxCol *model.IndexColumn, changingCol *model.ColumnInfo)
- func SetWaitTimeWhenErrorOccurred(dur time.Duration)
- func ShouldBuildClusteredIndex(ctx sessionctx.Context, opt *ast.IndexOption, isSingleIntPK bool) bool
- func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, ...) uint64
- func UpdateColsNull2NotNull(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error
- func ValidateFlashbackTS(ctx context.Context, sctx sessionctx.Context, flashBackTS uint64) error
- func ValidateRenameIndex(from, to model.CIStr, tbl *model.TableInfo) (ignore bool, err error)
- func WaitScatterRegionFinish(ctx context.Context, store kv.SplittableStore, regionIDs ...uint64)
- type AllocTableIDIf
- type AlterAlgorithm
- type AvailableTableID
- type BackfillCleanUpS3
- type BackfillSubTaskMeta
- type BackfillTaskMeta
- type BackfillingSchedulerExt
- func (*BackfillingSchedulerExt) GetEligibleInstances(_ context.Context, _ *proto.Task) ([]string, error)
- func (sch *BackfillingSchedulerExt) GetNextStep(task *proto.TaskBase) proto.Step
- func (*BackfillingSchedulerExt) IsRetryableErr(error) bool
- func (*BackfillingSchedulerExt) OnDone(_ context.Context, _ diststorage.TaskHandle, _ *proto.Task) error
- func (sch *BackfillingSchedulerExt) OnNextSubtasksBatch(ctx context.Context, taskHandle diststorage.TaskHandle, task *proto.Task, ...) (taskMeta [][]byte, err error)
- func (*BackfillingSchedulerExt) OnTick(_ context.Context, _ *proto.Task)
- type BaseCallback
- func (*BaseCallback) OnChanged(err error) error
- func (*BaseCallback) OnGetJobAfter(_ string, _ *model.Job)
- func (*BaseCallback) OnGetJobBefore(_ string)
- func (*BaseCallback) OnJobRunAfter(_ *model.Job)
- func (*BaseCallback) OnJobRunBefore(_ *model.Job)
- func (*BaseCallback) OnJobUpdated(_ *model.Job)
- func (*BaseCallback) OnSchemaStateChanged(_ int64)
- func (*BaseCallback) OnUpdateReorgInfo(_ *model.Job, _ int64)
- func (*BaseCallback) OnWatched(_ context.Context)
- type BaseInterceptor
- type Callback
- type CreateTableWithInfoConfig
- type CreateTableWithInfoConfigurier
- type DDL
- type DefaultCallback
- type DelRangeExecWrapper
- type DomainReloader
- type IndexIngestOperator
- type IndexRecordChunk
- type IndexWriteResult
- type Info
- type Interceptor
- type JobContext
- type LitBackfillScheduler
- type LockTablesArg
- type MockSchemaSyncer
- func (*MockSchemaSyncer) Close()
- func (s *MockSchemaSyncer) CloseSession()
- func (s *MockSchemaSyncer) Done() <-chan struct{}
- func (s *MockSchemaSyncer) GlobalVersionCh() clientv3.WatchChan
- func (s *MockSchemaSyncer) Init(_ context.Context) error
- func (s *MockSchemaSyncer) OwnerCheckAllVersions(ctx context.Context, jobID int64, latestVer int64) error
- func (s *MockSchemaSyncer) OwnerUpdateGlobalVersion(_ context.Context, _ int64) error
- func (s *MockSchemaSyncer) Restart(_ context.Context) error
- func (s *MockSchemaSyncer) UpdateSelfVersion(_ context.Context, jobID int64, version int64) error
- func (*MockSchemaSyncer) WatchGlobalSchemaVer(context.Context)
- type MockStateSyncer
- func (*MockStateSyncer) GetGlobalState(context.Context) (*syncer.StateInfo, error)
- func (s *MockStateSyncer) Init(context.Context) error
- func (*MockStateSyncer) IsUpgradingState() bool
- func (*MockStateSyncer) Rewatch(context.Context)
- func (s *MockStateSyncer) UpdateGlobalState(_ context.Context, stateInfo *syncer.StateInfo) error
- func (s *MockStateSyncer) WatchChan() clientv3.WatchChan
- type OnExist
- type OperatorCtx
- type Option
- type Options
- type PollTiFlashBackoffContext
- func (b *PollTiFlashBackoffContext) Get(id int64) (*PollTiFlashBackoffElement, bool)
- func (b *PollTiFlashBackoffContext) Len() int
- func (b *PollTiFlashBackoffContext) Put(id int64) bool
- func (b *PollTiFlashBackoffContext) Remove(id int64) bool
- func (b *PollTiFlashBackoffContext) Tick(id int64) (grew bool, exist bool, cnt int)
- type PollTiFlashBackoffElement
- type RecoverInfo
- type RecoverSchemaInfo
- type ReorgCallback
- type TableScanOperator
- type TableScanTask
- type TableScanTaskSource
- type TiFlashManagementContext
- type TiFlashReplicaStatus
- type TiFlashTick
- type WriteExternalStoreOperator
Constants ¶
const (
    // JobTable stores the information of DDL jobs.
    JobTable = "tidb_ddl_job"
    // ReorgTable stores the information of DDL reorganization.
    ReorgTable = "tidb_ddl_reorg"
    // HistoryTable stores the history DDL jobs.
    HistoryTable = "tidb_ddl_history"

    // JobTableID is the table ID of `tidb_ddl_job`.
    JobTableID = meta.MaxInt48 - 1
    // ReorgTableID is the table ID of `tidb_ddl_reorg`.
    ReorgTableID = meta.MaxInt48 - 2
    // HistoryTableID is the table ID of `tidb_ddl_history`.
    HistoryTableID = meta.MaxInt48 - 3
    // MDLTableID is the table ID of `tidb_mdl_info`.
    MDLTableID = meta.MaxInt48 - 4
    // BackgroundSubtaskTableID is the table ID of `tidb_background_subtask`.
    BackgroundSubtaskTableID = meta.MaxInt48 - 5
    // BackgroundSubtaskHistoryTableID is the table ID of `tidb_background_subtask_history`.
    BackgroundSubtaskHistoryTableID = meta.MaxInt48 - 6

    // JobTableSQL is the CREATE TABLE SQL of `tidb_ddl_job`.
    JobTableSQL = "create table " + JobTable + `( job_id bigint not null, reorg int, schema_ids text(65535), table_ids text(65535), job_meta longblob, type int, processing int, primary key(job_id))`
    // ReorgTableSQL is the CREATE TABLE SQL of `tidb_ddl_reorg`.
    ReorgTableSQL = "create table " + ReorgTable + `( job_id bigint not null, ele_id bigint, ele_type blob, start_key blob, end_key blob, physical_id bigint, reorg_meta longblob, unique key(job_id, ele_id, ele_type(20)))`
    // HistoryTableSQL is the CREATE TABLE SQL of `tidb_ddl_history`.
    HistoryTableSQL = "create table " + HistoryTable + `( job_id bigint not null, job_meta longblob, db_name char(64), table_name char(64), schema_ids text(65535), table_ids text(65535), create_time datetime, primary key(job_id))`
    // BackgroundSubtaskTableSQL is the CREATE TABLE SQL of `tidb_background_subtask`.
    BackgroundSubtaskTableSQL = `` /* 602-byte string literal not displayed */
    // BackgroundSubtaskHistoryTableSQL is the CREATE TABLE SQL of `tidb_background_subtask_history`.
    BackgroundSubtaskHistoryTableSQL = `` /* 569-byte string literal not displayed */
)
const (
    BRInsertDeleteRangeSQLPrefix = insertDeleteRangeSQLPrefix
    BRInsertDeleteRangeSQLValue  = insertDeleteRangeSQLValue
)
These constants are only used in BR unit tests. Once they are modified, please make sure the change remains compatible with BR.
const (
// DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.
DDLOwnerKey = "/tidb/ddl/fg/owner"
)
const (
DefNumHistoryJobs = 10
)
DefNumHistoryJobs is the default value for the number of history jobs to return.
const (
// MaxCommentLength is exported for testing.
MaxCommentLength = 1024
)
Variables ¶
var (
    // TestCheckWorkerNumCh is used in tests to adjust the backfill worker count.
    TestCheckWorkerNumCh = make(chan *sync.WaitGroup)
    // TestCheckWorkerNumber is used in tests to adjust the backfill worker count.
    TestCheckWorkerNumber = int32(variable.DefTiDBDDLReorgWorkerCount)
    // TestCheckReorgTimeout is used to mock a timeout when reorganizing data.
    TestCheckReorgTimeout = int32(0)
)
var (
    // PollTiFlashInterval is the interval between every pollTiFlashReplicaStatus call.
    PollTiFlashInterval = 2 * time.Second
    // PullTiFlashPdTick indicates the number of intervals before we fully sync all TiFlash pd rules and tables.
    PullTiFlashPdTick = atomicutil.NewUint64(30 * 5)
    // UpdateTiFlashStoreTick indicates the number of intervals before we fully update TiFlash stores.
    UpdateTiFlashStoreTick = atomicutil.NewUint64(5)
    // PollTiFlashBackoffMaxTick is the max tick before we try to update TiFlash replica availability for one table.
    PollTiFlashBackoffMaxTick TiFlashTick = 10
    // PollTiFlashBackoffMinTick is the min tick before we try to update TiFlash replica availability for one table.
    PollTiFlashBackoffMinTick TiFlashTick = 1
    // PollTiFlashBackoffCapacity is the cache size of backoff struct.
    PollTiFlashBackoffCapacity = 1000
    // PollTiFlashBackoffRate is growth rate of exponential backoff threshold.
    PollTiFlashBackoffRate TiFlashTick = 1.5
    // RefreshProgressMaxTableCount is the max count of table to refresh progress after available each poll.
    RefreshProgressMaxTableCount uint64 = 1000
)
var (
    // WaitTimeWhenErrorOccurred is the waiting interval when processing DDL jobs encounters errors.
    WaitTimeWhenErrorOccurred = int64(1 * time.Second)
    // TestNotifyBeginTxnCh is used to notify that the txn is beginning in runInTxn.
    TestNotifyBeginTxnCh = make(chan struct{})
)
var (
    CheckBackfillJobFinishInterval = 300 * time.Millisecond
    // UpdateBackfillJobRowCountInterval is the interval of updating the job row count.
    UpdateBackfillJobRowCountInterval = 3 * time.Second
)
CheckBackfillJobFinishInterval is exported for testing.
var DDLBackfillers = map[model.ActionType]string{
    model.ActionAddIndex:            "add_index",
    model.ActionModifyColumn:        "modify_column",
    model.ActionDropIndex:           "drop_index",
    model.ActionReorganizePartition: "reorganize_partition",
}
DDLBackfillers contains the DDL actions that need a backfill step.
var (
    // EnableSplitTableRegion is a flag to decide whether to split a new region for
    // a newly created table. It takes effect only if the Storage supports split
    // region.
    EnableSplitTableRegion = uint32(0)
)
var EstimateTableRowSizeForTest = estimateTableRowSize
EstimateTableRowSizeForTest is used for test.
var LastReorgMetaFastReorgDisabled bool
LastReorgMetaFastReorgDisabled is used for test.
var MockDMLExecution func()
MockDMLExecution is only used for test.
var MockDMLExecutionAddIndexSubTaskFinish func()
MockDMLExecutionAddIndexSubTaskFinish is used to mock DML execution during distributed add index.
var MockDMLExecutionMerging func()
MockDMLExecutionMerging is only used for test.
var MockDMLExecutionOnDDLPaused func()
MockDMLExecutionOnDDLPaused is used to mock DML execution when ddl job paused.
var MockDMLExecutionOnTaskFinished func()
MockDMLExecutionOnTaskFinished is used to mock DML execution when tasks finished.
var MockDMLExecutionStateBeforeImport func()
MockDMLExecutionStateBeforeImport is only used for test.
var MockDMLExecutionStateBeforeMerge func()
MockDMLExecutionStateBeforeMerge is only used for test.
var MockDMLExecutionStateMerging func()
MockDMLExecutionStateMerging is only used for test.
var OperatorCallBackForTest func()
OperatorCallBackForTest is used for test to mock scan record error.
var ReorgWaitTimeout = 5 * time.Second
ReorgWaitTimeout is the timeout for waiting for a DDL job in the write-reorganization stage.
var ResultCounterForTest *atomic.Int32
ResultCounterForTest is used for test.
var (
    // RunInGoTest is used to identify whether ddl is running in a test.
    RunInGoTest bool
)
var (
    // SuppressErrorTooLongKeyKey is used by SchemaTracker to suppress err too long key error
    SuppressErrorTooLongKeyKey stringutil.StringerStr = "suppressErrorTooLongKeyKey"
)
var TestReorgGoroutineRunning = make(chan any)
TestReorgGoroutineRunning is only used in test to indicate the reorg goroutine has been started.
var TestSyncChan = make(chan struct{})
TestSyncChan is used to sync the test.
Functions ¶
func AddDelRangeJobInternal ¶
AddDelRangeJobInternal generates the delete ranges for the provided job and consumes them through the given DelRangeExecWrapper.
func AddHistoryDDLJob ¶
AddHistoryDDLJob records the history job.
func AddIndexColumnFlag ¶
AddIndexColumnFlag aligns the column flags of columns in TableInfo to IndexInfo.
func AllocateColumnID ¶
AllocateColumnID allocates next column ID from TableInfo.
func AllocateIndexID ¶
AllocateIndexID allocates an index ID from TableInfo.
func AppendPartitionDefs ¶
func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
AppendPartitionDefs generates the list of partition definitions needed for SHOW CREATE TABLE (in executor/show.go), for generating the ADD PARTITION query used by ALTER TABLE t LAST PARTITION with INTERVAL partitioning, and for generating the CREATE TABLE query from CREATE TABLE ... INTERVAL.
func AppendPartitionInfo ¶
func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)
AppendPartitionInfo is used in SHOW CREATE TABLE as well as for generating the SQL syntax of the PartitionInfo during validation of various DDL commands.
func BuildAddedPartitionInfo ¶
func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error)
BuildAddedPartitionInfo builds the partition info for ALTER TABLE ... ADD PARTITION.
func BuildElements ¶
BuildElements is exported for testing.
func BuildHiddenColumnInfo ¶
func BuildHiddenColumnInfo(ctx sessionctx.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName model.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error)
BuildHiddenColumnInfo builds hidden column info.
func BuildIndexInfo ¶
func BuildIndexInfo( ctx sessionctx.Context, allTableColumns []*model.ColumnInfo, indexName model.CIStr, isPrimary bool, isUnique bool, isGlobal bool, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption, state model.SchemaState, ) (*model.IndexInfo, error)
BuildIndexInfo builds a new IndexInfo according to the index information.
func BuildSessionTemporaryTableInfo ¶
func BuildSessionTemporaryTableInfo(ctx sessionctx.Context, is infoschema.InfoSchema, s *ast.CreateTableStmt, dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)
BuildSessionTemporaryTableInfo builds model.TableInfo from a SQL statement.
func BuildTableInfo ¶
func BuildTableInfo( ctx sessionctx.Context, tableName model.CIStr, cols []*table.Column, constraints []*ast.Constraint, charset string, collate string, ) (tbInfo *model.TableInfo, err error)
BuildTableInfo creates a TableInfo.
func BuildTableInfoFromAST ¶
func BuildTableInfoFromAST(s *ast.CreateTableStmt) (*model.TableInfo, error)
BuildTableInfoFromAST builds model.TableInfo from a SQL statement. Note: TableID and PartitionID are left as uninitialized value.
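The note about the uninitialized TableID and PartitionID matters when the result is later fed into the CreateTableWithInfo family. A minimal sketch of calling it on a parsed statement; the pkg/-layout import paths and the parser-driver blank import are assumptions that may need adjusting for your TiDB version:

    package main

    import (
        "fmt"

        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser"
        "github.com/pingcap/tidb/pkg/parser/ast"
        // The parser needs a value-expression driver registered; TiDB's own
        // driver is pulled in here (it may already be imported transitively).
        _ "github.com/pingcap/tidb/pkg/types/parser_driver"
    )

    func main() {
        p := parser.New()
        stmt, err := p.ParseOneStmt("CREATE TABLE t (a INT PRIMARY KEY, b VARCHAR(10))", "", "")
        if err != nil {
            panic(err)
        }
        // TableID and PartitionID are left uninitialized, as documented above.
        tblInfo, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt))
        if err != nil {
            panic(err)
        }
        fmt.Println(tblInfo.Name.O, len(tblInfo.Columns))
    }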
func BuildTableInfoWithLike ¶
func BuildTableInfoWithLike(ctx sessionctx.Context, ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error)
BuildTableInfoWithLike builds a new table info according to CREATE TABLE ... LIKE statement.
func BuildTableInfoWithStmt ¶
func BuildTableInfoWithStmt(ctx sessionctx.Context, s *ast.CreateTableStmt, dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)
BuildTableInfoWithStmt builds model.TableInfo from a SQL statement without performing validity checks.
func BuildViewInfo ¶
func BuildViewInfo(_ sessionctx.Context, s *ast.CreateViewStmt) (*model.ViewInfo, error)
BuildViewInfo builds a ViewInfo structure from an ast.CreateViewStmt.
func CancelJobs ¶
func CancelJobs(se sessionctx.Context, ids []int64) (errs []error, err error)
CancelJobs cancels the DDL jobs according to user command.
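The two return values are easy to conflate: `err` reports a failure of the cancel operation itself, while `errs` carries one entry per requested job ID. A hedged sketch of a wrapper (the helper name is made up; the session handle is assumed to come from the caller):

    package ddlexample

    import (
        "log"

        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/sessionctx"
    )

    // cancelDDLJobs is a hypothetical wrapper around ddl.CancelJobs.
    func cancelDDLJobs(sctx sessionctx.Context, ids []int64) error {
        errs, err := ddl.CancelJobs(sctx, ids)
        if err != nil {
            return err // the cancel request itself failed
        }
        for i, e := range errs {
            if e != nil {
                // Per-job failure, e.g. the job has already finished.
                log.Printf("cancel job %d failed: %v", ids[i], e)
            }
        }
        return nil
    }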
func CancelJobsBySystem ¶
func CancelJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
CancelJobsBySystem cancels Jobs because of internal reasons.
func CheckAfterPositionExists ¶
func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error
CheckAfterPositionExists makes sure the column specified in the AFTER clause exists. For example: ALTER TABLE t ADD COLUMN c3 INT AFTER c1.
func CheckDropTablePartition ¶
CheckDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table.
func CheckIsDropPrimaryKey ¶
func CheckIsDropPrimaryKey(indexName model.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error)
CheckIsDropPrimaryKey checks if we will drop PK, there are many PK implementations so we provide a helper function.
func CheckPKOnGeneratedColumn ¶
func CheckPKOnGeneratedColumn(tblInfo *model.TableInfo, indexPartSpecifications []*ast.IndexPartSpecification) (*model.ColumnInfo, error)
CheckPKOnGeneratedColumn checks the specification of PK is valid.
func CheckPlacementPolicyNotInUseFromInfoSchema ¶
func CheckPlacementPolicyNotInUseFromInfoSchema(is infoschema.InfoSchema, policy *model.PolicyInfo) error
CheckPlacementPolicyNotInUseFromInfoSchema is exported for testing.
func CheckPlacementPolicyNotInUseFromMeta ¶
func CheckPlacementPolicyNotInUseFromMeta(t *meta.Meta, policy *model.PolicyInfo) error
CheckPlacementPolicyNotInUseFromMeta is exported for testing.
func CheckTableInfoValidWithStmt ¶
func CheckTableInfoValidWithStmt(ctx sessionctx.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) (err error)
CheckTableInfoValidWithStmt exposes checkTableInfoValidWithStmt to SchemaTracker. Maybe one day we can delete it.
func CleanupDDLReorgHandles ¶
CleanupDDLReorgHandles removes the job reorganization related handles.
func ConvertBetweenCharAndVarchar ¶
ConvertBetweenCharAndVarchar checks whether a column is converted between CHAR and VARCHAR. TODO: it is used by plugins, so change the plugins' usage and then remove it.
func CreateNewColumn ¶
func CreateNewColumn(ctx sessionctx.Context, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error)
CreateNewColumn creates a new column according to the column information.
func DisableTiFlashPoll ¶
func DisableTiFlashPoll(d any)
DisableTiFlashPoll disables TiFlash poll loop aka PollTiFlashReplicaStatus.
func DropIndexColumnFlag ¶
DropIndexColumnFlag drops the column flag of columns in TableInfo according to the IndexInfo.
func EnableTiFlashPoll ¶
func EnableTiFlashPoll(d any)
EnableTiFlashPoll enables TiFlash poll loop aka PollTiFlashReplicaStatus.
func ExtractTblInfos ¶
func ExtractTblInfos(is infoschema.InfoSchema, oldIdent, newIdent ast.Ident, isAlterTable bool, tables map[string]int64) ([]*model.DBInfo, int64, error)
ExtractTblInfos extracts the table information from the infoschema.
func FindColumnNamesInExpr ¶
func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName
FindColumnNamesInExpr returns a slice of ast.ColumnName which is referred in expr.
func FindRelatedIndexesToChange ¶
FindRelatedIndexesToChange finds the indexes that cover the given column. The normal index will be overridden by the temporary one.
func GeneratePartDefsFromInterval ¶
func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTableType, tbInfo *model.TableInfo, partitionOptions *ast.PartitionOptions) error
GeneratePartDefsFromInterval generates range partitions from INTERVAL partitioning. Handles
- CREATE TABLE: all partitions are generated
- ALTER TABLE FIRST PARTITION (expr): drops all partitions before the partition matching the expression (i.e. sets that partition as the new first partition); it returns the partitions from the old FIRST partition up to and including the new FIRST partition
- ALTER TABLE LAST PARTITION (expr): creates new partitions from (and excluding) the old LAST partition up to (and including) the new LAST partition
The generated partition definitions are set on partitionOptions.
func GetAllDDLJobs ¶
func GetAllDDLJobs(se sessionctx.Context) ([]*model.Job, error)
GetAllDDLJobs gets all DDL jobs and sorts them by job.ID.
func GetAllHistoryDDLJobs ¶
GetAllHistoryDDLJobs gets all the finished DDL jobs.
func GetCharsetAndCollateInTableOption ¶
func GetCharsetAndCollateInTableOption(sessVars *variable.SessionVars, startIdx int, options []*ast.TableOption) (chs, coll string, err error)
GetCharsetAndCollateInTableOption iterates over the charset and collate in the options and returns the last charset and collate found. If there is no charset in the options, the returned charset is "", and the same holds for the collate.
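A hedged sketch of the "last option wins" rule; a sessionctx.Context is assumed to be available from the caller, the helper name is made up, and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser/ast"
        "github.com/pingcap/tidb/pkg/sessionctx"
    )

    // lastCharsetAndCollate resolves CHARSET/COLLATE from a list of table options.
    func lastCharsetAndCollate(sctx sessionctx.Context) (chs, coll string, err error) {
        opts := []*ast.TableOption{
            {Tp: ast.TableOptionCharset, StrValue: "utf8"},
            {Tp: ast.TableOptionCharset, StrValue: "utf8mb4"},
            {Tp: ast.TableOptionCollate, StrValue: "utf8mb4_general_ci"},
        }
        // Per the rule above, the last charset and collate in the options win,
        // so this is expected to yield ("utf8mb4", "utf8mb4_general_ci").
        return ddl.GetCharsetAndCollateInTableOption(sctx.GetSessionVars(), 0, opts)
    }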
func GetColumnForeignKeyInfo ¶
GetColumnForeignKeyInfo returns the foreign key info for the given column name.
func GetCustomizedHook ¶
func GetCustomizedHook(s string) (func(do DomainReloader) Callback, error)
GetCustomizedHook gets the hook registered in the hookMap.
func GetDefaultCollation ¶
func GetDefaultCollation(sessVars *variable.SessionVars, cs string) (string, error)
GetDefaultCollation returns the default collation for the given charset and handles the default collation for utf8mb4.
func GetDropOrTruncateTableInfoFromJobsByStore ¶
func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, getTable func(uint64, int64, int64) (*model.TableInfo, error), fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error)
GetDropOrTruncateTableInfoFromJobsByStore implements GetDropOrTruncateTableInfoFromJobs
func GetFlashbackKeyRanges ¶
GetFlashbackKeyRanges gets the key ranges for FLASHBACK CLUSTER. The result contains all non-system table key ranges and metadata key ranges. The time complexity is O(n log n).
func GetHistoryJobByID ¶
GetHistoryJobByID returns the history DDL job by ID.
func GetLastHistoryDDLJobsIterator ¶
func GetLastHistoryDDLJobsIterator(m *meta.Meta) (meta.LastJobIterator, error)
GetLastHistoryDDLJobsIterator gets an iterator over the latest N history DDL jobs.
func GetLastNHistoryDDLJobs ¶
GetLastNHistoryDDLJobs returns the DDL history jobs and an error. The maximum number of jobs returned is maxNumJobs.
func GetModifiableColumnJob ¶
func GetModifiableColumnJob( ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, ident ast.Ident, originalColName model.CIStr, schema *model.DBInfo, t table.Table, spec *ast.AlterTableSpec, ) (*model.Job, error)
GetModifiableColumnJob returns a DDL job of model.ActionModifyColumn.
func GetName4AnonymousIndex ¶
GetName4AnonymousIndex returns a valid name for anonymous index.
func GetOriginDefaultValueForModifyColumn ¶
func GetOriginDefaultValueForModifyColumn(sessCtx sessionctx.Context, changingCol, oldCol *model.ColumnInfo) (any, error)
GetOriginDefaultValueForModifyColumn gets the original default value for a column being modified. Column type change is implemented by adding a new column and then substituting it for the old one, so an UPDATE ... WHERE statement may fetch NULL for a NOT NULL column that has no default data and then fail. We therefore set an original default value here to prevent that error: if oldCol has an original default value we use it; otherwise we use the zero value as the original default value. Besides, when inserting and updating records, we already use the casted value of the corresponding column rather than the original default value.
func GetRangeEndKey ¶
func GetRangeEndKey(ctx *JobContext, store kv.Storage, priority int, keyPrefix kv.Key, startKey, endKey kv.Key) (kv.Key, error)
GetRangeEndKey gets the actual end key for the range of [startKey, endKey).
func GetRangePlacementPolicyName ¶
GetRangePlacementPolicyName gets the placement policy name used by the range. rangeBundleID is limited to TiDBBundleRangePrefixForGlobal and TiDBBundleRangePrefixForMeta.
func GetTableDataKeyRanges ¶
GetTableDataKeyRanges gets key ranges by `flashbackIDs`. It returns all flashback table data key ranges.
func GetTableInfoAndCancelFaultJob ¶
func GetTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error)
GetTableInfoAndCancelFaultJob is exported for test.
func GetWaitTimeWhenErrorOccurred ¶
GetWaitTimeWhenErrorOccurred returns the waiting interval used when processing DDL jobs encounters errors.
func InitAndAddColumnToTable ¶
func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo
InitAndAddColumnToTable initializes the ColumnInfo in-place and adds it to the table.
func IsAutoRandomColumnID ¶
IsAutoRandomColumnID returns true if the given column ID belongs to an auto_random column.
func IsColumnDroppableWithCheckConstraint ¶
IsColumnDroppableWithCheckConstraint checks whether the column can be dropped: it reports an error if the column is used in a check constraint that depends on more than one column.
func IsColumnRenameableWithCheckConstraint ¶
IsColumnRenameableWithCheckConstraint checks whether the column can be renamed by verifying whether it is referenced in any check constraint.
func IsElemsChangedToModifyColumn ¶
IsElemsChangedToModifyColumn checks whether the enum/set elements have changed.
func IterAllDDLJobs ¶
func IterAllDDLJobs(ctx sessionctx.Context, txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error
IterAllDDLJobs iterates the running DDL jobs first and returns directly if `finishFn` returns true or an error; it then iterates the history DDL jobs until `finishFn` returns true or an error.
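A sketch of a finishFn that stops the iteration early, here after collecting a fixed number of jobs; the helper name and the limit are illustrative only, and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/kv"
        "github.com/pingcap/tidb/pkg/parser/model"
        "github.com/pingcap/tidb/pkg/sessionctx"
    )

    // collectJobs gathers up to limit jobs: running jobs first, then history jobs.
    func collectJobs(sctx sessionctx.Context, txn kv.Transaction, limit int) ([]*model.Job, error) {
        var jobs []*model.Job
        finishFn := func(batch []*model.Job) (bool, error) {
            for _, job := range batch {
                jobs = append(jobs, job)
                if len(jobs) >= limit {
                    return true, nil // stop iterating
                }
            }
            return false, nil // ask for the next batch
        }
        if err := ddl.IterAllDDLJobs(sctx, txn, finishFn); err != nil {
            return nil, err
        }
        return jobs, nil
    }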
func IterHistoryDDLJobs ¶
IterHistoryDDLJobs iterates the history DDL jobs until `finishFn` returns true or an error.
func JobNeedGC ¶
JobNeedGC is called to determine whether delete-ranges need to be generated for the provided job.
NOTICE: BR also uses jobNeedGC to determine whether delete-ranges need to be generated for the provided job. Therefore, please make sure any modification is compatible with BR.
func LoadTiFlashReplicaInfo ¶
func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus)
LoadTiFlashReplicaInfo parses model.TableInfo into []TiFlashReplicaStatus.
func LocateOffsetToMove ¶
func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error)
LocateOffsetToMove returns the offset of the column to move.
func MockTableInfo ¶
func MockTableInfo(ctx sessionctx.Context, stmt *ast.CreateTableStmt, tableID int64) (*model.TableInfo, error)
MockTableInfo mocks a table info by create table stmt ast and a specified table id.
func NeedToOverwriteColCharset ¶
func NeedToOverwriteColCharset(options []*ast.TableOption) bool
NeedToOverwriteColCharset returns true when altering the charset with CONVERT TO specified.
func NewAddIndexIngestPipeline ¶
func NewAddIndexIngestPipeline( ctx *OperatorCtx, store kv.Storage, sessPool opSessPool, backendCtx ingest.BackendCtx, engines []ingest.Engine, sessCtx sessionctx.Context, jobID int64, tbl table.PhysicalTable, idxInfos []*model.IndexInfo, startKey, endKey kv.Key, totalRowCount *atomic.Int64, metricCounter prometheus.Counter, reorgMeta *model.DDLReorgMeta, avgRowSize int, concurrency int, ) (*operator.AsyncPipeline, error)
NewAddIndexIngestPipeline creates a pipeline for adding index in ingest mode.
func NewBackfillingSchedulerExt ¶
NewBackfillingSchedulerExt creates a new backfillingSchedulerExt, only used for test now.
func NewDDLReorgMeta ¶
func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta
NewDDLReorgMeta creates a DDL ReorgMeta.
func NewMockSchemaSyncer ¶
func NewMockSchemaSyncer() syncer.SchemaSyncer
NewMockSchemaSyncer creates a new mock SchemaSyncer.
func NewMockStateSyncer ¶
func NewMockStateSyncer() syncer.StateSyncer
NewMockStateSyncer creates a new mock StateSyncer.
func NewReorgHandlerForTest ¶
func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler
NewReorgHandlerForTest creates a new reorgHandler, only used in test.
func NewWriteIndexToExternalStoragePipeline ¶
func NewWriteIndexToExternalStoragePipeline( ctx *OperatorCtx, store kv.Storage, extStoreURI string, sessPool opSessPool, sessCtx sessionctx.Context, jobID, subtaskID int64, tbl table.PhysicalTable, idxInfos []*model.IndexInfo, startKey, endKey kv.Key, totalRowCount *atomic.Int64, metricCounter prometheus.Counter, onClose external.OnCloseFunc, reorgMeta *model.DDLReorgMeta, avgRowSize int, concurrency int, resource *proto.StepResource, ) (*operator.AsyncPipeline, error)
NewWriteIndexToExternalStoragePipeline creates a pipeline for writing index to external storage.
func OverwriteCollationWithBinaryFlag ¶
func OverwriteCollationWithBinaryFlag(sessVars *variable.SessionVars, colDef *ast.ColumnDef, chs, coll string) (newChs string, newColl string)
OverwriteCollationWithBinaryFlag is used to handle the case like
CREATE TABLE t (a VARCHAR(255) BINARY) CHARSET utf8 COLLATE utf8_general_ci;
The 'BINARY' sets the column collation to *_bin according to the table charset.
func PauseAllJobsBySystem ¶
func PauseAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
PauseAllJobsBySystem pauses all running Jobs because of internal reasons.
func PauseJobs ¶
func PauseJobs(se sessionctx.Context, ids []int64) ([]error, error)
PauseJobs pauses the DDL jobs according to user command.
func PauseJobsBySystem ¶
func PauseJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
PauseJobsBySystem pauses Jobs because of internal reasons.
func PollAvailableTableProgress ¶
func PollAvailableTableProgress(schemas infoschema.InfoSchema, _ sessionctx.Context, pollTiFlashContext *TiFlashManagementContext)
PollAvailableTableProgress polls and checks the progress of available tables.
func ProcessColumnCharsetAndCollation ¶
func ProcessColumnCharsetAndCollation(sctx sessionctx.Context, col *table.Column, newCol *table.Column, meta *model.TableInfo, specNewColumn *ast.ColumnDef, schema *model.DBInfo) error
ProcessColumnCharsetAndCollation processes the column charset and collation.
func ProcessModifyColumnOptions ¶
func ProcessModifyColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error
ProcessModifyColumnOptions process column options.
func RemoveDependentHiddenColumns ¶
RemoveDependentHiddenColumns removes hidden columns by the indexInfo.
func ResolveAlterAlgorithm ¶
func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error)
ResolveAlterAlgorithm resolves the algorithm of the alterSpec. If specify is ast.AlterAlgorithmDefault, the default algorithm of the alter action is returned. If the specified algorithm is not supported by the alter action, it tries to find a better algorithm in the order `INSTANT > INPLACE > COPY` and returns errAlterOperationNotSupported; e.g. INSTANT may be returned when specify=INPLACE. If no valid algorithm can be chosen, AlgorithmTypeDefault and errAlterOperationNotSupported are returned.
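A small sketch of the fallback behavior described above; the wrapper name is hypothetical and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "log"

        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser/ast"
    )

    // pickAlgorithm asks for INPLACE and reports whatever the alter action supports.
    func pickAlgorithm(spec *ast.AlterTableSpec) (ast.AlgorithmType, error) {
        algo, err := ddl.ResolveAlterAlgorithm(spec, ast.AlgorithmTypeInplace)
        if err != nil {
            // The alter action does not support INPLACE; per the doc above, algo
            // may still carry a better choice (e.g. INSTANT) or AlgorithmTypeDefault.
            log.Printf("INPLACE not supported, suggested algorithm %v: %v", algo, err)
        }
        return algo, err
    }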
func ResolveAlterTableSpec ¶
func ResolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error)
ResolveAlterTableSpec resolves the ALTER TABLE algorithm and removes ignored table specs from specs. It returns the valid specs and any error that occurred.
func ResolveCharsetCollation ¶
func ResolveCharsetCollation(sessVars *variable.SessionVars, charsetOpts ...ast.CharsetOpt) (chs string, coll string, err error)
ResolveCharsetCollation will resolve the charset and collate by the order of parameters: * If any given ast.CharsetOpt is not empty, the resolved charset and collate will be returned. * If all ast.CharsetOpts are empty, the default charset and collate will be returned.
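A hedged sketch of the parameter-order rule: earlier ast.CharsetOpt values take priority and later ones act as fallbacks. The helper name is made up, the session handle is assumed to come from the caller, and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser/ast"
        "github.com/pingcap/tidb/pkg/sessionctx"
    )

    // columnCharset resolves a column's charset/collation by falling back from
    // the column option to the table default, then the database default.
    func columnCharset(sctx sessionctx.Context, colChs, tblChs, dbChs string) (string, string, error) {
        return ddl.ResolveCharsetCollation(sctx.GetSessionVars(),
            ast.CharsetOpt{Chs: colChs}, // highest priority, may be empty
            ast.CharsetOpt{Chs: tblChs},
            ast.CharsetOpt{Chs: dbChs}, // lowest priority
        )
    }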
func ResumeAllJobsBySystem ¶
func ResumeAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)
ResumeAllJobsBySystem resumes all paused Jobs because of internal reasons.
func ResumeJobs ¶
func ResumeJobs(se sessionctx.Context, ids []int64) ([]error, error)
ResumeJobs resumes the DDL jobs according to user command.
func ResumeJobsBySystem ¶
func ResumeJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)
ResumeJobsBySystem resumes Jobs that are paused by TiDB itself.
func ScanHistoryDDLJobs ¶
ScanHistoryDDLJobs gets a portion of the finished DDL jobs. When the DDL history is very large, the GetAllHistoryDDLJobs() API does not work well because it can make the server OOM. The result is in descending order of job ID.
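A hedged sketch of fetching only the most recent history entries instead of the whole history; whether a startJobID of 0 means "start from the newest job" is an assumption here, and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "fmt"

        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/meta"
    )

    // printRecentHistory is a hypothetical helper listing the latest DDL jobs.
    func printRecentHistory(m *meta.Meta, limit int) error {
        // startJobID = 0 is assumed to start from the newest job.
        jobs, err := ddl.ScanHistoryDDLJobs(m, 0, limit)
        if err != nil {
            return err
        }
        for _, job := range jobs { // descending job-ID order
            fmt.Printf("job %d: %s (%s)\n", job.ID, job.Type, job.State)
        }
        return nil
    }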
func SendFlashbackToVersionRPC ¶
func SendFlashbackToVersionRPC( ctx context.Context, s tikv.Storage, version uint64, startTS, commitTS uint64, r tikvstore.KeyRange, ) (rangetask.TaskStat, error)
SendFlashbackToVersionRPC flashes back the MVCC keys to the given version. This function is also called by BR for volume snapshot backup and restore.
func SendPrepareFlashbackToVersionRPC ¶
func SendPrepareFlashbackToVersionRPC( ctx context.Context, s tikv.Storage, flashbackTS, startTS uint64, r tikvstore.KeyRange, ) (rangetask.TaskStat, error)
SendPrepareFlashbackToVersionRPC prepares regions for flashback: it puts the regions into the flashback state, in which they stop serving writes. This function is also called by BR for volume snapshot backup and restore.
func SetBackfillTaskChanSizeForTest ¶
func SetBackfillTaskChanSizeForTest(n int)
SetBackfillTaskChanSizeForTest is only used for test.
func SetBatchInsertDeleteRangeSize ¶
func SetBatchInsertDeleteRangeSize(i int)
SetBatchInsertDeleteRangeSize sets the batch insert/delete range size for tests.
func SetDefaultValue ¶
func SetDefaultValue(ctx sessionctx.Context, col *table.Column, option *ast.ColumnOption) (hasDefaultValue bool, err error)
SetDefaultValue sets the default value of the column.
func SetDirectPlacementOpt ¶
func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, placementOptionType ast.PlacementOptionType, stringVal string, uintVal uint64) error
SetDirectPlacementOpt tries to make the PlacementSettings assignments generic for Schema/Table/Partition
func SetDirectResourceGroupBackgroundOption ¶
func SetDirectResourceGroupBackgroundOption(resourceGroupSettings *model.ResourceGroupSettings, opt *ast.ResourceGroupBackgroundOption) error
SetDirectResourceGroupBackgroundOption set background configs of the ResourceGroupSettings.
func SetDirectResourceGroupRunawayOption ¶
func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGroupSettings, typ ast.RunawayOptionType, stringVal string, intVal int32) error
SetDirectResourceGroupRunawayOption tries to set runaway part of the ResourceGroupSettings.
func SetDirectResourceGroupSettings ¶
func SetDirectResourceGroupSettings(groupInfo *model.ResourceGroupInfo, opt *ast.ResourceGroupOption) error
SetDirectResourceGroupSettings tries to set the ResourceGroupSettings.
func SetIdxColNameOffset ¶
func SetIdxColNameOffset(idxCol *model.IndexColumn, changingCol *model.ColumnInfo)
SetIdxColNameOffset sets index column name and offset from changing ColumnInfo.
func SetWaitTimeWhenErrorOccurred ¶
SetWaitTimeWhenErrorOccurred updates the waiting interval used when processing DDL jobs encounters errors.
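Together with GetWaitTimeWhenErrorOccurred, this makes it possible to shrink the retry interval temporarily, e.g. in tests. A hedged sketch (the helper name is made up; the import path assumes the pkg/ layout):

    package ddlexample

    import (
        "time"

        "github.com/pingcap/tidb/pkg/ddl"
    )

    // withShortDDLRetryInterval shrinks the interval the DDL workers wait after
    // an error, runs fn, then restores the previous value.
    func withShortDDLRetryInterval(fn func()) {
        old := ddl.GetWaitTimeWhenErrorOccurred()
        ddl.SetWaitTimeWhenErrorOccurred(50 * time.Millisecond)
        defer ddl.SetWaitTimeWhenErrorOccurred(old)
        fn()
    }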
func ShouldBuildClusteredIndex ¶
func ShouldBuildClusteredIndex(ctx sessionctx.Context, opt *ast.IndexOption, isSingleIntPK bool) bool
ShouldBuildClusteredIndex is used to determine whether the CREATE TABLE statement should build a clustered index table.
func SplitRecordRegion ¶
func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatter bool) uint64
SplitRecordRegion splits a region in the store by the table prefix.
func UpdateColsNull2NotNull ¶
UpdateColsNull2NotNull changes the null option of columns of an index.
func ValidateFlashbackTS ¶
ValidateFlashbackTS validates that flashBackTS in range [gcSafePoint, currentTS).
func ValidateRenameIndex ¶
ValidateRenameIndex checks if index name is ok to be renamed.
func WaitScatterRegionFinish ¶
func WaitScatterRegionFinish(ctx context.Context, store kv.SplittableStore, regionIDs ...uint64)
WaitScatterRegionFinish will block until all regions are scattered.
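SplitRecordRegion and WaitScatterRegionFinish are typically used together when pre-splitting a newly created table. A hedged sketch; treating a zero region ID as "nothing to wait for" is an assumption, and the import paths assume the pkg/ layout:

    package ddlexample

    import (
        "context"

        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/kv"
    )

    // splitAndScatter pre-splits the record region of a table and, since scatter
    // was requested, blocks until PD finishes moving it.
    func splitAndScatter(ctx context.Context, store kv.SplittableStore, physicalID, tableID int64) {
        regionID := ddl.SplitRecordRegion(ctx, store, physicalID, tableID, true /* scatter */)
        if regionID != 0 { // assumption: 0 means the split was skipped or failed
            ddl.WaitScatterRegionFinish(ctx, store, regionID)
        }
    }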
Types ¶
type AllocTableIDIf ¶
AllocTableIDIf specifies whether to retain the old table ID. If this returns "false", then we would assume the table ID has been allocated before calling `CreateTableWithInfo` family.
func (AllocTableIDIf) Apply ¶
func (a AllocTableIDIf) Apply(c *CreateTableWithInfoConfig)
Apply implements Configurier.
type AlterAlgorithm ¶
type AlterAlgorithm struct {
// contains filtered or unexported fields
}
AlterAlgorithm is used to store the supported alter algorithms. For now, TiDB only supports AlterAlgorithmInplace and AlterAlgorithmInstant. Most alter operations use the instant algorithm; only add index uses inplace (not truly in place, because DML is never blocked, but some time is spent backfilling the index data). See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.
type AvailableTableID ¶
AvailableTableID is the table ID info of an available table that is waiting for its TiFlash replica progress to be updated.
type BackfillCleanUpS3 ¶
type BackfillCleanUpS3 struct { }
BackfillCleanUpS3 implements scheduler.CleanUpRoutine.
type BackfillSubTaskMeta ¶
type BackfillSubTaskMeta struct {
    PhysicalTableID int64 `json:"physical_table_id"`

    // Used by read index step.
    RowStart []byte `json:"row_start"`
    RowEnd   []byte `json:"row_end"`

    // Used by global sort write & ingest step.
    RangeJobKeys   [][]byte `json:"range_job_keys,omitempty"`
    RangeSplitKeys [][]byte `json:"range_split_keys,omitempty"`
    DataFiles      []string `json:"data-files,omitempty"`
    StatFiles      []string `json:"stat-files,omitempty"`
    TS             uint64   `json:"ts,omitempty"`
    // Each group of MetaGroups represents a different index kvs meta.
    MetaGroups []*external.SortedKVMeta `json:"meta_groups,omitempty"`

    // Only used for adding one single index.
    // Keep this for compatibility with v7.5.
    external.SortedKVMeta `json:",inline"`
}
BackfillSubTaskMeta is the sub-task meta for backfilling index.
type BackfillTaskMeta ¶
type BackfillTaskMeta struct {
    Job model.Job `json:"job"`
    // EleIDs stands for the index/column IDs to backfill with distributed framework.
    EleIDs []int64 `json:"ele_ids"`
    // EleTypeKey is the type of the element to backfill with distributed framework.
    // For now, only index type is supported.
    EleTypeKey []byte `json:"ele_type_key"`

    CloudStorageURI string `json:"cloud_storage_uri"`
    EstimateRowSize int    `json:"estimate_row_size"`
}
BackfillTaskMeta is the dist task meta for backfilling index.
type BackfillingSchedulerExt ¶
type BackfillingSchedulerExt struct { GlobalSort bool // contains filtered or unexported fields }
BackfillingSchedulerExt is an extension of litBackfillScheduler, exported for test.
func (*BackfillingSchedulerExt) GetEligibleInstances ¶
func (*BackfillingSchedulerExt) GetEligibleInstances(_ context.Context, _ *proto.Task) ([]string, error)
GetEligibleInstances implements scheduler.Extension interface.
func (*BackfillingSchedulerExt) GetNextStep ¶
func (sch *BackfillingSchedulerExt) GetNextStep(task *proto.TaskBase) proto.Step
GetNextStep implements scheduler.Extension interface.
func (*BackfillingSchedulerExt) IsRetryableErr ¶
func (*BackfillingSchedulerExt) IsRetryableErr(error) bool
IsRetryableErr implements scheduler.Extension.IsRetryableErr interface.
func (*BackfillingSchedulerExt) OnDone ¶
func (*BackfillingSchedulerExt) OnDone(_ context.Context, _ diststorage.TaskHandle, _ *proto.Task) error
OnDone implements scheduler.Extension interface.
func (*BackfillingSchedulerExt) OnNextSubtasksBatch ¶
func (sch *BackfillingSchedulerExt) OnNextSubtasksBatch( ctx context.Context, taskHandle diststorage.TaskHandle, task *proto.Task, execIDs []string, nextStep proto.Step, ) (taskMeta [][]byte, err error)
OnNextSubtasksBatch generate batch of next step's plan.
type BaseCallback ¶
type BaseCallback struct { }
BaseCallback implements Callback.OnChanged interface.
func (*BaseCallback) OnChanged ¶
func (*BaseCallback) OnChanged(err error) error
OnChanged implements Callback interface.
func (*BaseCallback) OnGetJobAfter ¶
func (*BaseCallback) OnGetJobAfter(_ string, _ *model.Job)
OnGetJobAfter implements Callback.OnGetJobAfter interface.
func (*BaseCallback) OnGetJobBefore ¶
func (*BaseCallback) OnGetJobBefore(_ string)
OnGetJobBefore implements Callback.OnGetJobBefore interface.
func (*BaseCallback) OnJobRunAfter ¶
func (*BaseCallback) OnJobRunAfter(_ *model.Job)
OnJobRunAfter implements Callback.OnJobRunAfter interface.
func (*BaseCallback) OnJobRunBefore ¶
func (*BaseCallback) OnJobRunBefore(_ *model.Job)
OnJobRunBefore implements Callback.OnJobRunBefore interface.
func (*BaseCallback) OnJobUpdated ¶
func (*BaseCallback) OnJobUpdated(_ *model.Job)
OnJobUpdated implements Callback.OnJobUpdated interface.
func (*BaseCallback) OnSchemaStateChanged ¶
func (*BaseCallback) OnSchemaStateChanged(_ int64)
OnSchemaStateChanged implements Callback interface.
func (*BaseCallback) OnUpdateReorgInfo ¶
func (*BaseCallback) OnUpdateReorgInfo(_ *model.Job, _ int64)
OnUpdateReorgInfo implements ReorgCallback interface.
func (*BaseCallback) OnWatched ¶
func (*BaseCallback) OnWatched(_ context.Context)
OnWatched implements Callback.OnWatched interface.
type BaseInterceptor ¶
type BaseInterceptor struct{}
BaseInterceptor implements Interceptor.
func (*BaseInterceptor) OnGetInfoSchema ¶
func (*BaseInterceptor) OnGetInfoSchema(_ sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema
OnGetInfoSchema implements Interceptor.OnGetInfoSchema interface.
type Callback ¶
type Callback interface {
    ReorgCallback
    // OnChanged is called after a ddl statement is finished.
    OnChanged(err error) error
    // OnSchemaStateChanged is called after a schema state is changed.
    OnSchemaStateChanged(schemaVer int64)
    // OnJobRunBefore is called before running job.
    OnJobRunBefore(job *model.Job)
    // OnJobRunAfter is called after running job.
    OnJobRunAfter(job *model.Job)
    // OnJobUpdated is called after the running job is updated.
    OnJobUpdated(job *model.Job)
    // OnWatched is called after watching owner is completed.
    OnWatched(ctx context.Context)
    // OnGetJobBefore is called before getting job.
    OnGetJobBefore(jobType string)
    // OnGetJobAfter is called after getting job.
    OnGetJobAfter(jobType string, job *model.Job)
}
Callback is used for DDL.
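Custom callbacks usually embed *BaseCallback so that only the interesting hooks need overriding. A hedged sketch of a test-style hook (the type and field names are made up; import paths assume the pkg/ layout):

    package ddlexample

    import (
        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser/model"
    )

    // jobLoggingCallback overrides a single hook and inherits no-op defaults
    // (including ReorgCallback) from the embedded *ddl.BaseCallback.
    type jobLoggingCallback struct {
        *ddl.BaseCallback
        onJobRun func(job *model.Job)
    }

    var _ ddl.Callback = (*jobLoggingCallback)(nil)

    // OnJobRunBefore is invoked before each job run; here it just forwards the job.
    func (c *jobLoggingCallback) OnJobRunBefore(job *model.Job) {
        if c.onJobRun != nil {
            c.onJobRun(job)
        }
    }

    // Installed via the DDL interface's SetHook (exported for testing):
    //   d.SetHook(&jobLoggingCallback{BaseCallback: &ddl.BaseCallback{}})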
type CreateTableWithInfoConfig ¶
type CreateTableWithInfoConfig struct { OnExist OnExist ShouldAllocTableID AllocTableIDIf }
CreateTableWithInfoConfig is the configuration of `CreateTableWithInfo`.
func GetCreateTableWithInfoConfig ¶
func GetCreateTableWithInfoConfig(cs []CreateTableWithInfoConfigurier) CreateTableWithInfoConfig
GetCreateTableWithInfoConfig applies the series of configuriers to the default config and returns the final config.
type CreateTableWithInfoConfigurier ¶
type CreateTableWithInfoConfigurier interface { // Apply the change over the config. Apply(*CreateTableWithInfoConfig) }
CreateTableWithInfoConfigurier is the "diff" which can be applied to the CreateTableWithInfoConfig, currently implementations are "OnExist" and "AllocTableIDIf".
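A hedged sketch of combining the two configuriers. OnExistIgnore is assumed to be one of the exported OnExist values, and the function shape used for AllocTableIDIf follows the "If this returns false" wording above; both are assumptions, as are the pkg/-layout import paths:

    package ddlexample

    import (
        "github.com/pingcap/tidb/pkg/ddl"
        "github.com/pingcap/tidb/pkg/parser/model"
    )

    // resolveCreateConfig folds the configuriers into the final config, mirroring
    // what the CreateTableWithInfo family does with its variadic cs argument.
    func resolveCreateConfig() ddl.CreateTableWithInfoConfig {
        cs := []ddl.CreateTableWithInfoConfigurier{
            ddl.OnExistIgnore, // assumption: skip tables that already exist
            ddl.AllocTableIDIf(func(*model.TableInfo) bool { return true }),
        }
        return ddl.GetCreateTableWithInfoConfig(cs)
    }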
type DDL ¶
type DDL interface {
    CreateSchema(ctx sessionctx.Context, stmt *ast.CreateDatabaseStmt) error
    AlterSchema(sctx sessionctx.Context, stmt *ast.AlterDatabaseStmt) error
    DropSchema(ctx sessionctx.Context, stmt *ast.DropDatabaseStmt) error
    CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error
    CreateView(ctx sessionctx.Context, stmt *ast.CreateViewStmt) error
    DropTable(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
    RecoverTable(ctx sessionctx.Context, recoverInfo *RecoverInfo) (err error)
    RecoverSchema(ctx sessionctx.Context, recoverSchemaInfo *RecoverSchemaInfo) error
    DropView(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
    CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error
    DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error
    AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) error
    TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error
    RenameTable(ctx sessionctx.Context, stmt *ast.RenameTableStmt) error
    LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error
    UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error
    CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error
    UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error
    RepairTable(ctx sessionctx.Context, createStmt *ast.CreateTableStmt) error
    CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSequenceStmt) error
    DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error)
    AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) error
    CreatePlacementPolicy(ctx sessionctx.Context, stmt *ast.CreatePlacementPolicyStmt) error
    DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacementPolicyStmt) error
    AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacementPolicyStmt) error
    AddResourceGroup(ctx sessionctx.Context, stmt *ast.CreateResourceGroupStmt) error
    AlterResourceGroup(ctx sessionctx.Context, stmt *ast.AlterResourceGroupStmt) error
    DropResourceGroup(ctx sessionctx.Context, stmt *ast.DropResourceGroupStmt) error
    FlashbackCluster(ctx sessionctx.Context, flashbackTS uint64) error

    // CreateSchemaWithInfo creates a database (schema) given its database info.
    //
    // WARNING: the DDL owns the `info` after calling this function, and will modify its fields
    // in-place. If you want to keep using `info`, please call Clone() first.
    CreateSchemaWithInfo(ctx sessionctx.Context, info *model.DBInfo, onExist OnExist) error

    // CreateTableWithInfo creates a table, view or sequence given its table info.
    //
    // WARNING: the DDL owns the `info` after calling this function, and will modify its fields
    // in-place. If you want to keep using `info`, please call Clone() first.
    CreateTableWithInfo(ctx sessionctx.Context, schema model.CIStr, info *model.TableInfo, cs ...CreateTableWithInfoConfigurier) error

    // BatchCreateTableWithInfo is like CreateTableWithInfo, but can handle multiple tables.
    BatchCreateTableWithInfo(ctx sessionctx.Context, schema model.CIStr, info []*model.TableInfo, cs ...CreateTableWithInfoConfigurier) error

    // CreatePlacementPolicyWithInfo creates a placement policy
    //
    // WARNING: the DDL owns the `policy` after calling this function, and will modify its fields
    // in-place. If you want to keep using `policy`, please call Clone() first.
    CreatePlacementPolicyWithInfo(ctx sessionctx.Context, policy *model.PolicyInfo, onExist OnExist) error

    // Start campaigns the owner and starts workers.
    // ctxPool is used for the worker's delRangeManager and creates sessions.
    Start(ctxPool *pools.ResourcePool) error
    // GetLease returns current schema lease time.
    GetLease() time.Duration
    // Stats returns the DDL statistics.
    Stats(vars *variable.SessionVars) (map[string]any, error)
    // GetScope gets the status variables scope.
    GetScope(status string) variable.ScopeFlag
    // Stop stops DDL worker.
    Stop() error
    // RegisterStatsHandle registers statistics handle and its corresponding event channel for ddl.
    RegisterStatsHandle(*handle.Handle)
    // SchemaSyncer gets the schema syncer.
    SchemaSyncer() syncer.SchemaSyncer
    // StateSyncer gets the cluster state syncer.
    StateSyncer() syncer.StateSyncer
    // OwnerManager gets the owner manager.
    OwnerManager() owner.Manager
    // GetID gets the ddl ID.
    GetID() string
    // GetTableMaxHandle gets the max row ID of a normal table or a partition.
    GetTableMaxHandle(ctx *JobContext, startTS uint64, tbl table.PhysicalTable) (kv.Handle, bool, error)
    // SetBinlogClient sets the binlog client for DDL worker. It's exported for testing.
    SetBinlogClient(*pumpcli.PumpsClient)
    // GetHook gets the hook. It's exported for testing.
    GetHook() Callback
    // SetHook sets the hook.
    SetHook(h Callback)
    // GetInfoSchemaWithInterceptor gets the infoschema binding to d. It's exported for testing.
    GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema
    // DoDDLJob does the DDL job, it's exported for test.
    DoDDLJob(ctx sessionctx.Context, job *model.Job) error
}
DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache.
type DefaultCallback ¶
type DefaultCallback struct { *BaseCallback // contains filtered or unexported fields }
DefaultCallback is the default callback that TiDB will use.
func (*DefaultCallback) OnChanged ¶
func (c *DefaultCallback) OnChanged(err error) error
OnChanged overrides ddl Callback interface.
func (*DefaultCallback) OnSchemaStateChanged ¶
func (c *DefaultCallback) OnSchemaStateChanged(_ int64)
OnSchemaStateChanged overrides the ddl Callback interface.
type DelRangeExecWrapper ¶
type DelRangeExecWrapper interface {
    // UpdateTSOForJob generates a new tso for the next job.
    UpdateTSOForJob() error

    // PrepareParamsList initializes the paramsList.
    PrepareParamsList(sz int)

    // RewriteTableID rewrites the table ID if necessary; used for BR.
    RewriteTableID(tableID int64) (int64, bool)

    // AppendParamsList appends (job_id, element_id, start_key, end_key, ts);
    // ts is generated by the DelRangeExecWrapper itself.
    AppendParamsList(jobID, elemID int64, startKey, endKey string)

    // ConsumeDeleteRange consumes the delete range. For TiDB Server, it inserts rows into mysql.gc_delete_range.
    ConsumeDeleteRange(ctx context.Context, sql string) error
}
DelRangeExecWrapper consumes the delete ranges with the provided table ID(s) and index ID(s).
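A minimal sketch of a custom implementation that simply buffers the generated parameters and logs the SQL it is asked to consume; the type name and behavior are illustrative only, not the wrapper TiDB or BR actually uses:

package example

import (
	"context"
	"log"

	"github.com/pingcap/tidb/pkg/ddl"
)

// loggingDelRangeWrapper is a toy DelRangeExecWrapper that collects the
// delete-range parameters instead of writing them to mysql.gc_delete_range.
type loggingDelRangeWrapper struct {
	ts     uint64
	params [][]any
}

var _ ddl.DelRangeExecWrapper = (*loggingDelRangeWrapper)(nil)

func (w *loggingDelRangeWrapper) UpdateTSOForJob() error {
	w.ts++ // a real wrapper would fetch a fresh TSO from PD here
	return nil
}

func (w *loggingDelRangeWrapper) PrepareParamsList(sz int) {
	w.params = make([][]any, 0, sz)
}

func (w *loggingDelRangeWrapper) RewriteTableID(tableID int64) (int64, bool) {
	return tableID, true // no rewriting; BR would map old table IDs to new ones
}

func (w *loggingDelRangeWrapper) AppendParamsList(jobID, elemID int64, startKey, endKey string) {
	w.params = append(w.params, []any{jobID, elemID, startKey, endKey, w.ts})
}

func (w *loggingDelRangeWrapper) ConsumeDeleteRange(_ context.Context, sql string) error {
	log.Printf("delete range SQL %q with %d rows", sql, len(w.params))
	return nil
}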
type DomainReloader ¶
type DomainReloader interface {
Reload() error
}
DomainReloader is used to avoid import loop.
type IndexIngestOperator ¶
type IndexIngestOperator struct { *operator.AsyncOperator[IndexRecordChunk, IndexWriteResult] }
IndexIngestOperator writes index records to ingest engine.
func NewIndexIngestOperator ¶
func NewIndexIngestOperator(
	ctx *OperatorCtx,
	copCtx copr.CopContext,
	backendCtx ingest.BackendCtx,
	sessPool opSessPool,
	tbl table.PhysicalTable,
	indexes []table.Index,
	engines []ingest.Engine,
	srcChunkPool chan *chunk.Chunk,
	concurrency int,
	reorgMeta *model.DDLReorgMeta,
) *IndexIngestOperator
NewIndexIngestOperator creates a new IndexIngestOperator.
type IndexRecordChunk ¶
IndexRecordChunk contains one chunk read from the corresponding TableScanTask.
type IndexWriteResult ¶
IndexWriteResult contains the result of writing index records to ingest engine.
type Info ¶
type Info struct {
	SchemaVer   int64
	ReorgHandle kv.Key       // It's only used for DDL information.
	Jobs        []*model.Job // It's the currently running jobs.
}
Info is for DDL information.
func GetDDLInfo ¶
func GetDDLInfo(s sessionctx.Context) (*Info, error)
GetDDLInfo returns DDL information; it is only used for testing.
func GetDDLInfoWithNewTxn ¶
func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error)
GetDDLInfoWithNewTxn returns DDL information using a new txn.
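A small sketch of how a test might dump the currently running jobs, assuming a sessionctx.Context is already available from the test infrastructure:

package example

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/sessionctx"
)

// dumpRunningDDLJobs prints the schema version and every running DDL job.
func dumpRunningDDLJobs(se sessionctx.Context) error {
	info, err := ddl.GetDDLInfoWithNewTxn(se)
	if err != nil {
		return err
	}
	fmt.Printf("schema version: %d\n", info.SchemaVer)
	for _, job := range info.Jobs {
		fmt.Printf("running job %d: %s\n", job.ID, job.Type)
	}
	return nil
}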
type Interceptor ¶
type Interceptor interface {
	// OnGetInfoSchema is an intercept which is called in the function ddl.GetInfoSchema(). It is used in the tests.
	OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema
}
Interceptor is used for DDL.
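A minimal sketch of a test Interceptor that passes the infoschema through unchanged; a real test could return a wrapped or snapshotted schema here instead:

package example

import (
	"github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/infoschema"
	"github.com/pingcap/tidb/pkg/sessionctx"
)

// passThroughInterceptor returns the infoschema it is given, unmodified.
type passThroughInterceptor struct{}

var _ ddl.Interceptor = passThroughInterceptor{}

func (passThroughInterceptor) OnGetInfoSchema(_ sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema {
	return is
}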
type JobContext ¶
type JobContext struct {
// contains filtered or unexported fields
}
JobContext is the ddl job execution context.
type LitBackfillScheduler ¶
type LitBackfillScheduler struct {
	*scheduler.BaseScheduler
	// contains filtered or unexported fields
}
LitBackfillScheduler wraps BaseScheduler.
func (*LitBackfillScheduler) Close ¶
func (sch *LitBackfillScheduler) Close()
Close implements BaseScheduler interface.
func (*LitBackfillScheduler) Init ¶
func (sch *LitBackfillScheduler) Init() (err error)
Init implements BaseScheduler interface.
type LockTablesArg ¶
type LockTablesArg struct {
	LockTables    []model.TableLockTpInfo
	IndexOfLock   int
	UnlockTables  []model.TableLockTpInfo
	IndexOfUnlock int
	SessionInfo   model.SessionInfo
	IsCleanup     bool
}
LockTablesArg is the argument for LockTables, exported for test.
type MockSchemaSyncer ¶
type MockSchemaSyncer struct {
// contains filtered or unexported fields
}
MockSchemaSyncer is a mock schema syncer, it is exported for testing.
func (*MockSchemaSyncer) Close ¶
func (*MockSchemaSyncer) Close()
Close implements SchemaSyncer.Close interface.
func (*MockSchemaSyncer) CloseSession ¶
func (s *MockSchemaSyncer) CloseSession()
CloseSession closes the mock session; it is exported for testing.
func (*MockSchemaSyncer) Done ¶
func (s *MockSchemaSyncer) Done() <-chan struct{}
Done implements SchemaSyncer.Done interface.
func (*MockSchemaSyncer) GlobalVersionCh ¶
func (s *MockSchemaSyncer) GlobalVersionCh() clientv3.WatchChan
GlobalVersionCh implements SchemaSyncer.GlobalVersionCh interface.
func (*MockSchemaSyncer) Init ¶
func (s *MockSchemaSyncer) Init(_ context.Context) error
Init implements SchemaSyncer.Init interface.
func (*MockSchemaSyncer) OwnerCheckAllVersions ¶
func (s *MockSchemaSyncer) OwnerCheckAllVersions(ctx context.Context, jobID int64, latestVer int64) error
OwnerCheckAllVersions implements SchemaSyncer.OwnerCheckAllVersions interface.
func (*MockSchemaSyncer) OwnerUpdateGlobalVersion ¶
func (s *MockSchemaSyncer) OwnerUpdateGlobalVersion(_ context.Context, _ int64) error
OwnerUpdateGlobalVersion implements SchemaSyncer.OwnerUpdateGlobalVersion interface.
func (*MockSchemaSyncer) Restart ¶
func (s *MockSchemaSyncer) Restart(_ context.Context) error
Restart implements SchemaSyncer.Restart interface.
func (*MockSchemaSyncer) UpdateSelfVersion ¶
UpdateSelfVersion implements SchemaSyncer.UpdateSelfVersion interface.
func (*MockSchemaSyncer) WatchGlobalSchemaVer ¶
func (*MockSchemaSyncer) WatchGlobalSchemaVer(context.Context)
WatchGlobalSchemaVer implements SchemaSyncer.WatchGlobalSchemaVer interface.
type MockStateSyncer ¶
type MockStateSyncer struct {
// contains filtered or unexported fields
}
MockStateSyncer is a mock state syncer, it is exported for testing.
func (*MockStateSyncer) GetGlobalState ¶
GetGlobalState implements StateSyncer.GetGlobalState interface.
func (*MockStateSyncer) Init ¶
func (s *MockStateSyncer) Init(context.Context) error
Init implements StateSyncer.Init interface.
func (*MockStateSyncer) IsUpgradingState ¶
func (*MockStateSyncer) IsUpgradingState() bool
IsUpgradingState implements StateSyncer.IsUpgradingState interface.
func (*MockStateSyncer) Rewatch ¶
func (*MockStateSyncer) Rewatch(context.Context)
Rewatch implements StateSyncer.Rewatch interface.
func (*MockStateSyncer) UpdateGlobalState ¶
UpdateGlobalState implements StateSyncer.UpdateGlobalState interface.
func (*MockStateSyncer) WatchChan ¶
func (s *MockStateSyncer) WatchChan() clientv3.WatchChan
WatchChan implements StateSyncer.WatchChan interface.
type OnExist ¶
type OnExist uint8
OnExist specifies what to do when a new object has a name collision.
const (
	// OnExistError throws an error on name collision.
	OnExistError OnExist = iota
	// OnExistIgnore skips creating the new object.
	OnExistIgnore
	// OnExistReplace replaces the old object by the new object. This is only
	// supported by VIEWs at the moment. For other object types, this is
	// equivalent to OnExistError.
	OnExistReplace
)
func (OnExist) Apply ¶
func (o OnExist) Apply(c *CreateTableWithInfoConfig)
Apply implements Configurier.
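Because OnExist satisfies the configurier parameter through Apply, a value such as OnExistIgnore can be passed straight to CreateTableWithInfo. A hedged sketch, assuming a ddl.DDL handle, a session context, and a prepared *model.TableInfo:

package example

import (
	"github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/parser/model"
	"github.com/pingcap/tidb/pkg/sessionctx"
)

// createTableIgnoringCollision creates the table described by tblInfo in the
// `test` schema and silently skips creation if the name already exists.
func createTableIgnoringCollision(d ddl.DDL, sctx sessionctx.Context, tblInfo *model.TableInfo) error {
	return d.CreateTableWithInfo(sctx, model.NewCIStr("test"), tblInfo, ddl.OnExistIgnore)
}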
type OperatorCtx ¶
OperatorCtx is the context for AddIndexIngestPipeline. This is used to cancel the pipeline and collect errors.
func NewOperatorCtx ¶
func NewOperatorCtx(ctx context.Context, taskID, subtaskID int64) *OperatorCtx
NewOperatorCtx creates a new OperatorCtx.
func (*OperatorCtx) OperatorErr ¶
func (ctx *OperatorCtx) OperatorErr() error
OperatorErr returns the error of the operator.
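A short sketch of the OperatorCtx lifecycle: create it for a (task, subtask) pair, run the pipeline under it, then surface any error the operators collected. The task and subtask IDs here are placeholders:

package example

import (
	"context"

	"github.com/pingcap/tidb/pkg/ddl"
)

// runWithOperatorCtx wraps pipeline execution with an OperatorCtx so that
// errors raised by individual operators can be collected and returned.
func runWithOperatorCtx(ctx context.Context) error {
	opCtx := ddl.NewOperatorCtx(ctx, 1 /* taskID */, 1 /* subtaskID */)
	// ... build and run the add-index ingest pipeline with opCtx here ...
	return opCtx.OperatorErr()
}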
type Option ¶
type Option func(*Options)
Option represents an option to initialize the DDL module
func WithAutoIDClient ¶
func WithAutoIDClient(cli *autoid.ClientDiscover) Option
WithAutoIDClient specifies the autoid client used by the autoid service for those AUTO_ID_CACHE=1 tables.
func WithEtcdClient ¶
WithEtcdClient specifies the `clientv3.Client` of DDL used to request the etcd service
func WithHook ¶
WithHook specifies the `Callback` of DDL used to notify the outer module when events are triggered
func WithInfoCache ¶
func WithInfoCache(ic *infoschema.InfoCache) Option
WithInfoCache specifies the `infoschema.InfoCache`
type Options ¶
type Options struct {
	EtcdCli      *clientv3.Client
	Store        kv.Storage
	AutoIDClient *autoid.ClientDiscover
	InfoCache    *infoschema.InfoCache
	Hook         Callback
	Lease        time.Duration
}
Options represents all the options the DDL module needs.
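A sketch of how the functional options compose: each Option simply mutates the shared Options value. Normally they are handed to the DDL constructor; only the application step is shown here, and the import paths assume the current pkg/ layout:

package example

import (
	"github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/infoschema"
	"github.com/pingcap/tidb/pkg/meta/autoid"
)

// buildOptions applies a few options to an empty Options value to show how
// the Option functions compose.
func buildOptions(ic *infoschema.InfoCache, cli *autoid.ClientDiscover) *ddl.Options {
	opts := &ddl.Options{}
	for _, opt := range []ddl.Option{
		ddl.WithInfoCache(ic),
		ddl.WithAutoIDClient(cli),
	} {
		opt(opts)
	}
	return opts
}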
type PollTiFlashBackoffContext ¶
type PollTiFlashBackoffContext struct {
	MinThreshold TiFlashTick
	MaxThreshold TiFlashTick
	// Capacity limits tables a backoff pool can handle, in order to limit handling of big tables.
	Capacity int
	Rate     TiFlashTick
	// contains filtered or unexported fields
}
PollTiFlashBackoffContext is a collection of all backoff states.
func NewPollTiFlashBackoffContext ¶
func NewPollTiFlashBackoffContext(minThreshold, maxThreshold TiFlashTick, capacity int, rate TiFlashTick) (*PollTiFlashBackoffContext, error)
NewPollTiFlashBackoffContext creates an instance of PollTiFlashBackoffContext.
func (*PollTiFlashBackoffContext) Get ¶
func (b *PollTiFlashBackoffContext) Get(id int64) (*PollTiFlashBackoffElement, bool)
Get returns a pointer to the inner PollTiFlashBackoffElement. Only exported for test.
func (*PollTiFlashBackoffContext) Len ¶
func (b *PollTiFlashBackoffContext) Len() int
Len gets the size of PollTiFlashBackoffContext.
func (*PollTiFlashBackoffContext) Put ¶
func (b *PollTiFlashBackoffContext) Put(id int64) bool
Put records a table into the backoff pool if there is enough room; otherwise it returns false.
func (*PollTiFlashBackoffContext) Remove ¶
func (b *PollTiFlashBackoffContext) Remove(id int64) bool
Remove removes a table from the backoff pool and resets its backoff state.
func (*PollTiFlashBackoffContext) Tick ¶
func (b *PollTiFlashBackoffContext) Tick(id int64) (grew bool, exist bool, cnt int)
Tick first checks and increases the Counter. It returns: 1. a bool indicating whether the threshold grew during this tick, 2. a bool indicating whether this ID exists, 3. an int indicating how many ticks this ID has counted so far.
type PollTiFlashBackoffElement ¶
type PollTiFlashBackoffElement struct {
	Counter      int
	Threshold    TiFlashTick
	TotalCounter int
}
PollTiFlashBackoffElement records the backoff state for each TiFlash table. `Counter` increases on every `Tick`; when it reaches `Threshold`, it is reset to 0 while `Threshold` grows. `TotalCounter` records the total number of `Tick`s this element has received since it was created.
func NewPollTiFlashBackoffElement ¶
func NewPollTiFlashBackoffElement() *PollTiFlashBackoffElement
NewPollTiFlashBackoffElement initialize backoff element for a TiFlash table.
func (*PollTiFlashBackoffElement) MaybeGrow ¶
func (e *PollTiFlashBackoffElement) MaybeGrow(b *PollTiFlashBackoffContext) bool
MaybeGrow grows the threshold and resets the counter when needed.
func (*PollTiFlashBackoffElement) NeedGrow ¶
func (e *PollTiFlashBackoffElement) NeedGrow() bool
NeedGrow returns whether the threshold needs to grow. It is exported for testing.
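A small sketch of the backoff lifecycle for a single TiFlash table ID: register it, tick it on every poll round, and drop it once the replica becomes available. The threshold, capacity, and rate values below are illustrative only:

package example

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/ddl"
)

// pollWithBackoff shows the Put/Tick/Remove cycle on a backoff pool.
func pollWithBackoff(tableID int64) error {
	// min threshold 1, max threshold 64, capacity 1024 tables, growth rate 2.
	b, err := ddl.NewPollTiFlashBackoffContext(1, 64, 1024, 2)
	if err != nil {
		return err
	}
	b.Put(tableID)
	for i := 0; i < 10; i++ {
		grew, exists, cnt := b.Tick(tableID)
		fmt.Printf("tick %d: grew=%v exists=%v total=%d\n", i, grew, exists, cnt)
	}
	b.Remove(tableID) // stop backing off once the replica is available
	return nil
}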
type RecoverInfo ¶
type RecoverInfo struct {
	SchemaID      int64
	TableInfo     *model.TableInfo
	DropJobID     int64
	SnapshotTS    uint64
	AutoIDs       meta.AutoIDGroup
	OldSchemaName string
	OldTableName  string
}
RecoverInfo contains information needed by DDL.RecoverTable.
type RecoverSchemaInfo ¶
type RecoverSchemaInfo struct {
	*model.DBInfo
	RecoverTabsInfo []*RecoverInfo
	DropJobID       int64
	SnapshotTS      uint64
	OldSchemaName   model.CIStr
}
RecoverSchemaInfo contains information needed by DDL.RecoverSchema.
type ReorgCallback ¶
type ReorgCallback interface {
	// OnUpdateReorgInfo is called after updating reorg info for partitions.
	OnUpdateReorgInfo(job *model.Job, pid int64)
}
ReorgCallback is the callback for DDL reorganization.
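A minimal sketch of a ReorgCallback that just records which partition the reorganization has moved to; the type is hypothetical and only meant to show the shape of the callback:

package example

import (
	"log"

	"github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/parser/model"
)

// recordingReorgCallback logs and remembers every partition ID whose reorg
// info has been updated.
type recordingReorgCallback struct {
	pids []int64
}

var _ ddl.ReorgCallback = (*recordingReorgCallback)(nil)

func (c *recordingReorgCallback) OnUpdateReorgInfo(job *model.Job, pid int64) {
	log.Printf("job %d moved to partition %d", job.ID, pid)
	c.pids = append(c.pids, pid)
}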
type TableScanOperator ¶
type TableScanOperator struct { *operator.AsyncOperator[TableScanTask, IndexRecordChunk] }
TableScanOperator scans table records in given key ranges from kv store.
func NewTableScanOperator ¶
func NewTableScanOperator(
	ctx *OperatorCtx,
	sessPool opSessPool,
	copCtx copr.CopContext,
	srcChkPool chan *chunk.Chunk,
	concurrency int,
) *TableScanOperator
NewTableScanOperator creates a new TableScanOperator.
type TableScanTask ¶
TableScanTask contains the start key and the end key of a region.
func (*TableScanTask) String ¶
func (t *TableScanTask) String() string
String implements the fmt.Stringer interface.
type TableScanTaskSource ¶
type TableScanTaskSource struct {
// contains filtered or unexported fields
}
TableScanTaskSource produces TableScanTask by splitting table records into ranges.
func NewTableScanTaskSource ¶
func NewTableScanTaskSource(
	ctx context.Context,
	store kv.Storage,
	physicalTable table.PhysicalTable,
	startKey kv.Key,
	endKey kv.Key,
) *TableScanTaskSource
NewTableScanTaskSource creates a new TableScanTaskSource.
func (*TableScanTaskSource) Close ¶
func (src *TableScanTaskSource) Close() error
Close implements Operator interface.
func (*TableScanTaskSource) Open ¶
func (src *TableScanTaskSource) Open() error
Open implements Operator interface.
func (*TableScanTaskSource) SetSink ¶
func (src *TableScanTaskSource) SetSink(sink operator.DataChannel[TableScanTask])
SetSink implements WithSink interface.
func (*TableScanTaskSource) String ¶
func (*TableScanTaskSource) String() string
String implements fmt.Stringer interface.
type TiFlashManagementContext ¶
type TiFlashManagementContext struct {
	TiFlashStores map[int64]pd.StoreInfo
	PollCounter   uint64
	Backoff       *PollTiFlashBackoffContext
	// Tables waiting for their progress to be updated after becoming available.
	UpdatingProgressTables *list.List
}
TiFlashManagementContext is the context for TiFlash Replica Management
func NewTiFlashManagementContext ¶
func NewTiFlashManagementContext() (*TiFlashManagementContext, error)
NewTiFlashManagementContext creates an instance for TiFlashManagementContext.
type TiFlashReplicaStatus ¶
type TiFlashReplicaStatus struct {
	ID                    int64
	Count                 uint64
	LocationLabels        []string
	Available             bool
	LogicalTableAvailable bool
	HighPriority          bool
	IsPartition           bool
}
TiFlashReplicaStatus records status for each TiFlash replica.
type WriteExternalStoreOperator ¶
type WriteExternalStoreOperator struct { *operator.AsyncOperator[IndexRecordChunk, IndexWriteResult] }
WriteExternalStoreOperator writes index records to external storage.
func NewWriteExternalStoreOperator ¶
func NewWriteExternalStoreOperator(
	ctx *OperatorCtx,
	copCtx copr.CopContext,
	sessPool opSessPool,
	jobID int64,
	subtaskID int64,
	tbl table.PhysicalTable,
	indexes []table.Index,
	store storage.ExternalStorage,
	srcChunkPool chan *chunk.Chunk,
	concurrency int,
	onClose external.OnCloseFunc,
	memoryQuota uint64,
	reorgMeta *model.DDLReorgMeta,
) *WriteExternalStoreOperator
NewWriteExternalStoreOperator creates a new WriteExternalStoreOperator.
Source Files ¶
- backfilling.go
- backfilling_clean_s3.go
- backfilling_dist_executor.go
- backfilling_dist_scheduler.go
- backfilling_import_cloud.go
- backfilling_merge_sort.go
- backfilling_operators.go
- backfilling_read_index.go
- backfilling_scheduler.go
- bdr.go
- callback.go
- cluster.go
- column.go
- constant.go
- constraint.go
- ddl.go
- ddl_algorithm.go
- ddl_api.go
- ddl_history.go
- ddl_running_jobs.go
- ddl_tiflash_api.go
- ddl_worker.go
- ddl_workerpool.go
- delete_range.go
- delete_range_util.go
- dist_owner.go
- foreign_key.go
- generated_column.go
- index.go
- index_cop.go
- index_merge_tmp.go
- job_table.go
- mock.go
- multi_schema_change.go
- options.go
- partition.go
- placement_policy.go
- reorg.go
- resource_group.go
- rollingback.go
- sanity_check.go
- schema.go
- sequence.go
- split_region.go
- stat.go
- table.go
- table_lock.go
- ttl.go