ddl

package
Published: Dec 15, 2024 License: Apache-2.0 Imports: 153 Imported by: 0

Documentation

Overview

Package ddl is the core of the TiDB DDL layer. It is used to manage the schema of a TiDB cluster.

TiDB executes DDL using the Online DDL algorithm; see docs/design/2018-10-08-online-DDL.md for more details.

DDL maintains the following invariant:

At any time, for each schema object (such as a table), at most two versions
can exist: the current version N, loaded by all TiDB instances, and version N+1,
pushed forward by DDL. Before we can finish the DDL or continue to the next
operation, we need to make sure all TiDB instances have synchronized to version N+1.
For example, while a table moves from N to N+1, some instances may still serve
reads at N, but no instance may lag further behind.
Note that we are using a global version number for all schema objects, so the
versions related to a given table might not be continuous, as DDLs are executed in parallel.

Index

Constants

const (
	// JobTable stores the information of DDL jobs.
	JobTable = "tidb_ddl_job"
	// ReorgTable stores the information of DDL reorganization.
	ReorgTable = "tidb_ddl_reorg"
	// HistoryTable stores the history DDL jobs.
	HistoryTable = "tidb_ddl_history"

	// JobTableID is the table ID of `tidb_ddl_job`.
	JobTableID = meta.MaxInt48 - 1
	// ReorgTableID is the table ID of `tidb_ddl_reorg`.
	ReorgTableID = meta.MaxInt48 - 2
	// HistoryTableID is the table ID of `tidb_ddl_history`.
	HistoryTableID = meta.MaxInt48 - 3
	// MDLTableID is the table ID of `tidb_mdl_info`.
	MDLTableID = meta.MaxInt48 - 4
	// BackgroundSubtaskTableID is the table ID of `tidb_background_subtask`.
	BackgroundSubtaskTableID = meta.MaxInt48 - 5
	// BackgroundSubtaskHistoryTableID is the table ID of `tidb_background_subtask_history`.
	BackgroundSubtaskHistoryTableID = meta.MaxInt48 - 6
	// NotifierTableID is the table ID of `tidb_ddl_notifier`.
	NotifierTableID = meta.MaxInt48 - 7

	// JobTableSQL is the CREATE TABLE SQL of `tidb_ddl_job`.
	JobTableSQL = "create table " + JobTable + `(
		job_id bigint not null,
		reorg int,
		schema_ids text(65535),
		table_ids text(65535),
		job_meta longblob,
		type int,
		processing int,
		primary key(job_id))`
	// ReorgTableSQL is the CREATE TABLE SQL of `tidb_ddl_reorg`.
	ReorgTableSQL = "create table " + ReorgTable + `(
		job_id bigint not null,
		ele_id bigint,
		ele_type blob,
		start_key blob,
		end_key blob,
		physical_id bigint,
		reorg_meta longblob,
		unique key(job_id, ele_id, ele_type(20)))`
	// HistoryTableSQL is the CREATE TABLE SQL of `tidb_ddl_history`.
	HistoryTableSQL = "create table " + HistoryTable + `(
		job_id bigint not null,
		job_meta longblob,
		db_name char(64),
		table_name char(64),
		schema_ids text(65535),
		table_ids text(65535),
		create_time datetime,
		primary key(job_id))`
	// BackgroundSubtaskTableSQL is the CREATE TABLE SQL of `tidb_background_subtask`.
	BackgroundSubtaskTableSQL = `` /* 602-byte string literal not displayed */

	// BackgroundSubtaskHistoryTableSQL is the CREATE TABLE SQL of `tidb_background_subtask_history`.
	BackgroundSubtaskHistoryTableSQL = `` /* 569-byte string literal not displayed */

	// NotifierTableName is `tidb_ddl_notifier`.
	NotifierTableName = "tidb_ddl_notifier"

	// NotifierTableSQL is the CREATE TABLE SQL of `tidb_ddl_notifier`.
	NotifierTableSQL = `CREATE TABLE ` + NotifierTableName + ` (
		ddl_job_id BIGINT,
		sub_job_id BIGINT COMMENT '-1 if the schema change does not belong to a multi-schema change DDL or a merged DDL. 0 or positive numbers representing the sub-job index of a multi-schema change DDL or a merged DDL',
		schema_change LONGBLOB COMMENT 'SchemaChangeEvent at rest',
		processed_by_flag BIGINT UNSIGNED DEFAULT 0 COMMENT 'flag to mark which subscriber has processed the event',
		PRIMARY KEY(ddl_job_id, sub_job_id))`
)
const (
	// DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing.
	DDLOwnerKey = "/tidb/ddl/fg/owner"

	// Prompt is the prompt for ddl owner manager.
	Prompt = "ddl"
)
const (
	DefNumHistoryJobs = 10

	// DefNumGetDDLHistoryJobs is the max count for getting the ddl history once.
	DefNumGetDDLHistoryJobs = 2048
)

DefNumHistoryJobs is the default number of history jobs to return.

const (
	BRInsertDeleteRangeSQLPrefix = insertDeleteRangeSQLPrefix
	BRInsertDeleteRangeSQLValue  = insertDeleteRangeSQLValue
)

These are only used in BR unit tests. If these constants are modified, please make sure the changes remain compatible with BR.

const (
	// MaxCommentLength is exported for testing.
	MaxCommentLength = 1024
)
const UpdateDDLJobReorgCfgInterval = 2 * time.Second

UpdateDDLJobReorgCfgInterval is the interval to check and update reorg configuration.

Variables

var (
	// TestCheckWorkerNumCh is used in tests that adjust the backfill workers.
	TestCheckWorkerNumCh = make(chan *sync.WaitGroup)
	// TestCheckWorkerNumber is used in tests that adjust the backfill workers.
	TestCheckWorkerNumber = int32(variable.DefTiDBDDLReorgWorkerCount)
	// TestCheckReorgTimeout is used to mock a timeout when reorganizing data.
	TestCheckReorgTimeout = int32(0)
)
var (
	// PollTiFlashInterval is the interval between every pollTiFlashReplicaStatus call.
	PollTiFlashInterval = 2 * time.Second
	// PullTiFlashPdTick indicates the number of intervals before we fully sync all TiFlash pd rules and tables.
	PullTiFlashPdTick = atomicutil.NewUint64(30 * 5)
	// UpdateTiFlashStoreTick indicates the number of intervals before we fully update TiFlash stores.
	UpdateTiFlashStoreTick = atomicutil.NewUint64(5)
	// PollTiFlashBackoffMaxTick is the max tick before we try to update TiFlash replica availability for one table.
	PollTiFlashBackoffMaxTick TiFlashTick = 10
	// PollTiFlashBackoffMinTick is the min tick before we try to update TiFlash replica availability for one table.
	PollTiFlashBackoffMinTick TiFlashTick = 1
	// PollTiFlashBackoffCapacity is the cache size of backoff struct.
	PollTiFlashBackoffCapacity = 1000
	// PollTiFlashBackoffRate is growth rate of exponential backoff threshold.
	PollTiFlashBackoffRate TiFlashTick = 1.5
	// RefreshProgressMaxTableCount is the max count of table to refresh progress after available each poll.
	RefreshProgressMaxTableCount uint64 = 1000
)
var (
	CheckBackfillJobFinishInterval = 300 * time.Millisecond
	// UpdateBackfillJobRowCountInterval is the interval of updating the job row count.
	UpdateBackfillJobRowCountInterval = 3 * time.Second
)

CheckBackfillJobFinishInterval is exported for test.

var (

	// WaitTimeWhenErrorOccurred is waiting interval when processing DDL jobs encounter errors.
	WaitTimeWhenErrorOccurred = int64(1 * time.Second)

	// TestNotifyBeginTxnCh is used to notify that a txn has begun in runInTxn.
	TestNotifyBeginTxnCh = make(chan struct{})
)
var DDLBackfillers = map[model.ActionType]string{
	model.ActionAddIndex:            "add_index",
	model.ActionModifyColumn:        "modify_column",
	model.ActionDropIndex:           "drop_index",
	model.ActionReorganizePartition: "reorganize_partition",
}

DDLBackfillers contains the DDL actions that need a backfill step.
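For example, a minimal sketch (written as if inside package ddl; `job` is a hypothetical *model.Job) that checks whether a job's action has a backfill step:

	// Sketch: look up whether a DDL action has a backfill step.
	if fillerName, ok := DDLBackfillers[job.Type]; ok {
		fmt.Printf("job %d needs backfill step %q\n", job.ID, fillerName)
	}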

var (
	// EnableSplitTableRegion is a flag to decide whether to split a new region for
	// a newly created table. It takes effect only if the Storage supports split
	// region.
	EnableSplitTableRegion = uint32(0)
)
var EstimateTableRowSizeForTest = estimateTableRowSize

EstimateTableRowSizeForTest is used for test.

var LastReorgMetaFastReorgDisabled bool

LastReorgMetaFastReorgDisabled is used for test.

var MockDMLExecution func()

MockDMLExecution is only used for test.

var MockDMLExecutionMerging func()

MockDMLExecutionMerging is only used for test.

var MockDMLExecutionStateBeforeImport func()

MockDMLExecutionStateBeforeImport is only used for test.

var MockDMLExecutionStateBeforeMerge func()

MockDMLExecutionStateBeforeMerge is only used for test.

var MockDMLExecutionStateMerging func()

MockDMLExecutionStateMerging is only used for test.

var ResultCounterForTest *atomic.Int32

ResultCounterForTest is used for test.

var (
	// RunInGoTest is used to identify whether ddl is running in a test.
	RunInGoTest bool
)
var TestReorgGoroutineRunning = make(chan struct{})

TestReorgGoroutineRunning is only used in test to indicate the reorg goroutine has been started.

Functions

func AddDelRangeJobInternal

func AddDelRangeJobInternal(ctx context.Context, wrapper DelRangeExecWrapper, job *model.Job) error

AddDelRangeJobInternal implements the generation of delete ranges for the provided job and consumes the delete ranges through the DelRangeExecWrapper.

func AddHistoryDDLJob

func AddHistoryDDLJob(ctx context.Context, sess *sess.Session, t *meta.Mutator, job *model.Job, updateRawArgs bool) error

AddHistoryDDLJob records the history job.

func AddIndexColumnFlag

func AddIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)

AddIndexColumnFlag aligns the column flags of columns in TableInfo to IndexInfo.

func AllocateColumnID

func AllocateColumnID(tblInfo *model.TableInfo) int64

AllocateColumnID allocates next column ID from TableInfo.

func AllocateIndexID

func AllocateIndexID(tblInfo *model.TableInfo) int64

AllocateIndexID allocates an index ID from TableInfo.

func AppendPartitionDefs

func AppendPartitionDefs(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)

AppendPartitionDefs generates a list of partition definitions needed for SHOW CREATE TABLE (in executor/show.go), for generating the ADD PARTITION query for INTERVAL partitioning of ALTER TABLE t LAST PARTITION, and for generating the CREATE TABLE query from CREATE TABLE ... INTERVAL.

func AppendPartitionInfo

func AppendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer, sqlMode mysql.SQLMode)

AppendPartitionInfo is used in SHOW CREATE TABLE as well as in generating the SQL syntax for the PartitionInfo during validation of various DDL commands.

func BackupFillerTypeCount

func BackupFillerTypeCount() int

BackupFillerTypeCount represents the count of ddl jobs that need to do backfill.

func BuildAddedPartitionInfo

func BuildAddedPartitionInfo(ctx expression.BuildContext, meta *model.TableInfo, spec *ast.AlterTableSpec) (*model.PartitionInfo, error)

BuildAddedPartitionInfo builds the PartitionInfo for ALTER TABLE ... ADD PARTITION.

func BuildElements

func BuildElements(changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) []*meta.Element

BuildElements is exported for testing.

func BuildHandle

func BuildHandle(pkDts []types.Datum, tblInfo *model.TableInfo,
	pkInfo *model.IndexInfo, loc *time.Location, errCtx errctx.Context) (kv.Handle, error)

BuildHandle is exported for test.

func BuildHiddenColumnInfo

func BuildHiddenColumnInfo(ctx *metabuild.Context, indexPartSpecifications []*ast.IndexPartSpecification, indexName pmodel.CIStr, tblInfo *model.TableInfo, existCols []*table.Column) ([]*model.ColumnInfo, error)

BuildHiddenColumnInfo builds hidden column info.

func BuildIndexInfo

func BuildIndexInfo(
	ctx *metabuild.Context,
	tblInfo *model.TableInfo,
	indexName pmodel.CIStr,
	isPrimary, isUnique, isVector bool,
	indexPartSpecifications []*ast.IndexPartSpecification,
	indexOption *ast.IndexOption,
	state model.SchemaState,
) (*model.IndexInfo, error)

BuildIndexInfo builds a new IndexInfo according to the index information.

func BuildSessionTemporaryTableInfo

func BuildSessionTemporaryTableInfo(ctx *metabuild.Context, store kv.Storage, is infoschema.InfoSchema, s *ast.CreateTableStmt,
	dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)

BuildSessionTemporaryTableInfo builds model.TableInfo from a SQL statement.

func BuildTableInfo

func BuildTableInfo(
	ctx *metabuild.Context,
	tableName pmodel.CIStr,
	cols []*table.Column,
	constraints []*ast.Constraint,
	charset string,
	collate string,
) (tbInfo *model.TableInfo, err error)

BuildTableInfo creates a TableInfo.

func BuildTableInfoFromAST

func BuildTableInfoFromAST(ctx *metabuild.Context, s *ast.CreateTableStmt) (*model.TableInfo, error)

BuildTableInfoFromAST builds model.TableInfo from a SQL statement. Note: TableID and PartitionID are left as uninitialized value.

func BuildTableInfoWithLike

func BuildTableInfoWithLike(ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error)

BuildTableInfoWithLike builds a new table info according to CREATE TABLE ... LIKE statement.

func BuildTableInfoWithStmt

func BuildTableInfoWithStmt(ctx *metabuild.Context, s *ast.CreateTableStmt, dbCharset, dbCollate string, placementPolicyRef *model.PolicyRefInfo) (*model.TableInfo, error)

BuildTableInfoWithStmt builds model.TableInfo from a SQL statement without validity check

func BuildViewInfo

func BuildViewInfo(s *ast.CreateViewStmt) (*model.ViewInfo, error)

BuildViewInfo builds a ViewInfo structure from an ast.CreateViewStmt.

func CalculateRegionBatch

func CalculateRegionBatch(totalRegionCnt int, instanceCnt int, useLocalDisk bool) int

CalculateRegionBatch is exported for test.

func CancelJobs

func CancelJobs(ctx context.Context, se sessionctx.Context, ids []int64) (errs []error, err error)

CancelJobs cancels the DDL jobs according to user command.

func CancelJobsBySystem

func CancelJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)

CancelJobsBySystem cancels Jobs because of internal reasons.

func CheckAfterPositionExists

func CheckAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error

CheckAfterPositionExists makes sure the column specified in the AFTER clause exists. For example, ALTER TABLE t ADD COLUMN c3 INT AFTER c1.

func CheckDropTablePartition

func CheckDropTablePartition(meta *model.TableInfo, partLowerNames []string) error

CheckDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table.

func CheckIsDropPrimaryKey

func CheckIsDropPrimaryKey(indexName pmodel.CIStr, indexInfo *model.IndexInfo, t table.Table) (bool, error)

CheckIsDropPrimaryKey checks if we will drop the PK; there are many PK implementations, so we provide a helper function.

func CheckPKOnGeneratedColumn

func CheckPKOnGeneratedColumn(tblInfo *model.TableInfo, indexPartSpecifications []*ast.IndexPartSpecification) (*model.ColumnInfo, error)

CheckPKOnGeneratedColumn checks the specification of PK is valid.

func CheckPlacementPolicyNotInUseFromInfoSchema

func CheckPlacementPolicyNotInUseFromInfoSchema(is infoschema.InfoSchema, policy *model.PolicyInfo) error

CheckPlacementPolicyNotInUseFromInfoSchema is exported for test.

func CheckPlacementPolicyNotInUseFromMeta

func CheckPlacementPolicyNotInUseFromMeta(t *meta.Mutator, policy *model.PolicyInfo) error

CheckPlacementPolicyNotInUseFromMeta is exported for test.

func CheckTableInfoValidWithStmt

func CheckTableInfoValidWithStmt(ctx *metabuild.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) (err error)

CheckTableInfoValidWithStmt exposes checkTableInfoValidWithStmt to SchemaTracker. Maybe one day we can delete it.

func CloseOwnerManager

func CloseOwnerManager()

CloseOwnerManager closes the global DDL owner manager.

func ConvertBetweenCharAndVarchar

func ConvertBetweenCharAndVarchar(oldCol, newCol byte) bool

ConvertBetweenCharAndVarchar checks whether a column is converted between char and varchar. TODO: it is used by plugins, so change the plugins' usage and remove it.

func CreateNewColumn

func CreateNewColumn(ctx sessionctx.Context, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error)

CreateNewColumn creates a new column according to the column information.

func DeniedByBDR

func DeniedByBDR(role ast.BDRRole, action model.ActionType, args model.JobArgs) (denied bool)

DeniedByBDR checks whether the DDL is denied by BDR.

func DisableTiFlashPoll

func DisableTiFlashPoll(d any)

DisableTiFlashPoll disables TiFlash poll loop aka PollTiFlashReplicaStatus.

func DropIndexColumnFlag

func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo)

DropIndexColumnFlag drops the column flag of columns in TableInfo according to the IndexInfo.

func EnableTiFlashPoll

func EnableTiFlashPoll(d any)

EnableTiFlashPoll enables TiFlash poll loop aka PollTiFlashReplicaStatus.

func ExtractDatumByOffsets

func ExtractDatumByOffsets(ctx expression.EvalContext, row chunk.Row, offsets []int, expCols []*expression.Column, buf []types.Datum) []types.Datum

ExtractDatumByOffsets is exported for test.

func ExtractTblInfos

func ExtractTblInfos(is infoschema.InfoSchema, oldIdent, newIdent ast.Ident, isAlterTable bool, tables map[string]int64) ([]*model.DBInfo, int64, error)

ExtractTblInfos extracts the table information from the infoschema.

func FindColumnNamesInExpr

func FindColumnNamesInExpr(expr ast.ExprNode) []*ast.ColumnName

FindColumnNamesInExpr returns a slice of ast.ColumnName which is referred in expr.

func FindRelatedIndexesToChange

func FindRelatedIndexesToChange(tblInfo *model.TableInfo, colName pmodel.CIStr) []changingIndex

FindRelatedIndexesToChange finds the indexes that cover the given column. The normal one will be overwritten by the temp one.

func GeneratePartDefsFromInterval

func GeneratePartDefsFromInterval(ctx expression.BuildContext, tp ast.AlterTableType, tbInfo *model.TableInfo, partitionOptions *ast.PartitionOptions) error

GeneratePartDefsFromInterval generates range partitions from INTERVAL partitioning. Handles

  • CREATE TABLE: all partitions are generated.
  • ALTER TABLE FIRST PARTITION (expr): drops all partitions before the partition matching the expr (i.e. sets that partition as the new first partition); returns the partitions from the old FIRST partition up to and including the new FIRST partition.
  • ALTER TABLE LAST PARTITION (expr): creates new partitions from (excluding) the old LAST partition up to and including the new LAST partition.

The partition definitions will be set on partitionOptions.

func GetAllDDLJobs

func GetAllDDLJobs(ctx context.Context, se sessionctx.Context) ([]*model.Job, error)

GetAllDDLJobs gets all DDL jobs and sorts them by job ID.

func GetAllHistoryDDLJobs

func GetAllHistoryDDLJobs(m meta.Reader) ([]*model.Job, error)

GetAllHistoryDDLJobs gets all the finished DDL jobs.

func GetCharsetAndCollateInTableOption

func GetCharsetAndCollateInTableOption(startIdx int, options []*ast.TableOption, defaultUTF8MB4Coll string) (chs, coll string, err error)

GetCharsetAndCollateInTableOption iterates over the charset and collate in the options and returns the last charset and collate found. If there is no charset in the options, the returned charset will be "", the same as the collate.

func GetColumnForeignKeyInfo

func GetColumnForeignKeyInfo(colName string, fkInfos []*model.FKInfo) *model.FKInfo

GetColumnForeignKeyInfo returns the wanted foreign key info

func GetDefaultCollation

func GetDefaultCollation(cs string, defaultUTF8MB4Collation string) (string, error)

GetDefaultCollation returns the default collation for the charset and handles the default collation for UTF8MB4.

func GetDropOrTruncateTableInfoFromJobsByStore

func GetDropOrTruncateTableInfoFromJobsByStore(jobs []*model.Job, gcSafePoint uint64, getTable func(uint64, int64, int64) (*model.TableInfo, error), fn func(*model.Job, *model.TableInfo) (bool, error)) (bool, error)

GetDropOrTruncateTableInfoFromJobsByStore implements GetDropOrTruncateTableInfoFromJobs

func GetHistoryJobByID

func GetHistoryJobByID(sess sessionctx.Context, id int64) (*model.Job, error)

GetHistoryJobByID returns the history DDL job by ID.

func GetLastHistoryDDLJobsIterator

func GetLastHistoryDDLJobsIterator(m meta.Reader) (meta.LastJobIterator, error)

GetLastHistoryDDLJobsIterator gets an iterator over the latest N history DDL jobs.

func GetLastNHistoryDDLJobs

func GetLastNHistoryDDLJobs(t meta.Reader, maxNumJobs int) ([]*model.Job, error)

GetLastNHistoryDDLJobs returns the DDL history jobs and an error. The maximum count of history jobs is maxNumJobs.

func GetName4AnonymousIndex

func GetName4AnonymousIndex(t table.Table, colName pmodel.CIStr, idxName pmodel.CIStr) pmodel.CIStr

GetName4AnonymousIndex returns a valid name for anonymous index.

func GetOriginDefaultValueForModifyColumn

func GetOriginDefaultValueForModifyColumn(ctx exprctx.BuildContext, changingCol, oldCol *model.ColumnInfo) (any, error)

GetOriginDefaultValueForModifyColumn gets the original default value for a modified column. Since a column type change is implemented as adding a new column and then substituting the old one, a case exists where an update-where statement fetches a NULL for a not-null column without any default data, which would cause an error. So we set the original default value here to prevent this error. If the oldCol has an original default value, we use it. Otherwise we set the zero value as the original default value. Besides, for insert & update records, we have already implemented using the casted value of the relative column to insert rather than the original default value.

func GetRangeEndKey

func GetRangeEndKey(ctx *ReorgContext, store kv.Storage, priority int, keyPrefix kv.Key, startKey, endKey kv.Key) (kv.Key, error)

GetRangeEndKey gets the actual end key for the range of [startKey, endKey).

func GetRangePlacementPolicyName

func GetRangePlacementPolicyName(ctx context.Context, rangeBundleID string) (string, error)

GetRangePlacementPolicyName gets the placement policy name used by the range. rangeBundleID is limited to TiDBBundleRangePrefixForGlobal and TiDBBundleRangePrefixForMeta.

func GetTableInfoAndCancelFaultJob

func GetTableInfoAndCancelFaultJob(t *meta.Mutator, job *model.Job, schemaID int64) (*model.TableInfo, error)

GetTableInfoAndCancelFaultJob is exported for test.

func GetTableMaxHandle

func GetTableMaxHandle(ctx *ReorgContext, store kv.Storage, startTS uint64, tbl table.PhysicalTable) (maxHandle kv.Handle, emptyTable bool, err error)

GetTableMaxHandle gets the max handle of a PhysicalTable.

func GetWaitTimeWhenErrorOccurred

func GetWaitTimeWhenErrorOccurred() time.Duration

GetWaitTimeWhenErrorOccurred returns the waiting interval used when processing DDL jobs that encounter errors.

func HandleLockTablesOnFinish

func HandleLockTablesOnFinish(ctx sessionctx.Context, jobW *JobWrapper, ddlErr error)

HandleLockTablesOnFinish handles the table lock for the job which is finished. Exported for testing purposes.

func HandleLockTablesOnSuccessSubmit

func HandleLockTablesOnSuccessSubmit(ctx sessionctx.Context, jobW *JobWrapper)

HandleLockTablesOnSuccessSubmit handles the table lock for the job which is submitted successfully. Exported for testing purposes.

func InitAndAddColumnToTable

func InitAndAddColumnToTable(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) *model.ColumnInfo

InitAndAddColumnToTable initializes the ColumnInfo in-place and adds it to the table.

func IsAutoRandomColumnID

func IsAutoRandomColumnID(tblInfo *model.TableInfo, colID int64) bool

IsAutoRandomColumnID returns true if the given column ID belongs to an auto_random column.

func IsColumnDroppableWithCheckConstraint

func IsColumnDroppableWithCheckConstraint(col pmodel.CIStr, tblInfo *model.TableInfo) error

IsColumnDroppableWithCheckConstraint checks whether the column can be dropped; it returns an error if the column is referenced by a check constraint whose dependent columns number more than one.

func IsColumnRenameableWithCheckConstraint

func IsColumnRenameableWithCheckConstraint(col pmodel.CIStr, tblInfo *model.TableInfo) error

IsColumnRenameableWithCheckConstraint checks whether the column is referenced by any check constraint.

func IsElemsChangedToModifyColumn

func IsElemsChangedToModifyColumn(oldElems, newElems []string) bool

IsElemsChangedToModifyColumn checks whether the element list has changed in a way that requires modifying the column.

func IterAllDDLJobs

func IterAllDDLJobs(ctx sessionctx.Context, txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error

IterAllDDLJobs iterates running DDL jobs first and returns directly if `finishFn` returns true or an error; it then iterates history DDL jobs until `finishFn` returns true or an error.

func IterHistoryDDLJobs

func IterHistoryDDLJobs(txn kv.Transaction, finishFn func([]*model.Job) (bool, error)) error

IterHistoryDDLJobs iterates history DDL jobs until `finishFn` returns true or an error.

func JobNeedGC

func JobNeedGC(job *model.Job) bool

JobNeedGC is called to determine whether delete-ranges need to be generated for the provided job.

NOTICE: BR also uses jobNeedGC to determine whether delete-ranges need to be generated for the provided job. Therefore, please make sure any modification is compatible with BR.

func LoadTiFlashReplicaInfo

func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus)

LoadTiFlashReplicaInfo parses model.TableInfo into []TiFlashReplicaStatus.

func LocateOffsetToMove

func LocateOffsetToMove(currentOffset int, pos *ast.ColumnPosition, tblInfo *model.TableInfo) (destOffset int, err error)

LocateOffsetToMove returns the offset of the column to move.

func MockTableInfo

func MockTableInfo(sctx sessionctx.Context, stmt *ast.CreateTableStmt, tableID int64) (*model.TableInfo, error)

MockTableInfo mocks a TableInfo from a CREATE TABLE statement AST and a specified table ID.

func NeedToOverwriteColCharset

func NeedToOverwriteColCharset(options []*ast.TableOption) bool

NeedToOverwriteColCharset returns true when altering the charset with CONVERT TO specified.

func NewAddIndexIngestPipeline

func NewAddIndexIngestPipeline(
	ctx *OperatorCtx,
	store kv.Storage,
	sessPool opSessPool,
	backendCtx ingest.BackendCtx,
	engines []ingest.Engine,
	jobID int64,
	tbl table.PhysicalTable,
	idxInfos []*model.IndexInfo,
	startKey, endKey kv.Key,
	reorgMeta *model.DDLReorgMeta,
	avgRowSize int,
	concurrency int,
	cpMgr *ingest.CheckpointManager,
	rowCntListener RowCountListener,
) (*operator.AsyncPipeline, error)

NewAddIndexIngestPipeline creates a pipeline for adding index in ingest mode.

func NewBackfillingSchedulerExt

func NewBackfillingSchedulerExt(d DDL) (scheduler.Extension, error)

NewBackfillingSchedulerExt creates a new backfillingSchedulerExt, only used for test now.

func NewDDL

func NewDDL(ctx context.Context, options ...Option) (DDL, Executor)

NewDDL creates a new DDL. TODO remove it, to simplify this PR we use this way.

func NewDDLReorgMeta

func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta

NewDDLReorgMeta creates a DDL ReorgMeta.

func NewMetaBuildContextWithSctx

func NewMetaBuildContextWithSctx(sctx sessionctx.Context, otherOpts ...metabuild.Option) *metabuild.Context

NewMetaBuildContextWithSctx creates a new MetaBuildContext with the given session context.

func NewReorgCopContext

func NewReorgCopContext(
	store kv.Storage,
	reorgMeta *model.DDLReorgMeta,
	tblInfo *model.TableInfo,
	allIdxInfo []*model.IndexInfo,
	requestSource string,
) (copr.CopContext, error)

NewReorgCopContext creates a CopContext for reorg

func NewReorgHandlerForTest

func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler

NewReorgHandlerForTest creates a new reorgHandler, only used in test.

func NewWriteIndexToExternalStoragePipeline

func NewWriteIndexToExternalStoragePipeline(
	ctx *OperatorCtx,
	store kv.Storage,
	extStoreURI string,
	sessPool opSessPool,
	jobID, subtaskID int64,
	tbl table.PhysicalTable,
	idxInfos []*model.IndexInfo,
	startKey, endKey kv.Key,
	onClose external.OnCloseFunc,
	reorgMeta *model.DDLReorgMeta,
	avgRowSize int,
	concurrency int,
	resource *proto.StepResource,
	rowCntListener RowCountListener,
) (*operator.AsyncPipeline, error)

NewWriteIndexToExternalStoragePipeline creates a pipeline for writing index to external storage.

func OverwriteCollationWithBinaryFlag

func OverwriteCollationWithBinaryFlag(colDef *ast.ColumnDef, chs, coll string, defaultUTF8MB4Coll string) (newChs string, newColl string)

OverwriteCollationWithBinaryFlag is used to handle the case like

CREATE TABLE t (a VARCHAR(255) BINARY) CHARSET utf8 COLLATE utf8_general_ci;

The 'BINARY' sets the column collation to *_bin according to the table charset.

func PauseAllJobsBySystem

func PauseAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)

PauseAllJobsBySystem pauses all running Jobs because of internal reasons.

func PauseJobs

func PauseJobs(ctx context.Context, se sessionctx.Context, ids []int64) ([]error, error)

PauseJobs pauses all the DDL jobs according to user command.

func PauseJobsBySystem

func PauseJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)

PauseJobsBySystem pauses Jobs because of internal reasons.

func PollAvailableTableProgress

func PollAvailableTableProgress(schemas infoschema.InfoSchema, _ sessionctx.Context, pollTiFlashContext *TiFlashManagementContext)

PollAvailableTableProgress polls and checks the replica progress of available tables.

func ProcessColumnCharsetAndCollation

func ProcessColumnCharsetAndCollation(ctx *metabuild.Context, col *table.Column, newCol *table.Column, meta *model.TableInfo, specNewColumn *ast.ColumnDef, schema *model.DBInfo) error

ProcessColumnCharsetAndCollation processes the column charset and collation.

func ProcessModifyColumnOptions

func ProcessModifyColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error

ProcessModifyColumnOptions process column options.

func RemoveDependentHiddenColumns

func RemoveDependentHiddenColumns(tblInfo *model.TableInfo, idxInfo *model.IndexInfo)

RemoveDependentHiddenColumns removes hidden columns by the indexInfo.

func ResolveAlterAlgorithm

func ResolveAlterAlgorithm(alterSpec *ast.AlterTableSpec, specify ast.AlgorithmType) (ast.AlgorithmType, error)

ResolveAlterAlgorithm resolves the algorithm of the alterSpec. If specify is ast.AlterAlgorithmDefault, the default algorithm of the alter action is returned. If the specified algorithm is not supported by the alter action, it tries to find a better algorithm in the order `INSTANT > INPLACE > COPY`, and errAlterOperationNotSupported is returned; e.g. INSTANT may be returned if specify=INPLACE. If no valid algorithm can be chosen, AlgorithmTypeDefault and errAlterOperationNotSupported are returned.

func ResolveAlterTableSpec

func ResolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error)

ResolveAlterTableSpec resolves the alter table algorithm and removes ignored table specs from specs. It returns the valid specs and any error that occurred.

func ResolveCharsetCollation

func ResolveCharsetCollation(charsetOpts []ast.CharsetOpt, utf8MB4DefaultColl string) (chs string, coll string, err error)

ResolveCharsetCollation resolves the charset and collate in the order of the parameters:

  • If any given ast.CharsetOpt is not empty, the resolved charset and collate will be returned.
  • If all ast.CharsetOpts are empty, the default charset and collate will be returned.
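A minimal usage sketch, assuming ast.CharsetOpt's Chs/Col fields as defined in the TiDB parser:

	chs, coll, err := ResolveCharsetCollation(
		[]ast.CharsetOpt{{Chs: "utf8mb4"}}, // explicit charset, no collate
		"utf8mb4_bin",                      // default UTF8MB4 collation
	)
	if err != nil {
		return err
	}
	fmt.Println(chs, coll) // resolved charset and collate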

func ResumeAllJobsBySystem

func ResumeAllJobsBySystem(se sessionctx.Context) (map[int64]error, error)

ResumeAllJobsBySystem resumes all paused Jobs because of internal reasons.

func ResumeJobs

func ResumeJobs(ctx context.Context, se sessionctx.Context, ids []int64) ([]error, error)

ResumeJobs resumes all the DDL jobs according to user command.

func ResumeJobsBySystem

func ResumeJobsBySystem(se sessionctx.Context, ids []int64) (errs []error, err error)

ResumeJobsBySystem resumes Jobs that are paused by TiDB itself.

func ScanHistoryDDLJobs

func ScanHistoryDDLJobs(m *meta.Mutator, startJobID int64, limit int) ([]*model.Job, error)

ScanHistoryDDLJobs gets some of the finished DDL jobs. When the DDL history is quite large, the GetAllHistoryDDLJobs() API can't work well, because it may make the server OOM. The result is in descending order by job ID.
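A hedged paging sketch built on the descending order above; `m` is a *meta.Mutator, and treating startJobID == 0 as "start from the newest job" is an assumption:

	const pageSize = 128
	startJobID := int64(0) // assumption: 0 means start from the newest job
	for {
		jobs, err := ScanHistoryDDLJobs(m, startJobID, pageSize)
		if err != nil || len(jobs) == 0 {
			break
		}
		for _, job := range jobs {
			_ = job // process one page at a time instead of loading everything
		}
		// Results are in descending job-ID order, so continue below the last one.
		startJobID = jobs[len(jobs)-1].ID - 1
	}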

func SendFlashbackToVersionRPC

func SendFlashbackToVersionRPC(
	ctx context.Context,
	s tikv.Storage,
	version uint64,
	startTS, commitTS uint64,
	r tikvstore.KeyRange,
) (rangetask.TaskStat, error)

SendFlashbackToVersionRPC flashes the MVCC keys back to the given version. This function is also called by BR for volume snapshot backup and restore.

func SendPrepareFlashbackToVersionRPC

func SendPrepareFlashbackToVersionRPC(
	ctx context.Context,
	s tikv.Storage,
	flashbackTS, startTS uint64,
	r tikvstore.KeyRange,
) (rangetask.TaskStat, error)

SendPrepareFlashbackToVersionRPC prepares regions for flashback; the purpose is to put regions into a flashback state in which writes are stopped. This function is also called by BR for volume snapshot backup and restore.

func SetBatchInsertDeleteRangeSize

func SetBatchInsertDeleteRangeSize(i int)

SetBatchInsertDeleteRangeSize sets the batch insert/delete range size in the test

func SetDefaultValue

func SetDefaultValue(ctx expression.BuildContext, col *table.Column, option *ast.ColumnOption) (hasDefaultValue bool, err error)

SetDefaultValue sets the default value of the column.

func SetDirectPlacementOpt

func SetDirectPlacementOpt(placementSettings *model.PlacementSettings, placementOptionType ast.PlacementOptionType, stringVal string, uintVal uint64) error

SetDirectPlacementOpt tries to make the PlacementSettings assignments generic for Schema/Table/Partition

func SetDirectResourceGroupBackgroundOption

func SetDirectResourceGroupBackgroundOption(resourceGroupSettings *model.ResourceGroupSettings, opt *ast.ResourceGroupBackgroundOption) error

SetDirectResourceGroupBackgroundOption set background configs of the ResourceGroupSettings.

func SetDirectResourceGroupRUSecondOption

func SetDirectResourceGroupRUSecondOption(resourceGroupSettings *model.ResourceGroupSettings, intVal uint64, unlimited bool) error

SetDirectResourceGroupRUSecondOption tries to set ru second part of the ResourceGroupSettings.

func SetDirectResourceGroupRunawayOption

func SetDirectResourceGroupRunawayOption(resourceGroupSettings *model.ResourceGroupSettings, opt *ast.ResourceGroupRunawayOption) error

SetDirectResourceGroupRunawayOption tries to set runaway part of the ResourceGroupSettings.

func SetDirectResourceGroupSettings

func SetDirectResourceGroupSettings(groupInfo *model.ResourceGroupInfo, opt *ast.ResourceGroupOption) error

SetDirectResourceGroupSettings tries to set the ResourceGroupSettings.

func SetIdxColNameOffset

func SetIdxColNameOffset(idxCol *model.IndexColumn, changingCol *model.ColumnInfo)

SetIdxColNameOffset sets index column name and offset from changing ColumnInfo.

func SetSchemaDiffForCreateTable

func SetSchemaDiffForCreateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error

SetSchemaDiffForCreateTable sets the SchemaDiff for ActionCreateTable.

func SetSchemaDiffForCreateTables

func SetSchemaDiffForCreateTables(diff *model.SchemaDiff, job *model.Job) error

SetSchemaDiffForCreateTables sets the SchemaDiff for ActionCreateTables.

func SetSchemaDiffForCreateView

func SetSchemaDiffForCreateView(diff *model.SchemaDiff, job *model.Job) error

SetSchemaDiffForCreateView sets the SchemaDiff for ActionCreateView.

func SetSchemaDiffForDropTable

func SetSchemaDiffForDropTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForDropTable sets the SchemaDiff for ActionDropTable.

func SetSchemaDiffForDropTablePartition

func SetSchemaDiffForDropTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForDropTablePartition sets the SchemaDiff for ActionDropTablePartition.

func SetSchemaDiffForExchangeTablePartition

func SetSchemaDiffForExchangeTablePartition(diff *model.SchemaDiff, job *model.Job, multiInfos ...schemaIDAndTableInfo) error

SetSchemaDiffForExchangeTablePartition sets the SchemaDiff for ActionExchangeTablePartition.

func SetSchemaDiffForFlashbackCluster

func SetSchemaDiffForFlashbackCluster(diff *model.SchemaDiff, job *model.Job)

SetSchemaDiffForFlashbackCluster sets the SchemaDiff for ActionFlashbackCluster.

func SetSchemaDiffForMultiInfos

func SetSchemaDiffForMultiInfos(diff *model.SchemaDiff, multiInfos ...schemaIDAndTableInfo)

SetSchemaDiffForMultiInfos sets the SchemaDiff for multiInfos.

func SetSchemaDiffForPartitionModify

func SetSchemaDiffForPartitionModify(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForPartitionModify sets the SchemaDiff for ActionRemovePartitioning and ActionAlterTablePartitioning.

func SetSchemaDiffForRecoverSchema

func SetSchemaDiffForRecoverSchema(diff *model.SchemaDiff, job *model.Job) error

SetSchemaDiffForRecoverSchema sets the SchemaDiff for ActionRecoverSchema.

func SetSchemaDiffForRecoverTable

func SetSchemaDiffForRecoverTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForRecoverTable sets the SchemaDiff for ActionRecoverTable.

func SetSchemaDiffForRenameTable

func SetSchemaDiffForRenameTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error

SetSchemaDiffForRenameTable sets the SchemaDiff for ActionRenameTable.

func SetSchemaDiffForRenameTables

func SetSchemaDiffForRenameTables(diff *model.SchemaDiff, _ *model.Job, jobCtx *jobContext) error

SetSchemaDiffForRenameTables sets the SchemaDiff for ActionRenameTables.

func SetSchemaDiffForReorganizePartition

func SetSchemaDiffForReorganizePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForReorganizePartition sets the SchemaDiff for ActionReorganizePartition.

func SetSchemaDiffForTruncateTable

func SetSchemaDiffForTruncateTable(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext) error

SetSchemaDiffForTruncateTable sets the SchemaDiff for ActionTruncateTable.

func SetSchemaDiffForTruncateTablePartition

func SetSchemaDiffForTruncateTablePartition(diff *model.SchemaDiff, job *model.Job, jobCtx *jobContext)

SetSchemaDiffForTruncateTablePartition sets the SchemaDiff for ActionTruncateTablePartition.

func SetWaitTimeWhenErrorOccurred

func SetWaitTimeWhenErrorOccurred(dur time.Duration)

SetWaitTimeWhenErrorOccurred updates the waiting interval used when processing DDL jobs that encounter errors.
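For instance, a test might shorten the backoff temporarily:

	old := GetWaitTimeWhenErrorOccurred()
	SetWaitTimeWhenErrorOccurred(50 * time.Millisecond)
	defer SetWaitTimeWhenErrorOccurred(old) // restore the original interval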

func ShouldBuildClusteredIndex

func ShouldBuildClusteredIndex(mode variable.ClusteredIndexDefMode, opt *ast.IndexOption, isSingleIntPK bool) bool

ShouldBuildClusteredIndex is used to determine whether the CREATE TABLE statement should build a clustered index table.
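A small sketch; passing nil index options and variable.ClusteredIndexDefModeIntOnly here is illustrative, not the only valid combination:

	// A single-int-PK table under INT_ONLY mode gets a clustered index.
	clustered := ShouldBuildClusteredIndex(variable.ClusteredIndexDefModeIntOnly, nil, true)
	fmt.Println(clustered)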

func SplitRecordRegion

func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64

SplitRecordRegion splits a region in the store by the table prefix.

func StartOwnerManager

func StartOwnerManager(ctx context.Context, store kv.Storage) error

StartOwnerManager starts a global DDL owner manager.

func UpdateColsNull2NotNull

func UpdateColsNull2NotNull(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error

UpdateColsNull2NotNull changes the null option of columns of an index.

func ValidateFlashbackTS

func ValidateFlashbackTS(ctx context.Context, sctx sessionctx.Context, flashBackTS uint64) error

ValidateFlashbackTS validates that flashBackTS is in the range [gcSafePoint, currentTS).

func ValidateRenameIndex

func ValidateRenameIndex(from, to pmodel.CIStr, tbl *model.TableInfo) (ignore bool, err error)

ValidateRenameIndex checks if index name is ok to be renamed.

func WaitScatterRegionFinish

func WaitScatterRegionFinish(ctx context.Context, store kv.SplittableStore, regionIDs ...uint64)

WaitScatterRegionFinish will block until all regions are scattered.
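A hedged sketch combining the two calls above; treating the returned uint64 as the new region ID (zero when no split happened) is an assumption, and store/physicalTableID/tableID/scatterScope are hypothetical values:

	regionID := SplitRecordRegion(ctx, store, physicalTableID, tableID, scatterScope)
	if regionID != 0 { // assumption: zero means no region was split
		WaitScatterRegionFinish(ctx, store, regionID)
	}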

Types

type AlterAlgorithm

type AlterAlgorithm struct {
	// contains filtered or unexported fields
}

AlterAlgorithm is used to store the supported alter algorithms. For now, TiDB only supports AlterAlgorithmInplace and AlterAlgorithmInstant. Most alter operations use the instant algorithm; only add index uses inplace (not really in place, because we never block DML, but it takes some time to backfill the index data). See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.

type AvailableTableID

type AvailableTableID struct {
	ID          int64
	IsPartition bool
}

AvailableTableID is the table id info of available table for waiting to update TiFlash replica progress.

type BackfillCleanUpS3

type BackfillCleanUpS3 struct {
}

BackfillCleanUpS3 implements scheduler.CleanUpRoutine.

func (*BackfillCleanUpS3) CleanUp

func (*BackfillCleanUpS3) CleanUp(ctx context.Context, task *proto.Task) error

CleanUp implements the CleanUpRoutine.CleanUp interface.

type BackfillSubTaskMeta

type BackfillSubTaskMeta struct {
	PhysicalTableID int64 `json:"physical_table_id"`

	// Used by read index step.
	RowStart []byte `json:"row_start"`
	RowEnd   []byte `json:"row_end"`

	// Used by global sort write & ingest step.
	RangeJobKeys   [][]byte `json:"range_job_keys,omitempty"`
	RangeSplitKeys [][]byte `json:"range_split_keys,omitempty"`
	DataFiles      []string `json:"data-files,omitempty"`
	StatFiles      []string `json:"stat-files,omitempty"`
	TS             uint64   `json:"ts,omitempty"`
	// Each group of MetaGroups represents a different index kvs meta.
	MetaGroups []*external.SortedKVMeta `json:"meta_groups,omitempty"`
	// EleIDs stands for the index/column IDs to backfill with distributed framework.
	// After the subtask is finished, EleIDs should have the same length as
	// MetaGroups, and they are in the same order.
	EleIDs []int64 `json:"ele_ids,omitempty"`

	// Only used for adding one single index.
	// Keep this for compatibility with v7.5.
	external.SortedKVMeta `json:",inline"`
}

BackfillSubTaskMeta is the sub-task meta for backfilling index.

type BackfillTaskMeta

type BackfillTaskMeta struct {
	Job model.Job `json:"job"`
	// EleIDs stands for the index/column IDs to backfill with distributed framework.
	EleIDs []int64 `json:"ele_ids"`
	// EleTypeKey is the type of the element to backfill with distributed framework.
	// For now, only index type is supported.
	EleTypeKey []byte `json:"ele_type_key"`

	CloudStorageURI string `json:"cloud_storage_uri"`
	EstimateRowSize int    `json:"estimate_row_size"`
}

BackfillTaskMeta is the dist task meta for backfilling index.

type BackfillingSchedulerExt

type BackfillingSchedulerExt struct {
	GlobalSort bool
	// contains filtered or unexported fields
}

BackfillingSchedulerExt is an extension of litBackfillScheduler, exported for test.

func (*BackfillingSchedulerExt) GetEligibleInstances

func (*BackfillingSchedulerExt) GetEligibleInstances(_ context.Context, _ *proto.Task) ([]string, error)

GetEligibleInstances implements scheduler.Extension interface.

func (*BackfillingSchedulerExt) GetNextStep

func (sch *BackfillingSchedulerExt) GetNextStep(task *proto.TaskBase) proto.Step

GetNextStep implements scheduler.Extension interface.

func (*BackfillingSchedulerExt) IsRetryableErr

func (*BackfillingSchedulerExt) IsRetryableErr(error) bool

IsRetryableErr implements scheduler.Extension.IsRetryableErr interface.

func (*BackfillingSchedulerExt) OnDone

OnDone implements scheduler.Extension interface.

func (*BackfillingSchedulerExt) OnNextSubtasksBatch

func (sch *BackfillingSchedulerExt) OnNextSubtasksBatch(
	ctx context.Context,
	taskHandle diststorage.TaskHandle,
	task *proto.Task,
	execIDs []string,
	nextStep proto.Step,
) (subtaskMeta [][]byte, err error)

OnNextSubtasksBatch generate batch of next step's plan.

func (*BackfillingSchedulerExt) OnTick

OnTick implements scheduler.Extension interface.

type CreateTableConfig

type CreateTableConfig struct {
	OnExist OnExist
	// IDAllocated indicates whether the job has allocated all IDs for tables affected
	// in the job, if true, DDL will not allocate IDs for them again, it's only used
	// by BR now. By reusing IDs BR can save a lot of works such as rewriting table
	// IDs in backed up KVs.
	IDAllocated bool
}

CreateTableConfig is the configuration of `CreateTableWithInfo`.

func GetCreateTableConfig

func GetCreateTableConfig(cs []CreateTableOption) CreateTableConfig

GetCreateTableConfig applies the series of config options from default config and returns the final config.

type CreateTableOption

type CreateTableOption func(*CreateTableConfig)

CreateTableOption is the option for creating table.

func WithIDAllocated

func WithIDAllocated(idAllocated bool) CreateTableOption

WithIDAllocated applies the IDAllocated option. WARNING!!!: if idAllocated == true, DDL will NOT allocate IDs by itself. That means if the caller cannot promise IDs are unique, we get inconsistency. This option is only exposed to be used by BR.

func WithOnExist

func WithOnExist(o OnExist) CreateTableOption

WithOnExist applies the OnExist option.
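A short sketch of combining the options above, e.g. the way BR might:

	cfg := GetCreateTableConfig([]CreateTableOption{
		WithOnExist(OnExistIgnore), // skip tables that already exist
		WithIDAllocated(true),      // caller guarantees the IDs are unique
	})
	fmt.Println(cfg.IDAllocated, cfg.OnExist)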

type DDL

type DDL interface {
	// Start campaigns the owner and starts workers.
	// ctxPool is used for the worker's delRangeManager and creates sessions.
	Start(startMode StartMode, ctxPool *pools.ResourcePool) error
	// Stats returns the DDL statistics.
	Stats(vars *variable.SessionVars) (map[string]any, error)
	// GetScope gets the status variables scope.
	GetScope(status string) variable.ScopeFlag
	// Stop stops DDL worker.
	Stop() error
	// RegisterStatsHandle registers statistics handle and its corresponding event channel for ddl.
	RegisterStatsHandle(*handle.Handle)
	// SchemaSyncer gets the schema syncer.
	SchemaSyncer() schemaver.Syncer
	// StateSyncer gets the cluster state syncer.
	StateSyncer() serverstate.Syncer
	// OwnerManager gets the owner manager.
	OwnerManager() owner.Manager
	// GetID gets the ddl ID.
	GetID() string
	// GetMinJobIDRefresher gets the MinJobIDRefresher, this api only works after Start.
	GetMinJobIDRefresher() *systable.MinJobIDRefresher
}

DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache.
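A hedged wiring sketch using the documented options; ctx, store, etcdCli, infoCache, startMode, and ctxPool are placeholders for values described in the Option constructors and Start's comment above:

	d, exec := NewDDL(ctx,
		WithStore(store),         // kv.Storage backing the cluster
		WithEtcdClient(etcdCli),  // etcd client for owner election
		WithInfoCache(infoCache), // infoschema.InfoCache
		WithLease(45*time.Second),
	)
	if err := d.Start(startMode, ctxPool); err != nil {
		return err
	}
	defer d.Stop()
	_ = exec // Executor for running DDL statements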

type DelRangeExecWrapper

type DelRangeExecWrapper interface {
	// generate a new tso for the next job
	UpdateTSOForJob() error

	// initialize the paramsList
	PrepareParamsList(sz int)

	// rewrite table id if necessary, used for BR
	RewriteTableID(tableID int64) (int64, bool)

	// (job_id, element_id, start_key, end_key, ts)
	// ts is generated by delRangeExecWrapper itself
	AppendParamsList(jobID, elemID int64, startKey, endKey string)

	// consume the delete range. For TiDB Server, it inserts rows into mysql.gc_delete_range.
	ConsumeDeleteRange(ctx context.Context, sql string) error
}

DelRangeExecWrapper consumes the delete ranges with the provided table ID(s) and index ID(s).
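A minimal no-op implementation sketch, e.g. for tests; the method set mirrors the interface above, while the meaning of RewriteTableID's bool result is an assumption:

	type noopDelRangeWrapper struct{}

	func (noopDelRangeWrapper) UpdateTSOForJob() error   { return nil }
	func (noopDelRangeWrapper) PrepareParamsList(sz int) {}
	func (noopDelRangeWrapper) RewriteTableID(tableID int64) (int64, bool) {
		return tableID, true // assumption: true keeps the range
	}
	func (noopDelRangeWrapper) AppendParamsList(jobID, elemID int64, startKey, endKey string) {}
	func (noopDelRangeWrapper) ConsumeDeleteRange(ctx context.Context, sql string) error {
		return nil // TiDB Server would insert into mysql.gc_delete_range here
	}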

type EmptyRowCntListener

type EmptyRowCntListener struct{}

EmptyRowCntListener implements a noop RowCountListener.

func (*EmptyRowCntListener) SetTotal

func (*EmptyRowCntListener) SetTotal(_ int)

SetTotal implements RowCountListener.

func (*EmptyRowCntListener) Written

func (*EmptyRowCntListener) Written(_ int)

Written implements RowCountListener.
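A counting variant sketch mirroring the two methods shown here; the full RowCountListener interface may include more methods than these:

	type countingRowCntListener struct {
		total   int
		written atomic.Int64 // sync/atomic
	}

	func (l *countingRowCntListener) SetTotal(total int) { l.total = total }
	func (l *countingRowCntListener) Written(n int)      { l.written.Add(int64(n)) }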

type Executor

type Executor interface {
	CreateSchema(ctx sessionctx.Context, stmt *ast.CreateDatabaseStmt) error
	AlterSchema(sctx sessionctx.Context, stmt *ast.AlterDatabaseStmt) error
	DropSchema(ctx sessionctx.Context, stmt *ast.DropDatabaseStmt) error
	CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error
	CreateView(ctx sessionctx.Context, stmt *ast.CreateViewStmt) error
	DropTable(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
	RecoverTable(ctx sessionctx.Context, recoverTableInfo *model.RecoverTableInfo) (err error)
	RecoverSchema(ctx sessionctx.Context, recoverSchemaInfo *model.RecoverSchemaInfo) error
	DropView(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error)
	CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error
	DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error
	AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) error
	TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error
	RenameTable(ctx sessionctx.Context, stmt *ast.RenameTableStmt) error
	LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error
	UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error
	CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error
	UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error
	RepairTable(ctx sessionctx.Context, createStmt *ast.CreateTableStmt) error
	CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSequenceStmt) error
	DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error)
	AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) error
	CreatePlacementPolicy(ctx sessionctx.Context, stmt *ast.CreatePlacementPolicyStmt) error
	DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacementPolicyStmt) error
	AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacementPolicyStmt) error
	AddResourceGroup(ctx sessionctx.Context, stmt *ast.CreateResourceGroupStmt) error
	AlterResourceGroup(ctx sessionctx.Context, stmt *ast.AlterResourceGroupStmt) error
	DropResourceGroup(ctx sessionctx.Context, stmt *ast.DropResourceGroupStmt) error
	FlashbackCluster(ctx sessionctx.Context, flashbackTS uint64) error

	// CreateSchemaWithInfo creates a database (schema) given its database info.
	//
	// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
	// in-place. If you want to keep using `info`, please call Clone() first.
	CreateSchemaWithInfo(
		ctx sessionctx.Context,
		info *model.DBInfo,
		onExist OnExist) error

	// CreateTableWithInfo creates a table, view or sequence given its table info.
	//
	// WARNING: the DDL owns the `info` after calling this function, and will modify its fields
	// in-place. If you want to keep using `info`, please call Clone() first.
	CreateTableWithInfo(
		ctx sessionctx.Context,
		schema pmodel.CIStr,
		info *model.TableInfo,
		involvingRef []model.InvolvingSchemaInfo,
		cs ...CreateTableOption) error

	// BatchCreateTableWithInfo is like CreateTableWithInfo, but can handle multiple tables.
	BatchCreateTableWithInfo(ctx sessionctx.Context,
		schema pmodel.CIStr,
		info []*model.TableInfo,
		cs ...CreateTableOption) error

	// CreatePlacementPolicyWithInfo creates a placement policy
	//
	// WARNING: the DDL owns the `policy` after calling this function, and will modify its fields
	// in-place. If you want to keep using `policy`, please call Clone() first.
	CreatePlacementPolicyWithInfo(ctx sessionctx.Context, policy *model.PolicyInfo, onExist OnExist) error
}

Executor is the interface for executing DDL statements. It's mostly called by the SQL executor. DDL statements are converted into DDL jobs; JobSubmitter submits the jobs to the DDL job table, then jobScheduler schedules them to run on workers asynchronously in parallel, and Executor waits for them to finish.
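A hedged end-to-end sketch; parsing follows the TiDB parser's ParseOneStmt, `exec` is an Executor, and `sctx` stands for a sessionctx.Context obtained elsewhere:

	p := parser.New()
	stmt, err := p.ParseOneStmt("CREATE TABLE t (a INT PRIMARY KEY)", "", "")
	if err != nil {
		return err
	}
	if create, ok := stmt.(*ast.CreateTableStmt); ok {
		// Executor converts the statement into a DDL job, submits it,
		// and waits for the job to finish.
		err = exec.CreateTable(sctx, create)
	}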

type ExecutorForTest

type ExecutorForTest interface {
	// DoDDLJob does the DDL job, it's exported for test.
	DoDDLJob(ctx sessionctx.Context, job *model.Job) error
	// DoDDLJobWrapper similar to DoDDLJob, but with JobWrapper as input.
	DoDDLJobWrapper(ctx sessionctx.Context, jobW *JobWrapper) error
}

ExecutorForTest is the interface for executing DDL statements in tests. TODO remove it later

type IndexIngestOperator

type IndexIngestOperator struct {
	*operator.AsyncOperator[IndexRecordChunk, IndexWriteResult]
}

IndexIngestOperator writes index records to ingest engine.

func NewIndexIngestOperator

func NewIndexIngestOperator(
	ctx *OperatorCtx,
	copCtx copr.CopContext,
	backendCtx ingest.BackendCtx,
	sessPool opSessPool,
	tbl table.PhysicalTable,
	indexes []table.Index,
	engines []ingest.Engine,
	srcChunkPool *sync.Pool,
	concurrency int,
	reorgMeta *model.DDLReorgMeta,
	cpMgr *ingest.CheckpointManager,
	rowCntListener RowCountListener,
) *IndexIngestOperator

NewIndexIngestOperator creates a new IndexIngestOperator.

type IndexRecordChunk

type IndexRecordChunk struct {
	ID    int
	Chunk *chunk.Chunk
	Err   error
	Done  bool
	// contains filtered or unexported fields
}

IndexRecordChunk contains one of the chunks read from the corresponding TableScanTask.

func (IndexRecordChunk) RecoverArgs

func (t IndexRecordChunk) RecoverArgs() (metricsLabel string, funcInfo string, recoverFn func(), quit bool)

RecoverArgs implements workerpool.TaskMayPanic interface.

type IndexWriteResult

type IndexWriteResult struct {
	ID    int
	Added int
	Total int
	Next  kv.Key
}

IndexWriteResult contains the result of writing index records to ingest engine.

type Info

type Info struct {
	SchemaVer   int64
	ReorgHandle kv.Key       // It's only used for DDL information.
	Jobs        []*model.Job // It's the currently running jobs.
}

Info is for DDL information.

func GetDDLInfo

func GetDDLInfo(s sessionctx.Context) (*Info, error)

GetDDLInfo returns DDL information; it is only used for testing.

func GetDDLInfoWithNewTxn

func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error)

GetDDLInfoWithNewTxn returns DDL information using a new txn.

type JobSubmitter

type JobSubmitter struct {
	// contains filtered or unexported fields
}

JobSubmitter collects DDL jobs and submits them to the job table in batches; it's also responsible for allocating IDs for the jobs. When fast-create is enabled, it merges the create-table jobs into a single batch create-table job. Exported for testing.

func (*JobSubmitter) GenGIDAndInsertJobsWithRetry

func (s *JobSubmitter) GenGIDAndInsertJobsWithRetry(ctx context.Context, ddlSe *sess.Session, jobWs []*JobWrapper) error

GenGIDAndInsertJobsWithRetry generates job-related global IDs and inserts DDL jobs into the DDL job table with retry. Job ID allocation and job insertion are in the same transaction, as we want to make sure DDL jobs are inserted in ID order; then we can query from a min job ID when scheduling DDL jobs to mitigate https://github.com/pingcap/tidb/issues/52905. So this function has a side effect: it sets the table/db/job IDs of 'jobs'.

type JobWrapper

type JobWrapper struct {
	*model.Job
	// IDAllocated see config of same name in CreateTableConfig.
	// exported for test.
	IDAllocated bool
	JobArgs     model.JobArgs
	// job submission runs asynchronously; we use this channel to notify the caller.
	// when fast create table is enabled, we might combine multiple jobs into one, and
	// append the channel to this slice.
	ResultCh []chan jobSubmitResult
	// contains filtered or unexported fields
}

JobWrapper is used to wrap a job and some other information. Exported for testing.

func GetModifiableColumnJob

func GetModifiableColumnJob(
	ctx context.Context,
	sctx sessionctx.Context,
	is infoschema.InfoSchema,
	ident ast.Ident,
	originalColName pmodel.CIStr,
	schema *model.DBInfo,
	t table.Table,
	spec *ast.AlterTableSpec,
) (*JobWrapper, error)

GetModifiableColumnJob returns a DDL job of model.ActionModifyColumn.

func NewJobWrapper

func NewJobWrapper(job *model.Job, idAllocated bool) *JobWrapper

NewJobWrapper creates a new JobWrapper. exported for testing.

func NewJobWrapperWithArgs

func NewJobWrapperWithArgs(job *model.Job, args model.JobArgs, idAllocated bool) *JobWrapper

NewJobWrapperWithArgs creates a new JobWrapper with job args. TODO: merge with NewJobWrapper later.

func (*JobWrapper) FillArgsWithSubJobs

func (jobW *JobWrapper) FillArgsWithSubJobs()

FillArgsWithSubJobs fills the args for the job and its sub-jobs.

func (*JobWrapper) NotifyResult

func (jobW *JobWrapper) NotifyResult(err error)

NotifyResult notifies the job submit result.

type LitBackfillScheduler

type LitBackfillScheduler struct {
	*scheduler.BaseScheduler
	// contains filtered or unexported fields
}

LitBackfillScheduler wraps BaseScheduler.

func (*LitBackfillScheduler) Close

func (sch *LitBackfillScheduler) Close()

Close implements BaseScheduler interface.

func (*LitBackfillScheduler) Init

func (sch *LitBackfillScheduler) Init() (err error)

Init implements BaseScheduler interface.

type OnExist

type OnExist uint8

OnExist specifies what to do when a new object has a name collision.

const (
	// OnExistError throws an error on name collision.
	OnExistError OnExist = iota
	// OnExistIgnore skips creating the new object.
	OnExistIgnore
	// OnExistReplace replaces the old object by the new object. This is only
	// supported by VIEWs at the moment. For other object types, this is
	// equivalent to OnExistError.
	OnExistReplace
)

type OperatorCtx

type OperatorCtx struct {
	context.Context
	// contains filtered or unexported fields
}

OperatorCtx is the context for AddIndexIngestPipeline. This is used to cancel the pipeline and collect errors.

func NewDistTaskOperatorCtx

func NewDistTaskOperatorCtx(
	ctx context.Context,
	taskID, subtaskID int64,
) (*OperatorCtx, context.CancelFunc)

NewDistTaskOperatorCtx is used for adding index with dist framework.

func NewLocalOperatorCtx

func NewLocalOperatorCtx(ctx context.Context, jobID int64) (*OperatorCtx, context.CancelFunc)

NewLocalOperatorCtx is used for adding index with local ingest mode.

func (*OperatorCtx) OperatorErr

func (ctx *OperatorCtx) OperatorErr() error

OperatorErr returns the error of the operator.

type Option

type Option func(*Options)

Option represents an option to initialize the DDL module

func WithAutoIDClient

func WithAutoIDClient(cli *autoid.ClientDiscover) Option

WithAutoIDClient specifies the autoid client used by the autoid service for tables with AUTO_ID_CACHE=1.

func WithEtcdClient

func WithEtcdClient(client *clientv3.Client) Option

WithEtcdClient specifies the `clientv3.Client` of DDL used to request the etcd service

func WithEventPublishStore

func WithEventPublishStore(store notifier.Store) Option

WithEventPublishStore specifies the store used to publish DDL events

func WithInfoCache

func WithInfoCache(ic *infoschema.InfoCache) Option

WithInfoCache specifies the `infoschema.InfoCache`

func WithLease

func WithLease(lease time.Duration) Option

WithLease specifies the schema lease duration

func WithSchemaLoader

func WithSchemaLoader(loader SchemaLoader) Option

WithSchemaLoader specifies the schema loader used to load schema from storage

func WithStore

func WithStore(store kv.Storage) Option

WithStore specifies the `kv.Storage` of DDL used to request the KV service

type Options

type Options struct {
	EtcdCli           *clientv3.Client
	Store             kv.Storage
	AutoIDClient      *autoid.ClientDiscover
	InfoCache         *infoschema.InfoCache
	Lease             time.Duration
	SchemaLoader      SchemaLoader
	EventPublishStore notifier.Store
}

Options represents all the options the DDL module needs
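
The WithXxx constructors above follow the functional-option pattern: each returns a closure that fills one field of Options. A minimal sketch, assuming the DDL module applies them with a simple loop (the apply loop is our assumption):

// imports: time, clientv3 "go.etcd.io/etcd/client/v3",
// "github.com/pingcap/tidb/pkg/ddl", "github.com/pingcap/tidb/pkg/kv"
func buildOptions(store kv.Storage, etcdCli *clientv3.Client) *ddl.Options {
	opts := &ddl.Options{}
	for _, opt := range []ddl.Option{
		ddl.WithLease(45 * time.Second),
		ddl.WithStore(store),
		ddl.WithEtcdClient(etcdCli),
	} {
		opt(opts) // Option is just func(*Options)
	}
	return opts
}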

type PollTiFlashBackoffContext

type PollTiFlashBackoffContext struct {
	MinThreshold TiFlashTick
	MaxThreshold TiFlashTick
	// Capacity limits the number of tables a backoff pool can handle, to bound the handling of big tables.
	Capacity int
	Rate     TiFlashTick
	// contains filtered or unexported fields
}

PollTiFlashBackoffContext is a collection of all backoff states.

func NewPollTiFlashBackoffContext

func NewPollTiFlashBackoffContext(minThreshold, maxThreshold TiFlashTick, capacity int, rate TiFlashTick) (*PollTiFlashBackoffContext, error)

NewPollTiFlashBackoffContext creates an instance of PollTiFlashBackoffContext.

func (*PollTiFlashBackoffContext) Get

Get returns a pointer to the inner PollTiFlashBackoffElement. Exported only for test.

func (*PollTiFlashBackoffContext) Len

func (b *PollTiFlashBackoffContext) Len() int

Len gets size of PollTiFlashBackoffContext.

func (*PollTiFlashBackoffContext) Put

Put records the table into the backoff pool if there is enough room; otherwise it returns false.

func (*PollTiFlashBackoffContext) Remove

func (b *PollTiFlashBackoffContext) Remove(id int64) bool

Remove resets and removes the table from backoff.

func (*PollTiFlashBackoffContext) Tick

func (b *PollTiFlashBackoffContext) Tick(id int64) (grew bool, exist bool, cnt int)

Tick first checks and increases the Counter. It returns: 1. a bool indicating whether the threshold grew during this tick; 2. a bool indicating whether this ID exists; 3. an int indicating how many ticks this ID has counted so far.

type PollTiFlashBackoffElement

type PollTiFlashBackoffElement struct {
	Counter      int
	Threshold    TiFlashTick
	TotalCounter int
}

PollTiFlashBackoffElement records the backoff state for each TiFlash table. `Counter` increases on every `Tick`; when it reaches `Threshold`, it is reset to 0 while `Threshold` grows. `TotalCounter` records the total number of `Tick`s this element has received since it was created.
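
A standalone sketch of these mechanics, not the package's implementation; the multiplicative growth by a rate and the clamp to a maximum threshold are assumptions based on the PollTiFlashBackoffContext fields above:

type tick = float64 // stand-in for TiFlashTick

type elem struct {
	Counter      int
	Threshold    tick
	TotalCounter int
}

// tickOnce advances the element and reports whether the threshold grew.
func (e *elem) tickOnce(rate, maxThreshold tick) bool {
	e.Counter++
	e.TotalCounter++
	if tick(e.Counter) < e.Threshold {
		return false
	}
	// assumption: reset the counter and grow the threshold multiplicatively
	e.Counter = 0
	e.Threshold *= rate
	if e.Threshold > maxThreshold {
		e.Threshold = maxThreshold
	}
	return true
}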

func NewPollTiFlashBackoffElement

func NewPollTiFlashBackoffElement() *PollTiFlashBackoffElement

NewPollTiFlashBackoffElement initializes a backoff element for a TiFlash table.

func (*PollTiFlashBackoffElement) MaybeGrow

MaybeGrow grows the threshold and resets the counter when needed.

func (*PollTiFlashBackoffElement) NeedGrow

func (e *PollTiFlashBackoffElement) NeedGrow() bool

NeedGrow returns whether the threshold needs to grow. It is exported for testing.

type ReorgContext

type ReorgContext struct {
	// contains filtered or unexported fields
}

ReorgContext contains context info for a reorg job. TODO: there is another reorgCtx; merge them.

func NewReorgContext

func NewReorgContext() *ReorgContext

NewReorgContext returns a new DDL job context.

type RowCountListener

type RowCountListener interface {
	Written(rowCnt int)
	SetTotal(total int)
}

RowCountListener is invoked when some index records are flushed to disk or imported to TiKV.
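
A minimal sketch of a RowCountListener implementation that just accumulates progress counters; the type and field names are ours, not this package's:

// imports: sync/atomic
type progressListener struct {
	written atomic.Int64
	total   atomic.Int64
}

// Written is called with the number of rows flushed or imported.
func (l *progressListener) Written(rowCnt int) { l.written.Add(int64(rowCnt)) }

// SetTotal records the total number of rows to process.
func (l *progressListener) SetTotal(total int) { l.total.Store(int64(total)) }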

type SchemaLoader

type SchemaLoader interface {
	Reload() error
}

SchemaLoader is used to reload the info schema; currently the only implementation is the domain.
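
Because the interface has a single method, a no-op stub (e.g. for tests) is trivial; this stub is our illustration, not something shipped by the package:

type noopSchemaLoader struct{}

// Reload implements SchemaLoader and does nothing.
func (noopSchemaLoader) Reload() error { return nil }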

type StartMode

type StartMode string

StartMode is an enum type for the start mode of the DDL.

const (
	// Normal mode, the cluster is in a normal state.
	Normal StartMode = "normal"
	// Bootstrap mode, the cluster is bootstrapping.
	Bootstrap StartMode = "bootstrap"
	// Upgrade mode, the cluster is upgrading; we will force the current node to
	// be the DDL owner, to make sure all upgrade-related DDLs run on a
	// new-version TiDB instance.
	Upgrade StartMode = "upgrade"
)

type TableScanOperator

type TableScanOperator struct {
	*operator.AsyncOperator[TableScanTask, IndexRecordChunk]
	// contains filtered or unexported fields
}

TableScanOperator scans table records in given key ranges from kv store.

func NewTableScanOperator

func NewTableScanOperator(
	ctx *OperatorCtx,
	sessPool opSessPool,
	copCtx copr.CopContext,
	srcChkPool *sync.Pool,
	concurrency int,
	cpMgr *ingest.CheckpointManager,
	hintBatchSize int,
	reorgMeta *model.DDLReorgMeta,
) *TableScanOperator

NewTableScanOperator creates a new TableScanOperator.

func (*TableScanOperator) Close

func (o *TableScanOperator) Close() error

Close implements operator.Operator interface.

type TableScanTask

type TableScanTask struct {
	ID    int
	Start kv.Key
	End   kv.Key
	// contains filtered or unexported fields
}

TableScanTask contains the start key and the end key of a region.

func (TableScanTask) RecoverArgs

func (t TableScanTask) RecoverArgs() (metricsLabel string, funcInfo string, recoverFn func(), quit bool)

RecoverArgs implements workerpool.TaskMayPanic interface.

func (TableScanTask) String

func (t TableScanTask) String() string

String implements fmt.Stringer interface.

type TableScanTaskSource

type TableScanTaskSource struct {
	// contains filtered or unexported fields
}

TableScanTaskSource produces TableScanTask by splitting table records into ranges.

func NewTableScanTaskSource

func NewTableScanTaskSource(
	ctx *OperatorCtx,
	store kv.Storage,
	physicalTable table.PhysicalTable,
	startKey kv.Key,
	endKey kv.Key,
	cpMgr *ingest.CheckpointManager,
) *TableScanTaskSource

NewTableScanTaskSource creates a new TableScanTaskSource.

func (*TableScanTaskSource) Close

func (src *TableScanTaskSource) Close() error

Close implements Operator interface.

func (*TableScanTaskSource) Open

func (src *TableScanTaskSource) Open() error

Open implements Operator interface.

func (*TableScanTaskSource) SetSink

SetSink implements WithSink interface.

func (*TableScanTaskSource) String

func (*TableScanTaskSource) String() string

String implements fmt.Stringer interface.

type TiFlashManagementContext

type TiFlashManagementContext struct {
	TiFlashStores map[int64]pd.StoreInfo
	PollCounter   uint64
	Backoff       *PollTiFlashBackoffContext
	// tables waiting for progress updates after becoming available.
	UpdatingProgressTables *list.List
}

TiFlashManagementContext is the context for TiFlash Replica Management

func NewTiFlashManagementContext

func NewTiFlashManagementContext() (*TiFlashManagementContext, error)

NewTiFlashManagementContext creates an instance for TiFlashManagementContext.

type TiFlashReplicaStatus

type TiFlashReplicaStatus struct {
	ID                    int64
	Count                 uint64
	LocationLabels        []string
	Available             bool
	LogicalTableAvailable bool
	HighPriority          bool
	IsPartition           bool
}

TiFlashReplicaStatus records status for each TiFlash replica.

type TiFlashTick

type TiFlashTick float64

TiFlashTick is the type for the backoff threshold.

type WriteExternalStoreOperator

type WriteExternalStoreOperator struct {
	*operator.AsyncOperator[IndexRecordChunk, IndexWriteResult]
	// contains filtered or unexported fields
}

WriteExternalStoreOperator writes index records to external storage.

func NewWriteExternalStoreOperator

func NewWriteExternalStoreOperator(
	ctx *OperatorCtx,
	copCtx copr.CopContext,
	sessPool opSessPool,
	jobID int64,
	subtaskID int64,
	tbl table.PhysicalTable,
	indexes []table.Index,
	store storage.ExternalStorage,
	srcChunkPool *sync.Pool,
	concurrency int,
	onClose external.OnCloseFunc,
	memoryQuota uint64,
	reorgMeta *model.DDLReorgMeta,
) *WriteExternalStoreOperator

NewWriteExternalStoreOperator creates a new WriteExternalStoreOperator.

func (*WriteExternalStoreOperator) Close

func (o *WriteExternalStoreOperator) Close() error

Close implements operator.Operator interface.

Directories

Path	Synopsis
mock	Package mock is a generated GoMock package.
systable	Package systable contains all constants/methods related to accessing system tables related to DDL job execution
tests
