model

package
v1.1.0-beta.0...-e3248e7
Published: Dec 13, 2024 License: Apache-2.0 Imports: 22 Imported by: 0

Documentation

Index

Constants

const (
	// ColumnInfoVersion0 means the column info version is 0.
	ColumnInfoVersion0 = uint64(0)
	// ColumnInfoVersion1 means the column info version is 1.
	ColumnInfoVersion1 = uint64(1)
	// ColumnInfoVersion2 means the column info version is 2.
	// This version was introduced in v2.1.7 to stay compatible with the charset problem in older versions.
	// Old versions such as v2.0.8 treated utf8 as utf8mb4 because there was no UTF8 check in v2.0.8.
	// After v2.1.2 (PR #8738), TiDB added a UTF8 check, so users who upgraded from v2.0.8 and had inserted UTF8MB4 characters started to get errors.
	// This broke compatibility for users, so PR #9820 fixes the problem and increases the version number.
	ColumnInfoVersion2 = uint64(2)

	// CurrLatestColumnInfoVersion means the latest column info in the current TiDB.
	CurrLatestColumnInfoVersion = ColumnInfoVersion2
)
const (
	// FlagIgnoreTruncate indicates if truncate error should be ignored.
	// Read-only statements should ignore truncate error, write statements should not ignore truncate error.
	FlagIgnoreTruncate uint64 = 1
	// FlagTruncateAsWarning indicates if truncate error should be returned as warning.
	// This flag only matters if FlagIgnoreTruncate is not set: in strict SQL mode, a truncate error should
	// be returned as an error; in non-strict SQL mode, a truncate error should be saved as a warning.
	FlagTruncateAsWarning = 1 << 1
	// FlagPadCharToFullLength indicates if sql_mode 'PAD_CHAR_TO_FULL_LENGTH' is set.
	FlagPadCharToFullLength = 1 << 2
	// FlagInInsertStmt indicates if this is an INSERT statement.
	FlagInInsertStmt = 1 << 3
	// FlagInUpdateOrDeleteStmt indicates if this is an UPDATE statement or a DELETE statement.
	FlagInUpdateOrDeleteStmt = 1 << 4
	// FlagInSelectStmt indicates if this is a SELECT statement.
	FlagInSelectStmt = 1 << 5
	// FlagOverflowAsWarning indicates if overflow error should be returned as warning.
	// In strict sql mode, overflow error should be returned as error,
	// in non-strict sql mode, overflow error should be saved as warning.
	FlagOverflowAsWarning = 1 << 6
	// FlagIgnoreZeroInDate indicates if ZeroInDate error should be ignored.
	// Read-only statements should ignore ZeroInDate error.
	// Write statements should not ignore ZeroInDate error in strict sql mode.
	FlagIgnoreZeroInDate = 1 << 7
	// FlagDividedByZeroAsWarning indicates if DividedByZero should be returned as warning.
	FlagDividedByZeroAsWarning = 1 << 8
	// FlagInSetOprStmt indicates if this is a UNION/EXCEPT/INTERSECT statement.
	FlagInSetOprStmt = 1 << 9
	// FlagInLoadDataStmt indicates if this is a LOAD DATA statement.
	FlagInLoadDataStmt = 1 << 10
	// FlagInRestrictedSQL indicates if this request is in a restricted SQL. Auto Analyze is one example
	FlagInRestrictedSQL = 1 << 11
)

Flags are used by tipb.SelectRequest.Flags to handle the execution mode, such as how to handle truncate errors.
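
Since each flag occupies a single bit, callers combine them with bitwise OR and test them with bitwise AND. The helpers below are a minimal sketch (not part of this package), assuming the code is compiled alongside the package so the constants are unqualified:

// buildFlags is a hypothetical helper showing how the bit-mask flags compose.
func buildFlags(readOnly bool) uint64 {
	var flags uint64
	if readOnly {
		// Read-only statements ignore truncate errors (see FlagIgnoreTruncate).
		flags |= FlagIgnoreTruncate | FlagInSelectStmt
	} else {
		flags |= FlagInInsertStmt | FlagTruncateAsWarning
	}
	return flags
}

// hasFlag reports whether a given flag bit is set in flags.
func hasFlag(flags, flag uint64) bool {
	return flags&flag != 0
}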

const (
	// InvolvingAll means all schemas/tables are affected. It's used in
	// InvolvingSchemaInfo.Database/Tables fields. When both the Database and Tables
	// are InvolvingAll it also means all placement policies and resource groups are
	// affected. Currently the only case is FLASHBACK CLUSTER.
	InvolvingAll = "*"
	// InvolvingNone means no schema/table is affected.
	InvolvingNone = ""
)
const (
	OpAddIndex = iota
	OpDropIndex
	OpRollbackAddIndex
)

List op types.

const (
	// ReorgMetaVersion0 is the minimum version of DDLReorgMeta.
	ReorgMetaVersion0 = int64(0)
	// CurrentReorgMetaVersion is the current version of DDLReorgMeta.
	// The version was increased to 1 to fix #46306 (whether the end key is included in the table range or not).
	CurrentReorgMetaVersion = int64(1)
)
const (
	// TableInfoVersion0 means the table info version is 0.
	// If you upgrade from v2.1.1 or v2.1.2 to v2.1.3 or later and then execute a "change/modify column"
	// statement that does not specify a charset value for the column, the following error may be reported:
	// ERROR 1105 (HY000): unsupported modify charset from utf8mb4 to utf8.
	// To eliminate this error, we do not modify the charset of the column when executing a change/modify
	// column statement that does not specify a charset value for the column.
	// This behavior is not compatible with MySQL.
	TableInfoVersion0 = uint16(0)
	// TableInfoVersion1 means the table info version is 1.
	// When we execute a change/modify column statement that does not specify a charset value for the column,
	// we set the charset of this column to the charset of the table. This behavior is compatible with MySQL.
	TableInfoVersion1 = uint16(1)
	// TableInfoVersion2 means the table info version is 2.
	// This version was introduced in v2.1.7 to stay compatible with the charset problem in older versions.
	// Old versions such as v2.0.8 treated utf8 as utf8mb4 because there was no UTF8 check in v2.0.8.
	// After v2.1.2 (PR #8738), TiDB added a UTF8 check, so users who upgraded from v2.0.8 and had inserted UTF8MB4 characters started to get errors.
	// This broke compatibility for users, so PR #9820 fixes the problem and increases the version number.
	TableInfoVersion2 = uint16(2)
	// TableInfoVersion3 means the table info version is 3.
	// This version deals with upper-cased charset names stored in TableInfo by versions prior to TiDB v2.1.9:
	// TiDB assumes all charsets / collations are lower-cased and tries to convert them if they are not.
	// However, the conversion was missed in some scenarios before v2.1.9, so for all tables prior to
	// TableInfoVersion3, their charsets / collations will be converted to lower case while loading from the storage.
	TableInfoVersion3 = uint16(3)
	// TableInfoVersion4 is not used.
	TableInfoVersion4 = uint16(4)
	// TableInfoVersion5 indicates that the auto_increment allocator in TiDB has been separated from
	// _tidb_rowid allocator when AUTO_ID_CACHE is 1. This version is introduced to preserve the compatibility of old tables:
	// the tables with version <= TableInfoVersion4 still use a single allocator for auto_increment and _tidb_rowid.
	// Also see https://github.com/pingcap/tidb/issues/982.
	TableInfoVersion5 = uint16(5)

	// CurrLatestTableInfoVersion means the latest table info in the current TiDB.
	CurrLatestTableInfoVersion = TableInfoVersion5
)
const (
	DefaultSequenceCacheBool          = true
	DefaultSequenceCycleBool          = false
	DefaultSequenceOrderBool          = false
	DefaultSequenceCacheValue         = int64(1000)
	DefaultSequenceIncrementValue     = int64(1)
	DefaultPositiveSequenceStartValue = int64(1)
	DefaultNegativeSequenceStartValue = int64(-1)
	DefaultPositiveSequenceMinValue   = int64(1)
	DefaultPositiveSequenceMaxValue   = int64(9223372036854775806)
	DefaultNegativeSequenceMaxValue   = int64(-1)
	DefaultNegativeSequenceMinValue   = int64(-9223372036854775807)
)

Some constants for sequence.

const (
	// FKVersion0 indicates the FKInfo version is 0.
	// In FKVersion0, TiDB only supported the syntax of foreign keys, but the foreign key constraint did not take effect.
	FKVersion0 = 0
	// FKVersion1 indicates the FKInfo version is 1.
	// In FKVersion1, TiDB supports the foreign key constraint.
	FKVersion1 = 1
)
const DefaultTTLJobInterval = "24h"

DefaultTTLJobInterval is the default interval of TTL jobs.
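
The default is expressed as a Go duration string ("24h"), so it can presumably be parsed with time.ParseDuration. A hedged sketch, where ttlJobInterval is a hypothetical helper:

import "time"

// ttlJobInterval parses a stored interval string, falling back to the default.
func ttlJobInterval(s string) time.Duration {
	if s == "" {
		s = DefaultTTLJobInterval // "24h"
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		d, _ = time.ParseDuration(DefaultTTLJobInterval)
	}
	return d
}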

const EmptyColumnInfoSize = int64(unsafe.Sizeof(ColumnInfo{}))

EmptyColumnInfoSize is the memory usage of an empty ColumnInfo.

const ExtraHandleID = -1

ExtraHandleID is the ID of the column that we need to append to the schema to occupy the handle's position for use in the execution phase.

const ExtraPhysTblID = -3

ExtraPhysTblID is the ID of the column that should be filled in with the physical table ID. It is primarily used in table partition dynamic prune mode, to return which partition (physical table ID) the row came from. If used with a global index, the partition ID decoded from the key value will be filled in.

const ExtraRowChecksumID = -4

ExtraRowChecksumID is the ID of the column that holds the row checksum info.

const OldDefaultTTLJobInterval = "1h"

OldDefaultTTLJobInterval is the default interval of TTL jobs in v8.5 and earlier versions. It is used by some code to keep compatibility with those versions.

Variables

var ActionBDRMap = map[ActionType]DDLBDRType{}

ActionBDRMap is the map of DDL ActionType to DDLBDRType.

var ActionMap = map[ActionType]string{
	ActionCreateSchema:                  "create schema",
	ActionDropSchema:                    "drop schema",
	ActionCreateTable:                   "create table",
	ActionCreateTables:                  "create tables",
	ActionDropTable:                     "drop table",
	ActionAddColumn:                     "add column",
	ActionDropColumn:                    "drop column",
	ActionAddIndex:                      "add index",
	ActionDropIndex:                     "drop index",
	ActionAddForeignKey:                 "add foreign key",
	ActionDropForeignKey:                "drop foreign key",
	ActionTruncateTable:                 "truncate table",
	ActionModifyColumn:                  "modify column",
	ActionRebaseAutoID:                  "rebase auto_increment ID",
	ActionRenameTable:                   "rename table",
	ActionRenameTables:                  "rename tables",
	ActionSetDefaultValue:               "set default value",
	ActionShardRowID:                    "shard row ID",
	ActionModifyTableComment:            "modify table comment",
	ActionRenameIndex:                   "rename index",
	ActionAddTablePartition:             "add partition",
	ActionDropTablePartition:            "drop partition",
	ActionCreateView:                    "create view",
	ActionModifyTableCharsetAndCollate:  "modify table charset and collate",
	ActionTruncateTablePartition:        "truncate partition",
	ActionDropView:                      "drop view",
	ActionRecoverTable:                  "recover table",
	ActionModifySchemaCharsetAndCollate: "modify schema charset and collate",
	ActionLockTable:                     "lock table",
	ActionUnlockTable:                   "unlock table",
	ActionRepairTable:                   "repair table",
	ActionSetTiFlashReplica:             "set tiflash replica",
	ActionUpdateTiFlashReplicaStatus:    "update tiflash replica status",
	ActionAddPrimaryKey:                 "add primary key",
	ActionDropPrimaryKey:                "drop primary key",
	ActionCreateSequence:                "create sequence",
	ActionAlterSequence:                 "alter sequence",
	ActionDropSequence:                  "drop sequence",
	ActionModifyTableAutoIDCache:        "modify auto id cache",
	ActionRebaseAutoRandomBase:          "rebase auto_random ID",
	ActionAlterIndexVisibility:          "alter index visibility",
	ActionExchangeTablePartition:        "exchange partition",
	ActionAddCheckConstraint:            "add check constraint",
	ActionDropCheckConstraint:           "drop check constraint",
	ActionAlterCheckConstraint:          "alter check constraint",
	ActionAlterTableAttributes:          "alter table attributes",
	ActionAlterTablePartitionPlacement:  "alter table partition placement",
	ActionAlterTablePartitionAttributes: "alter table partition attributes",
	ActionCreatePlacementPolicy:         "create placement policy",
	ActionAlterPlacementPolicy:          "alter placement policy",
	ActionDropPlacementPolicy:           "drop placement policy",
	ActionModifySchemaDefaultPlacement:  "modify schema default placement",
	ActionAlterTablePlacement:           "alter table placement",
	ActionAlterCacheTable:               "alter table cache",
	ActionAlterNoCacheTable:             "alter table nocache",
	ActionAlterTableStatsOptions:        "alter table statistics options",
	ActionMultiSchemaChange:             "alter table multi-schema change",
	ActionFlashbackCluster:              "flashback cluster",
	ActionRecoverSchema:                 "flashback schema",
	ActionReorganizePartition:           "alter table reorganize partition",
	ActionAlterTTLInfo:                  "alter table ttl",
	ActionAlterTTLRemove:                "alter table no_ttl",
	ActionCreateResourceGroup:           "create resource group",
	ActionAlterResourceGroup:            "alter resource group",
	ActionDropResourceGroup:             "drop resource group",
	ActionAlterTablePartitioning:        "alter table partition by",
	ActionRemovePartitioning:            "alter table remove partitioning",
	ActionAddVectorIndex:                "add vector index",
	// contains filtered or unexported fields
}

ActionMap is the map of DDL ActionType to string.

var BDRActionMap = map[DDLBDRType][]ActionType{
	SafeDDL: {
		ActionCreateSchema,
		ActionCreateTable,
		ActionAddColumn,
		ActionAddIndex,
		ActionDropIndex,
		ActionModifyColumn,
		ActionSetDefaultValue,
		ActionModifyTableComment,
		ActionRenameIndex,
		ActionAddTablePartition,
		ActionDropPrimaryKey,
		ActionAlterIndexVisibility,
		ActionCreateTables,
		ActionAlterTTLInfo,
		ActionAlterTTLRemove,
		ActionCreateView,
		ActionDropView,
	},
	UnsafeDDL: {
		ActionDropSchema,
		ActionDropTable,
		ActionDropColumn,
		ActionAddForeignKey,
		ActionDropForeignKey,
		ActionTruncateTable,
		ActionRebaseAutoID,
		ActionRenameTable,
		ActionShardRowID,
		ActionDropTablePartition,
		ActionModifyTableCharsetAndCollate,
		ActionTruncateTablePartition,
		ActionRecoverTable,
		ActionModifySchemaCharsetAndCollate,
		ActionLockTable,
		ActionUnlockTable,
		ActionRepairTable,
		ActionSetTiFlashReplica,
		ActionUpdateTiFlashReplicaStatus,
		ActionAddPrimaryKey,
		ActionCreateSequence,
		ActionAlterSequence,
		ActionDropSequence,
		ActionModifyTableAutoIDCache,
		ActionRebaseAutoRandomBase,
		ActionExchangeTablePartition,
		ActionAddCheckConstraint,
		ActionDropCheckConstraint,
		ActionAlterCheckConstraint,
		ActionRenameTables,
		ActionAlterTableAttributes,
		ActionAlterTablePartitionAttributes,
		ActionAlterTablePartitionPlacement,
		ActionModifySchemaDefaultPlacement,
		ActionAlterTablePlacement,
		ActionAlterCacheTable,
		ActionAlterTableStatsOptions,
		ActionAlterNoCacheTable,
		ActionMultiSchemaChange,
		ActionFlashbackCluster,
		ActionRecoverSchema,
		ActionReorganizePartition,
		ActionAlterTablePartitioning,
		ActionRemovePartitioning,
		ActionAddVectorIndex,
	},
	UnmanagementDDL: {
		ActionCreatePlacementPolicy,
		ActionAlterPlacementPolicy,
		ActionDropPlacementPolicy,
		ActionCreateResourceGroup,
		ActionAlterResourceGroup,
		ActionDropResourceGroup,
	},
	UnknownDDL: {
		_DEPRECATEDActionAlterTableAlterPartition,
	},
}

BDRActionMap is the map of DDLBDRType to ActionType (reversed from ActionBDRMap).
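
ActionBDRMap above is declared empty and documented as the reverse of this map, so it is presumably populated at runtime. A hedged sketch of how such an inversion could be computed; this is not necessarily how the package itself initializes ActionBDRMap:

// invertBDRActionMap builds an ActionType -> DDLBDRType lookup from BDRActionMap.
func invertBDRActionMap() map[ActionType]DDLBDRType {
	m := make(map[ActionType]DDLBDRType)
	for bdrType, actions := range BDRActionMap {
		for _, action := range actions {
			m[action] = bdrType
		}
	}
	return m
}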

var ExtraHandleName = model.NewCIStr("_tidb_rowid")

ExtraHandleName is the name of ExtraHandle Column.

var ExtraPhysTblIDName = model.NewCIStr("_tidb_tid")

ExtraPhysTblIDName is the name of ExtraPhysTblID Column.

IndexableDistanceMetricToFnName maps a distance metric to the distance function name.

IndexableFnNameToDistanceMetric maps a distance function name to the distance metric. Only indexable distance functions should be listed here!

Functions

func FillRollBackArgsForAddColumn

func FillRollBackArgsForAddColumn(job *Job, args *TableColumnArgs)

FillRollBackArgsForAddColumn fills the args for rollback add column ddl.

func FillRollbackArgsForAddPartition

func FillRollbackArgsForAddPartition(job *Job, args *TablePartitionArgs)

FillRollbackArgsForAddPartition fills the rollback args for add partition job. see details in TablePartitionArgs.

func IsIndexPrefixCovered

func IsIndexPrefixCovered(tbInfo *TableInfo, index *IndexInfo, cols ...model.CIStr) bool

IsIndexPrefixCovered checks whether the index's columns begin with the given cols.

func LessDBInfo

func LessDBInfo(a *DBInfo, b *DBInfo) int

LessDBInfo is used for sorting DBInfo by DBInfo.Name.
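
Because LessDBInfo has the three-way comparator signature func(a, b *DBInfo) int, it can be passed directly to slices.SortFunc from the standard library. A small usage sketch:

import "slices"

// sortDBs sorts database infos by name using the package's comparator.
func sortDBs(dbs []*DBInfo) {
	slices.SortFunc(dbs, LessDBInfo)
}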

func SetJobVerInUse

func SetJobVerInUse(ver JobVersion)

SetJobVerInUse sets the version of DDL job used in the node.

func TSConvert2Time

func TSConvert2Time(ts uint64) time.Time

TSConvert2Time converts timestamp to time.
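
TSO timestamps such as Job.StartTS and Job.RealStartTS can be rendered as wall-clock times with this helper. A usage sketch (printJobTimes is a hypothetical function, not part of the package):

import "fmt"

// printJobTimes prints when a DDL job was queued and when it actually started.
func printJobTimes(job *Job) {
	fmt.Println("queued at: ", TSConvert2Time(job.StartTS))
	fmt.Println("started at:", TSConvert2Time(job.RealStartTS))
}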

Types

type ActionType

type ActionType byte

ActionType is the type for DDL action.

const (
	ActionNone                          ActionType = 0
	ActionCreateSchema                  ActionType = 1
	ActionDropSchema                    ActionType = 2
	ActionCreateTable                   ActionType = 3
	ActionDropTable                     ActionType = 4
	ActionAddColumn                     ActionType = 5
	ActionDropColumn                    ActionType = 6
	ActionAddIndex                      ActionType = 7
	ActionDropIndex                     ActionType = 8
	ActionAddForeignKey                 ActionType = 9
	ActionDropForeignKey                ActionType = 10
	ActionTruncateTable                 ActionType = 11
	ActionModifyColumn                  ActionType = 12
	ActionRebaseAutoID                  ActionType = 13
	ActionRenameTable                   ActionType = 14
	ActionSetDefaultValue               ActionType = 15
	ActionShardRowID                    ActionType = 16
	ActionModifyTableComment            ActionType = 17
	ActionRenameIndex                   ActionType = 18
	ActionAddTablePartition             ActionType = 19
	ActionDropTablePartition            ActionType = 20
	ActionCreateView                    ActionType = 21
	ActionModifyTableCharsetAndCollate  ActionType = 22
	ActionTruncateTablePartition        ActionType = 23
	ActionDropView                      ActionType = 24
	ActionRecoverTable                  ActionType = 25
	ActionModifySchemaCharsetAndCollate ActionType = 26
	ActionLockTable                     ActionType = 27
	ActionUnlockTable                   ActionType = 28
	ActionRepairTable                   ActionType = 29
	ActionSetTiFlashReplica             ActionType = 30
	ActionUpdateTiFlashReplicaStatus    ActionType = 31
	ActionAddPrimaryKey                 ActionType = 32
	ActionDropPrimaryKey                ActionType = 33
	ActionCreateSequence                ActionType = 34
	ActionAlterSequence                 ActionType = 35
	ActionDropSequence                  ActionType = 36
	ActionAddColumns                    ActionType = 37 // Deprecated, we use ActionMultiSchemaChange instead.
	ActionDropColumns                   ActionType = 38 // Deprecated, we use ActionMultiSchemaChange instead.
	ActionModifyTableAutoIDCache        ActionType = 39
	ActionRebaseAutoRandomBase          ActionType = 40
	ActionAlterIndexVisibility          ActionType = 41
	ActionExchangeTablePartition        ActionType = 42
	ActionAddCheckConstraint            ActionType = 43
	ActionDropCheckConstraint           ActionType = 44
	ActionAlterCheckConstraint          ActionType = 45

	ActionRenameTables ActionType = 47

	ActionAlterTableAttributes          ActionType = 49
	ActionAlterTablePartitionAttributes ActionType = 50
	ActionCreatePlacementPolicy         ActionType = 51
	ActionAlterPlacementPolicy          ActionType = 52
	ActionDropPlacementPolicy           ActionType = 53
	ActionAlterTablePartitionPlacement  ActionType = 54
	ActionModifySchemaDefaultPlacement  ActionType = 55
	ActionAlterTablePlacement           ActionType = 56
	ActionAlterCacheTable               ActionType = 57
	// not used
	ActionAlterTableStatsOptions ActionType = 58
	ActionAlterNoCacheTable      ActionType = 59
	ActionCreateTables           ActionType = 60
	ActionMultiSchemaChange      ActionType = 61
	ActionFlashbackCluster       ActionType = 62
	ActionRecoverSchema          ActionType = 63
	ActionReorganizePartition    ActionType = 64
	ActionAlterTTLInfo           ActionType = 65
	ActionAlterTTLRemove         ActionType = 67
	ActionCreateResourceGroup    ActionType = 68
	ActionAlterResourceGroup     ActionType = 69
	ActionDropResourceGroup      ActionType = 70
	ActionAlterTablePartitioning ActionType = 71
	ActionRemovePartitioning     ActionType = 72
	ActionAddVectorIndex         ActionType = 73
)

List DDL actions.

func (ActionType) String

func (action ActionType) String() string

String returns the current DDL action as a string.
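
Each ActionType renders to the human-readable name registered in ActionMap. A minimal sketch (the exact fallback text for unknown actions is an implementation detail of String):

import "fmt"

// describeAction logs a DDL action both as its numeric value and its name.
func describeAction(a ActionType) {
	fmt.Printf("action %d: %s\n", a, a.String())
}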

type AddCheckConstraintArgs

type AddCheckConstraintArgs struct {
	Constraint *ConstraintInfo `json:"constraint_info"`
}

AddCheckConstraintArgs is the args for the add check constraint job.

func GetAddCheckConstraintArgs

func GetAddCheckConstraintArgs(job *Job) (*AddCheckConstraintArgs, error)

GetAddCheckConstraintArgs gets the AddCheckConstraint args.

type AddForeignKeyArgs

type AddForeignKeyArgs struct {
	FkInfo  *FKInfo `json:"fk_info,omitempty"`
	FkCheck bool    `json:"fk_check,omitempty"`
}

AddForeignKeyArgs is the arguments for ActionAddForeignKey ddl.

func GetAddForeignKeyArgs

func GetAddForeignKeyArgs(job *Job) (*AddForeignKeyArgs, error)

GetAddForeignKeyArgs gets the args for the AddForeignKey ddl.

type AddForeignKeyInfo

type AddForeignKeyInfo struct {
	Name model.CIStr
	Cols []model.CIStr
}

AddForeignKeyInfo contains foreign key information.

type AdminCommandOperator

type AdminCommandOperator int

AdminCommandOperator indicates where the Cancel/Pause/Resume command on DDL jobs comes from.

const (
	// AdminCommandByNotKnown indicates that the source of the Cancel/Pause/Resume
	// command on the DDL job is unknown.
	AdminCommandByNotKnown AdminCommandOperator = iota
	// AdminCommandByEndUser indicates that the Cancel/Pause/Resume command on
	// DDL job is issued by the end user.
	AdminCommandByEndUser
	// AdminCommandBySystem indicates that the Cancel/Pause/Resume command on
	// DDL job is issued by TiDB itself, such as Upgrade(bootstrap).
	AdminCommandBySystem
)

func (*AdminCommandOperator) String

func (a *AdminCommandOperator) String() string

String implements fmt.Stringer interface.

type AffectedOption

type AffectedOption struct {
	SchemaID    int64 `json:"schema_id"`
	TableID     int64 `json:"table_id"`
	OldTableID  int64 `json:"old_table_id"`
	OldSchemaID int64 `json:"old_schema_id"`
}

AffectedOption is used when a DDL affects multiple tables.

type AlterIndexVisibilityArgs

type AlterIndexVisibilityArgs struct {
	IndexName pmodel.CIStr `json:"index_name,omitempty"`
	Invisible bool         `json:"invisible,omitempty"`
}

AlterIndexVisibilityArgs is the arguments for ActionAlterIndexVisibility ddl.

func GetAlterIndexVisibilityArgs

func GetAlterIndexVisibilityArgs(job *Job) (*AlterIndexVisibilityArgs, error)

GetAlterIndexVisibilityArgs gets the args for AlterIndexVisibility ddl.

type AlterSequenceArgs

type AlterSequenceArgs struct {
	Ident      ast.Ident             `json:"ident,omitempty"`
	SeqOptions []*ast.SequenceOption `json:"seq_options,omitempty"`
}

AlterSequenceArgs is the arguments for alter sequence ddl job.

func GetAlterSequenceArgs

func GetAlterSequenceArgs(job *Job) (*AlterSequenceArgs, error)

GetAlterSequenceArgs gets the args for alter Sequence ddl job.

type AlterTTLInfoArgs

type AlterTTLInfoArgs struct {
	TTLInfo            *TTLInfo `json:"ttl_info,omitempty"`
	TTLEnable          *bool    `json:"ttl_enable,omitempty"`
	TTLCronJobSchedule *string  `json:"ttl_cron_job_schedule,omitempty"`
}

AlterTTLInfoArgs is the arguments for alter ttl info job.

func GetAlterTTLInfoArgs

func GetAlterTTLInfoArgs(job *Job) (*AlterTTLInfoArgs, error)

GetAlterTTLInfoArgs gets the args for alter ttl info job.

type AlterTableAttributesArgs

type AlterTableAttributesArgs struct {
	LabelRule *pdhttp.LabelRule `json:"label_rule,omitempty"`
}

AlterTableAttributesArgs is the arguments for the alter table attributes job.

func GetAlterTableAttributesArgs

func GetAlterTableAttributesArgs(job *Job) (*AlterTableAttributesArgs, error)

GetAlterTableAttributesArgs gets the alter table attributes args from the job.

type AlterTablePartitionArgs

type AlterTablePartitionArgs struct {
	PartitionID   int64             `json:"partition_id,omitempty"`
	LabelRule     *pdhttp.LabelRule `json:"label_rule,omitempty"`
	PolicyRefInfo *PolicyRefInfo    `json:"policy_ref_info,omitempty"`
}

AlterTablePartitionArgs is the arguments for the alter table partition job. It's used for:

  • ActionAlterTablePartitionAttributes
  • ActionAlterTablePartitionPlacement

func GetAlterTablePartitionArgs

func GetAlterTablePartitionArgs(job *Job) (*AlterTablePartitionArgs, error)

GetAlterTablePartitionArgs gets the alter table partition args.

type AlterTablePlacementArgs

type AlterTablePlacementArgs struct {
	PlacementPolicyRef *PolicyRefInfo `json:"placement_policy_ref,omitempty"`
}

AlterTablePlacementArgs is the arguments for alter table placements ddl job.

func GetAlterTablePlacementArgs

func GetAlterTablePlacementArgs(job *Job) (*AlterTablePlacementArgs, error)

GetAlterTablePlacementArgs gets the args for alter table placements ddl job.

type AutoIDGroup

type AutoIDGroup struct {
	RowID       int64
	IncrementID int64
	RandomID    int64
}

AutoIDGroup represents a group of auto IDs of a specific table.

type BackfillMeta

type BackfillMeta struct {
	IsUnique   bool          `json:"is_unique"`
	EndInclude bool          `json:"end_include"`
	Error      *terror.Error `json:"err"`

	SQLMode       mysql.SQLMode                    `json:"sql_mode"`
	Warnings      map[errors.ErrorID]*terror.Error `json:"warnings"`
	WarningsCount map[errors.ErrorID]int64         `json:"warnings_count"`
	Location      *TimeZoneLocation                `json:"location"`
	ReorgTp       ReorgType                        `json:"reorg_tp"`
	RowCount      int64                            `json:"row_count"`
	StartKey      []byte                           `json:"start_key"`
	EndKey        []byte                           `json:"end_key"`
	CurrKey       []byte                           `json:"curr_key"`
	*JobMeta      `json:"job_meta"`
}

BackfillMeta is meta info of the backfill job.

func (*BackfillMeta) Decode

func (bm *BackfillMeta) Decode(b []byte) error

Decode decodes BackfillMeta from the json buffer.

func (*BackfillMeta) Encode

func (bm *BackfillMeta) Encode() ([]byte, error)

Encode encodes BackfillMeta with json format.
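
Encode and Decode are symmetric JSON helpers, so persisting and restoring a BackfillMeta is a plain round trip. A hedged sketch:

// roundTripBackfillMeta encodes a BackfillMeta to JSON and decodes it back.
func roundTripBackfillMeta(bm *BackfillMeta) (*BackfillMeta, error) {
	data, err := bm.Encode()
	if err != nil {
		return nil, err
	}
	restored := &BackfillMeta{}
	if err := restored.Decode(data); err != nil {
		return nil, err
	}
	return restored, nil
}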

type BackfillState

type BackfillState byte

BackfillState is the state used by the backfill-merge process.

const (
	// BackfillStateInapplicable means the backfill-merge process is not used.
	BackfillStateInapplicable BackfillState = iota
	// BackfillStateRunning is the state that the backfill process is running.
	// In this state, the index's write and delete operations are redirected to a temporary index.
	BackfillStateRunning
	// BackfillStateReadyToMerge is the state that the temporary index's records are ready to be merged back
	// to the origin index.
	// In this state, the index's write and delete operations are copied to a temporary index.
	// This state is used to make sure that all the TiDB instances are aware of the copy
	// during the merge(BackfillStateMerging).
	BackfillStateReadyToMerge
	// BackfillStateMerging is the state that the temp index is merging back to the origin index.
	// In this state, the index's write and delete operations are copied to a temporary index.
	BackfillStateMerging
)

func (BackfillState) String

func (s BackfillState) String() string

String implements fmt.Stringer interface.

type BatchCreateTableArgs

type BatchCreateTableArgs struct {
	Tables []*CreateTableArgs `json:"tables,omitempty"`
}

BatchCreateTableArgs is the arguments for batch create table job.

func GetBatchCreateTableArgs

func GetBatchCreateTableArgs(job *Job) (*BatchCreateTableArgs, error)

GetBatchCreateTableArgs gets the batch create-table args.

type ChangeStateInfo

type ChangeStateInfo struct {
	// DependencyColumnOffset is the changing column offset that the current column depends on when executing modify/change column.
	DependencyColumnOffset int `json:"relative_col_offset"`
}

ChangeStateInfo is used for recording the information of schema changing.

type CheckConstraintArgs

type CheckConstraintArgs struct {
	ConstraintName pmodel.CIStr `json:"constraint_name,omitempty"`
	Enforced       bool         `json:"enforced,omitempty"`
}

CheckConstraintArgs is the arguments for both AlterCheckConstraint and DropCheckConstraint job.

func GetCheckConstraintArgs

func GetCheckConstraintArgs(job *Job) (*CheckConstraintArgs, error)

GetCheckConstraintArgs gets the AlterCheckConstraint args.

type ColumnInfo

type ColumnInfo struct {
	ID                    int64       `json:"id"`
	Name                  model.CIStr `json:"name"`
	Offset                int         `json:"offset"`
	OriginDefaultValue    any         `json:"origin_default"`
	OriginDefaultValueBit []byte      `json:"origin_default_bit"`
	DefaultValue          any         `json:"default"`
	DefaultValueBit       []byte      `json:"default_bit"`
	// DefaultIsExpr indicates whether the default value string is an expression.
	DefaultIsExpr       bool                `json:"default_is_expr"`
	GeneratedExprString string              `json:"generated_expr_string"`
	GeneratedStored     bool                `json:"generated_stored"`
	Dependences         map[string]struct{} `json:"dependences"`
	FieldType           types.FieldType     `json:"type"`
	State               SchemaState         `json:"state"`
	Comment             string              `json:"comment"`
	// A hidden column is used internally (e.g. by expression indexes) and is not accessible to users.
	Hidden           bool `json:"hidden"`
	*ChangeStateInfo `json:"change_state_info"`
	// Version means the version of the column info.
	// Version = 0: OriginDefaultValue and DefaultValue of a timestamp column store the default time in the system time zone.
	//              That is a bug if multiple TiDB servers are in different system time zones.
	// Version = 1: OriginDefaultValue and DefaultValue of a timestamp column store the default time in the UTC time zone.
	//              This fixes the bug in version 0. For compatibility with version 0, we add the version field to the column info struct.
	Version uint64 `json:"version"`
}

ColumnInfo provides meta data describing a table column.

func FindColumnInfo

func FindColumnInfo(cols []*ColumnInfo, name string) *ColumnInfo

FindColumnInfo finds ColumnInfo in cols by name.

func FindColumnInfoByID

func FindColumnInfoByID(cols []*ColumnInfo, id int64) *ColumnInfo

FindColumnInfoByID finds ColumnInfo in cols by id.
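
Both lookups operate on a plain []*ColumnInfo slice and return nil when nothing matches. A usage sketch (the function and variable names are illustrative only):

// lookupColumn shows the two lookup helpers side by side.
func lookupColumn(cols []*ColumnInfo, name string, id int64) {
	if col := FindColumnInfo(cols, name); col != nil {
		_ = col.GetType() // found by name
	}
	if col := FindColumnInfoByID(cols, id); col != nil {
		_ = col.GetFlag() // found by ID
	}
}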

func NewExtraHandleColInfo

func NewExtraHandleColInfo() *ColumnInfo

NewExtraHandleColInfo mocks a column info for extra handle column.

func NewExtraPhysTblIDColInfo

func NewExtraPhysTblIDColInfo() *ColumnInfo

NewExtraPhysTblIDColInfo mocks a column info for extra partition id column.

func (*ColumnInfo) AddFlag

func (c *ColumnInfo) AddFlag(flag uint)

AddFlag adds the flag of ColumnInfo.

func (*ColumnInfo) AndFlag

func (c *ColumnInfo) AndFlag(flag uint)

AndFlag performs a bitwise AND between the given flag and the column's flag.

func (*ColumnInfo) Clone

func (c *ColumnInfo) Clone() *ColumnInfo

Clone clones ColumnInfo.

func (*ColumnInfo) DelFlag

func (c *ColumnInfo) DelFlag(flag uint)

DelFlag removes the flag from the column's flag.

func (*ColumnInfo) GetCharset

func (c *ColumnInfo) GetCharset() string

GetCharset returns the charset of ColumnInfo.

func (*ColumnInfo) GetCollate

func (c *ColumnInfo) GetCollate() string

GetCollate returns the collation of ColumnInfo.

func (*ColumnInfo) GetDecimal

func (c *ColumnInfo) GetDecimal() int

GetDecimal returns the decimal of ColumnInfo.

func (*ColumnInfo) GetDefaultValue

func (c *ColumnInfo) GetDefaultValue() any

GetDefaultValue gets the default value of the column. The default value used to be stored in the DefaultValue field, but bit-type default values are now stored in DefaultValueBit to fix a decode/encode bug for bit default values.

func (*ColumnInfo) GetElems

func (c *ColumnInfo) GetElems() []string

GetElems returns the elems of ColumnInfo.

func (*ColumnInfo) GetFlag

func (c *ColumnInfo) GetFlag() uint

GetFlag returns the flag of ColumnInfo.

func (*ColumnInfo) GetFlen

func (c *ColumnInfo) GetFlen() int

GetFlen returns the flen of ColumnInfo.

func (*ColumnInfo) GetOriginDefaultValue

func (c *ColumnInfo) GetOriginDefaultValue() any

GetOriginDefaultValue gets the origin default value.

func (*ColumnInfo) GetType

func (c *ColumnInfo) GetType() byte

GetType returns the type of ColumnInfo.

func (*ColumnInfo) GetTypeDesc

func (c *ColumnInfo) GetTypeDesc() string

GetTypeDesc gets the description for column type.

func (*ColumnInfo) IsGenerated

func (c *ColumnInfo) IsGenerated() bool

IsGenerated returns true if the column is generated column.

func (*ColumnInfo) IsVirtualGenerated

func (c *ColumnInfo) IsVirtualGenerated() bool

IsVirtualGenerated checks the column if it is virtual.

func (*ColumnInfo) SetCharset

func (c *ColumnInfo) SetCharset(charset string)

SetCharset sets charset of the ColumnInfo

func (*ColumnInfo) SetCollate

func (c *ColumnInfo) SetCollate(collate string)

SetCollate sets the collation of the column.

func (*ColumnInfo) SetDecimal

func (c *ColumnInfo) SetDecimal(decimal int)

SetDecimal sets the decimal of ColumnInfo.

func (*ColumnInfo) SetDefaultValue

func (c *ColumnInfo) SetDefaultValue(value any) error

SetDefaultValue sets the default value.

func (*ColumnInfo) SetElems

func (c *ColumnInfo) SetElems(elems []string)

SetElems sets the elements of an enum column.

func (*ColumnInfo) SetFlag

func (c *ColumnInfo) SetFlag(flag uint)

SetFlag sets the flag of ColumnInfo.

func (*ColumnInfo) SetFlen

func (c *ColumnInfo) SetFlen(flen int)

SetFlen sets the flen of ColumnInfo.

func (*ColumnInfo) SetOriginDefaultValue

func (c *ColumnInfo) SetOriginDefaultValue(value any) error

SetOriginDefaultValue sets the origin default value. For the mysql.TypeBit type, the default value must be stored in string format, so other values such as ints must be converted to string format first. The mysql.TypeBit type supports a null default value.
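
Given the string requirement for bit columns described above, a caller might guard the conversion before storing the value. A hedged sketch; the exact string encoding expected for bit defaults is not specified here, so the check below only enforces that a string is passed:

import "fmt"

// setOriginDefault enforces the documented rule that mysql.TypeBit columns
// store their default value in string form.
func setOriginDefault(c *ColumnInfo, v any) error {
	if c.GetType() == mysql.TypeBit {
		if _, ok := v.(string); !ok {
			return fmt.Errorf("bit column %s: convert the default to its string form first", c.Name.O)
		}
	}
	return c.SetOriginDefaultValue(v)
}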

func (*ColumnInfo) SetType

func (c *ColumnInfo) SetType(tp byte)

SetType sets the type of ColumnInfo.

func (*ColumnInfo) ToggleFlag

func (c *ColumnInfo) ToggleFlag(flag uint)

ToggleFlag flips the flag according to the value.

type ConstraintInfo

type ConstraintInfo struct {
	ID             int64         `json:"id"`
	Name           model.CIStr   `json:"constraint_name"`
	Table          model.CIStr   `json:"tbl_name"`        // Table name.
	ConstraintCols []model.CIStr `json:"constraint_cols"` // Depended column names.
	Enforced       bool          `json:"enforced"`
	InColumn       bool          `json:"in_column"` // Indicates whether the constraint is a column type check.
	ExprString     string        `json:"expr_string"`
	State          SchemaState   `json:"state"`
}

ConstraintInfo provides meta data describing a check-expression constraint.

func (*ConstraintInfo) Clone

func (ci *ConstraintInfo) Clone() *ConstraintInfo

Clone clones ConstraintInfo.

type CreateSchemaArgs

type CreateSchemaArgs struct {
	DBInfo *DBInfo `json:"db_info,omitempty"`
}

CreateSchemaArgs is the arguments for create schema job.

func GetCreateSchemaArgs

func GetCreateSchemaArgs(job *Job) (*CreateSchemaArgs, error)

GetCreateSchemaArgs gets the args for create schema job.

type CreateTableArgs

type CreateTableArgs struct {
	TableInfo *TableInfo `json:"table_info,omitempty"`
	// below 2 are used for create view.
	OnExistReplace bool  `json:"on_exist_replace,omitempty"`
	OldViewTblID   int64 `json:"old_view_tbl_id,omitempty"`
	// used for create table.
	FKCheck bool `json:"fk_check,omitempty"`
}

CreateTableArgs is the arguments for create table/view/sequence job.

func GetCreateTableArgs

func GetCreateTableArgs(job *Job) (*CreateTableArgs, error)

GetCreateTableArgs gets the create-table args.

type DBInfo

type DBInfo struct {
	ID         int64       `json:"id"`      // Database ID
	Name       model.CIStr `json:"db_name"` // DB name.
	Charset    string      `json:"charset"`
	Collate    string      `json:"collate"`
	Deprecated struct {
		Tables []*TableInfo `json:"-"` // Tables in the DB.
	}
	State              SchemaState      `json:"state"`
	PlacementPolicyRef *PolicyRefInfo   `json:"policy_ref_info"`
	TableName2ID       map[string]int64 `json:"-"`
}

DBInfo provides meta data describing a DB.

func (*DBInfo) Clone

func (db *DBInfo) Clone() *DBInfo

Clone clones DBInfo.

func (*DBInfo) Copy

func (db *DBInfo) Copy() *DBInfo

Copy shallow copies DBInfo.

type DDLBDRType

type DDLBDRType string

DDLBDRType is the type for DDL when BDR enable.

const (
	// UnsafeDDL means the DDL can't be executed by user when cluster is Primary/Secondary.
	UnsafeDDL DDLBDRType = "unsafe DDL"
	// SafeDDL means the DDL can be executed by user when cluster is Primary.
	SafeDDL DDLBDRType = "safe DDL"
	// UnmanagementDDL means the DDL can't be synced by CDC.
	UnmanagementDDL DDLBDRType = "unmanagement DDL"
	// UnknownDDL means the DDL is unknown.
	UnknownDDL DDLBDRType = "unknown DDL"
)

type DDLReorgMeta

type DDLReorgMeta struct {
	SQLMode           mysql.SQLMode                    `json:"sql_mode"`
	Warnings          map[errors.ErrorID]*terror.Error `json:"warnings"`
	WarningsCount     map[errors.ErrorID]int64         `json:"warnings_count"`
	Location          *TimeZoneLocation                `json:"location"`
	ReorgTp           ReorgType                        `json:"reorg_tp"`
	IsFastReorg       bool                             `json:"is_fast_reorg"`
	IsDistReorg       bool                             `json:"is_dist_reorg"`
	UseCloudStorage   bool                             `json:"use_cloud_storage"`
	ResourceGroupName string                           `json:"resource_group_name"`
	Version           int64                            `json:"version"`
	TargetScope       string                           `json:"target_scope"`
	// These two variables are used to control the concurrency and batch size of the reorganization process.
	// They can be adjusted dynamically through `admin alter ddl jobs` command.
	// Note: Don't get or set these two variables directly, use the functions instead.
	Concurrency   atomic.Int64 `json:"concurrency"`
	BatchSize     atomic.Int64 `json:"batch_size"`
	MaxWriteSpeed atomic.Int64 `json:"max_write_speed"`
}

DDLReorgMeta is meta info of DDL reorganization.

func (*DDLReorgMeta) GetBatchSizeOrDefault

func (dm *DDLReorgMeta) GetBatchSizeOrDefault(defaultVal int) int

GetBatchSizeOrDefault gets the batch size from DDLReorgMeta.

func (*DDLReorgMeta) GetConcurrencyOrDefault

func (dm *DDLReorgMeta) GetConcurrencyOrDefault(defaultVal int) int

GetConcurrencyOrDefault gets the concurrency from DDLReorgMeta. Pass a default value to cover the case where the reorg meta comes from an old cluster and Concurrency is 0.

func (*DDLReorgMeta) GetMaxWriteSpeedOrDefault

func (dm *DDLReorgMeta) GetMaxWriteSpeedOrDefault() int

GetMaxWriteSpeedOrDefault gets the max write speed from DDLReorgMeta. 0 means no limit.

func (*DDLReorgMeta) SetBatchSize

func (dm *DDLReorgMeta) SetBatchSize(batchSize int)

SetBatchSize sets the batch size in DDLReorgMeta.

func (*DDLReorgMeta) SetConcurrency

func (dm *DDLReorgMeta) SetConcurrency(concurrency int)

SetConcurrency sets the concurrency in DDLReorgMeta.

func (*DDLReorgMeta) SetMaxWriteSpeed

func (dm *DDLReorgMeta) SetMaxWriteSpeed(maxWriteSpeed int)

SetMaxWriteSpeed sets the max write speed in DDLReorgMeta.
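
The struct comment above asks callers to go through these accessors rather than touching the atomic fields directly; the getters also take a fallback for metas written by older clusters. A usage sketch with illustrative default values:

// tuneReorg reads the current settings (with fallbacks) and doubles the batch size.
func tuneReorg(dm *DDLReorgMeta) {
	concurrency := dm.GetConcurrencyOrDefault(4) // 4 is an illustrative default
	batchSize := dm.GetBatchSizeOrDefault(256)   // 256 is an illustrative default

	dm.SetConcurrency(concurrency)
	dm.SetBatchSize(batchSize * 2)
	dm.SetMaxWriteSpeed(0) // 0 means no write-speed limit
}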

type DistanceMetric

type DistanceMetric string

DistanceMetric is the distance metric used by the vector index. Note that not all distance functions are indexable. See IndexableFnNameToDistanceMetric for a list of indexable distance functions.

const (
	DistanceMetricL2 DistanceMetric = "L2"
	// DistanceMetricCosine is cosine distance.
	DistanceMetricCosine DistanceMetric = "COSINE"
	// DistanceMetricInnerProduct is inner product.
	// Currently this distance metric is not supported. It is placed here only as a
	// reminder of the desired naming convention (UPPER_UNDER_SCORE) if this is ever
	// implemented.
	DistanceMetricInnerProduct DistanceMetric = "INNER_PRODUCT"
)

Note: tipb.VectorDistanceMetric's enum names must be aligned with these constant values.

type DropForeignKeyArgs

type DropForeignKeyArgs struct {
	FkName pmodel.CIStr `json:"fk_name,omitempty"`
}

DropForeignKeyArgs is the arguments for DropForeignKey ddl.

func GetDropForeignKeyArgs

func GetDropForeignKeyArgs(job *Job) (*DropForeignKeyArgs, error)

GetDropForeignKeyArgs gets the args for DropForeignKey ddl.

type DropSchemaArgs

type DropSchemaArgs struct {
	// These are the args for job submission; they are invalid once the job is finished.
	FKCheck bool `json:"fk_check,omitempty"`
	// These are the args for the finished job. This list includes all partition IDs too.
	AllDroppedTableIDs []int64 `json:"all_dropped_table_ids,omitempty"`
}

DropSchemaArgs is the arguments for drop schema job.

func GetDropSchemaArgs

func GetDropSchemaArgs(job *Job) (*DropSchemaArgs, error)

GetDropSchemaArgs gets the args for drop schema job.

func GetFinishedDropSchemaArgs

func GetFinishedDropSchemaArgs(job *Job) (*DropSchemaArgs, error)

GetFinishedDropSchemaArgs gets the args for drop schema job after the job is finished.

type DropTableArgs

type DropTableArgs struct {
	// below fields are only for drop table.
	// When dropping multiple tables, Identifiers is the same for each table, but each
	// drop-table runs in a separate job.
	Identifiers []ast.Ident `json:"identifiers,omitempty"`
	FKCheck     bool        `json:"fk_check,omitempty"`

	// below fields are finished job args
	StartKey        []byte   `json:"start_key,omitempty"`
	OldPartitionIDs []int64  `json:"old_partition_ids,omitempty"`
	OldRuleIDs      []string `json:"old_rule_ids,omitempty"`
}

DropTableArgs is the arguments for the drop table/view/sequence job. When dropping multiple objects, each object will have a separate job.

func GetDropTableArgs

func GetDropTableArgs(job *Job) (*DropTableArgs, error)

GetDropTableArgs gets the drop-table args.

func GetFinishedDropTableArgs

func GetFinishedDropTableArgs(job *Job) (*DropTableArgs, error)

GetFinishedDropTableArgs gets the drop-table args after the job is finished.

type EmptyArgs

type EmptyArgs struct{}

EmptyArgs is the args for ddl job with no args.

type ExchangePartitionInfo

type ExchangePartitionInfo struct {
	// It is the nt (non-partitioned) table ID when the table holding this info is the partitioned table; otherwise it is the pt (partitioned) table ID.
	ExchangePartitionTableID int64 `json:"exchange_partition_id"`
	ExchangePartitionDefID   int64 `json:"exchange_partition_def_id"`
	// Deprecated, not used
	XXXExchangePartitionFlag bool `json:"exchange_partition_flag"`
}

ExchangePartitionInfo provides exchange partition info.

type ExchangeTablePartitionArgs

type ExchangeTablePartitionArgs struct {
	PartitionID    int64  `json:"partition_id,omitempty"`
	PTSchemaID     int64  `json:"pt_schema_id,omitempty"`
	PTTableID      int64  `json:"pt_table_id,omitempty"`
	PartitionName  string `json:"partition_name,omitempty"`
	WithValidation bool   `json:"with_validation,omitempty"`
}

ExchangeTablePartitionArgs is the arguments for the exchange table partition job. pt: the partitioned table to exchange. nt: the non-partitioned table to exchange with.

func GetExchangeTablePartitionArgs

func GetExchangeTablePartitionArgs(job *Job) (*ExchangeTablePartitionArgs, error)

GetExchangeTablePartitionArgs gets the exchange table partition args.

type FKInfo

type FKInfo struct {
	ID        int64         `json:"id"`
	Name      model.CIStr   `json:"fk_name"`
	RefSchema model.CIStr   `json:"ref_schema"`
	RefTable  model.CIStr   `json:"ref_table"`
	RefCols   []model.CIStr `json:"ref_cols"`
	Cols      []model.CIStr `json:"cols"`
	OnDelete  int           `json:"on_delete"`
	OnUpdate  int           `json:"on_update"`
	State     SchemaState   `json:"state"`
	Version   int           `json:"version"`
}

FKInfo provides meta data describing a foreign key constraint.

func FindFKInfoByName

func FindFKInfoByName(fks []*FKInfo, name string) *FKInfo

FindFKInfoByName finds FKInfo in fks by lowercase name.

func (*FKInfo) Clone

func (fk *FKInfo) Clone() *FKInfo

Clone clones FKInfo.

func (*FKInfo) String

func (fk *FKInfo) String(db, tb string) string

String returns the string representation of FKInfo.

type FinishedJobArgs

type FinishedJobArgs interface {
	JobArgs
	// contains filtered or unexported methods
}

FinishedJobArgs is the interface for finished job arguments. In most cases, job args are cleared out after the job is finished, but some jobs will write some args back to the job for other components.

type FlashbackClusterArgs

type FlashbackClusterArgs struct {
	FlashbackTS        uint64         `json:"flashback_ts,omitempty"`
	PDScheduleValue    map[string]any `json:"pd_schedule_value,omitempty"`
	EnableGC           bool           `json:"enable_gc,omitempty"`
	EnableAutoAnalyze  bool           `json:"enable_auto_analyze,omitempty"`
	EnableTTLJob       bool           `json:"enable_ttl_job,omitempty"`
	SuperReadOnly      bool           `json:"super_read_only,omitempty"`
	LockedRegionCnt    uint64         `json:"locked_region_cnt,omitempty"`
	StartTS            uint64         `json:"start_ts,omitempty"`
	CommitTS           uint64         `json:"commit_ts,omitempty"`
	FlashbackKeyRanges []KeyRange     `json:"key_ranges,omitempty"`
}

FlashbackClusterArgs is the argument for flashback cluster.

func GetFlashbackClusterArgs

func GetFlashbackClusterArgs(job *Job) (*FlashbackClusterArgs, error)

GetFlashbackClusterArgs get the flashback cluster argument from job.

type HistoryInfo

type HistoryInfo struct {
	SchemaVersion int64
	DBInfo        *DBInfo
	TableInfo     *TableInfo
	FinishedTS    uint64

	// MultipleTableInfos is like TableInfo but only for operations updating multiple tables.
	MultipleTableInfos []*TableInfo
}

HistoryInfo is used for binlog.

func (*HistoryInfo) AddDBInfo

func (h *HistoryInfo) AddDBInfo(schemaVer int64, dbInfo *DBInfo)

AddDBInfo adds schema version and schema information that are used for binlog. dbInfo is added in the following operations: create database, drop database.

func (*HistoryInfo) AddTableInfo

func (h *HistoryInfo) AddTableInfo(schemaVer int64, tblInfo *TableInfo)

AddTableInfo adds schema version and table information that are used for binlog. tblInfo is added except for the following operations: create database, drop database.

func (*HistoryInfo) Clean

func (h *HistoryInfo) Clean()

Clean cleans history information.

func (*HistoryInfo) SetTableInfos

func (h *HistoryInfo) SetTableInfos(schemaVer int64, tblInfos []*TableInfo)

SetTableInfos is like AddTableInfo, but will add multiple table infos to the binlog.

type IndexArg

type IndexArg struct {
	// Global is never used, we only use Global in IndexOption. Can be deprecated later.
	Global                  bool                          `json:"-"`
	Unique                  bool                          `json:"unique,omitempty"`
	IndexName               pmodel.CIStr                  `json:"index_name,omitempty"`
	IndexPartSpecifications []*ast.IndexPartSpecification `json:"index_part_specifications"`
	IndexOption             *ast.IndexOption              `json:"index_option,omitempty"`
	HiddenCols              []*ColumnInfo                 `json:"hidden_cols,omitempty"`

	// For vector index
	FuncExpr string `json:"func_expr,omitempty"`
	IsVector bool   `json:"is_vector,omitempty"`

	// For PK
	IsPK    bool          `json:"is_pk,omitempty"`
	SQLMode mysql.SQLMode `json:"sql_mode,omitempty"`

	// IfExist will be used in onDropIndex.
	IndexID  int64 `json:"index_id,omitempty"`
	IfExist  bool  `json:"if_exist,omitempty"`
	IsGlobal bool  `json:"is_global,omitempty"`
}

IndexArg is the argument for a single add/drop/rename index operation. Different types of jobs use different fields. The fields used by each type are listed below (in the order of the v1 layout):

Adding NonPK: Unique, IndexName, IndexPartSpecifications, IndexOption, SQLMode, Warning(not stored, always nil), Global
Adding PK: Unique, IndexName, IndexPartSpecifications, IndexOption, HiddenCols, Global
Adding vector index: IndexName, IndexPartSpecifications, IndexOption, FuncExpr
Drop index: IndexName, IfExist, IndexID
Rollback add index: IndexName, IfExist, IsVector
Rename index: IndexName

type IndexColumn

type IndexColumn struct {
	Name   model.CIStr `json:"name"`   // Index name
	Offset int         `json:"offset"` // Index offset
	// Length of prefix when using column prefix
	// for indexing;
	// UnspecifiedLength if not using prefix indexing
	Length int `json:"length"`
}

IndexColumn provides index column info.

func FindIndexColumnByName

func FindIndexColumnByName(indexCols []*IndexColumn, nameL string) (int, *IndexColumn)

FindIndexColumnByName finds IndexColumn by name. When IndexColumn is not found, returns (-1, nil).
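
The (-1, nil) return value makes "column not in index" easy to distinguish; the nameL parameter suggests the name is expected in lower case, which is an assumption here. Sketch:

// indexColumnOffset returns the position of a column inside an index, or -1.
func indexColumnOffset(idx *IndexInfo, lowerName string) int {
	offset, col := FindIndexColumnByName(idx.Columns, lowerName)
	if col == nil {
		return -1
	}
	return offset
}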

func (*IndexColumn) Clone

func (i *IndexColumn) Clone() *IndexColumn

Clone clones IndexColumn.

type IndexInfo

type IndexInfo struct {
	ID            int64            `json:"id"`
	Name          model.CIStr      `json:"idx_name"` // Index name.
	Table         model.CIStr      `json:"tbl_name"` // Table name.
	Columns       []*IndexColumn   `json:"idx_cols"` // Index columns.
	State         SchemaState      `json:"state"`
	BackfillState BackfillState    `json:"backfill_state"`
	Comment       string           `json:"comment"`      // Comment
	Tp            model.IndexType  `json:"index_type"`   // Index type: Btree, Hash, Rtree or HNSW
	Unique        bool             `json:"is_unique"`    // Whether the index is unique.
	Primary       bool             `json:"is_primary"`   // Whether the index is primary key.
	Invisible     bool             `json:"is_invisible"` // Whether the index is invisible.
	Global        bool             `json:"is_global"`    // Whether the index is global.
	MVIndex       bool             `json:"mv_index"`     // Whether the index is multivalued index.
	VectorInfo    *VectorIndexInfo `json:"vector_index"` // VectorInfo is the vector index information.
}

IndexInfo provides meta data describing a DB index. It corresponds to the statement `CREATE INDEX Name ON Table (Column);` See https://dev.mysql.com/doc/refman/5.7/en/create-index.html

func FindIndexByColumns

func FindIndexByColumns(tbInfo *TableInfo, indices []*IndexInfo, cols ...model.CIStr) *IndexInfo

FindIndexByColumns finds the IndexInfo in indices that covers the specified columns.

func FindIndexInfoByID

func FindIndexInfoByID(indices []*IndexInfo, id int64) *IndexInfo

FindIndexInfoByID finds IndexInfo in indices by id.

func (*IndexInfo) Clone

func (index *IndexInfo) Clone() *IndexInfo

Clone clones IndexInfo.

func (*IndexInfo) Equals

func (index *IndexInfo) Equals(other any) bool

Equals implements HashEquals interface.

func (*IndexInfo) FindColumnByName

func (index *IndexInfo) FindColumnByName(nameL string) *IndexColumn

FindColumnByName finds the index column with the specified name.

func (*IndexInfo) HasColumnInIndexColumns

func (index *IndexInfo) HasColumnInIndexColumns(tblInfo *TableInfo, colID int64) bool

HasColumnInIndexColumns checks whether the index contains the column with the specified ID.

func (*IndexInfo) HasPrefixIndex

func (index *IndexInfo) HasPrefixIndex() bool

HasPrefixIndex returns whether any column of this index uses a prefix length.

func (*IndexInfo) Hash64

func (index *IndexInfo) Hash64(h base.Hasher)

Hash64 implements the HashEquals interface.

func (*IndexInfo) IsPublic

func (index *IndexInfo) IsPublic() bool

IsPublic checks if the index state is public.

type IndexOp

type IndexOp byte

IndexOp is used to identify the argument type; it is only used for v1 index args. TODO(joechenrh): remove this type after totally switching to v2.

type InvolvingSchemaInfo

type InvolvingSchemaInfo struct {
	Database      string                  `json:"database,omitempty"`
	Table         string                  `json:"table,omitempty"`
	Policy        string                  `json:"policy,omitempty"`
	ResourceGroup string                  `json:"resource_group,omitempty"`
	Mode          InvolvingSchemaInfoMode `json:"mode,omitempty"`
}

InvolvingSchemaInfo describes the schema info involved in the job. The values should be stored in lower case. Only one of the three member groups (Database & Table, Policy, ResourceGroup) should be set in an InvolvingSchemaInfo.

type InvolvingSchemaInfoMode

type InvolvingSchemaInfoMode int

InvolvingSchemaInfoMode is used by InvolvingSchemaInfo.Mode.

const (
	// ExclusiveInvolving is the default value to keep compatibility with old
	// versions.
	ExclusiveInvolving InvolvingSchemaInfoMode = iota
	SharedInvolving
)

ExclusiveInvolving and SharedInvolving behave like an exclusive lock and a shared lock when calculating DDL job dependencies. We also implement fair-lock semantics: if jobs A/B/C arrive in order, job B (requesting object 0 exclusively) is waiting for the running job A (holding object 0 shared), and job C (requesting object 0 shared) arrives, then job C is also blocked until job B finishes, even though jobs A and C have no dependency on each other.

type Job

type Job struct {
	ID   int64      `json:"id"`
	Type ActionType `json:"type"`
	// SchemaID means different for different job types:
	// - ExchangeTablePartition: db id of non-partitioned table
	SchemaID int64 `json:"schema_id"`
	// TableID means different for different job types:
	// - ExchangeTablePartition: non-partitioned table id
	TableID    int64         `json:"table_id"`
	SchemaName string        `json:"schema_name"`
	TableName  string        `json:"table_name"`
	State      JobState      `json:"state"`
	Warning    *terror.Error `json:"warning"`
	Error      *terror.Error `json:"err"`
	// ErrorCount is increased every time we meet an error when running the job.
	ErrorCount int64 `json:"err_count"`
	// RowCount means the number of rows that are processed.
	RowCount int64      `json:"row_count"`
	Mu       sync.Mutex `json:"-"`

	// CtxVars are variables attached to the job. It is for internal usage.
	// E.g. passing arguments between functions by one single *Job pointer.
	// for ExchangeTablePartition, RenameTables, RenameTable, it's [slice-of-db-id, slice-of-table-id]
	CtxVars []any `json:"-"`

	// we use json raw message to delay parsing special args.
	// the args are cleared out unless Job.FillFinishedArgs is called.
	RawArgs json.RawMessage `json:"raw_args"`

	SchemaState SchemaState `json:"schema_state"`
	// SnapshotVer means snapshot version for this job.
	SnapshotVer uint64 `json:"snapshot_ver"`
	// RealStartTS uses timestamp allocated by TSO.
	// Now it's the TS when we actually start the job.
	RealStartTS uint64 `json:"real_start_ts"`
	// StartTS uses timestamp allocated by TSO.
	// Now it's the TS when we put the job to job table.
	StartTS uint64 `json:"start_ts"`
	// DependencyID is the largest job ID that precedes the current job and that the current job depends on.
	DependencyID int64 `json:"dependency_id"`
	// Query string of the ddl job.
	Query      string       `json:"query"`
	BinlogInfo *HistoryInfo `json:"binlog"`

	// Version indicates the DDL job version.
	Version JobVersion `json:"version"`

	// ReorgMeta is meta info of ddl reorganization.
	ReorgMeta *DDLReorgMeta `json:"reorg_meta"`

	// MultiSchemaInfo keeps some warning now for multi schema change.
	MultiSchemaInfo *MultiSchemaInfo `json:"multi_schema_info"`

	// Priority is only used to set the operation priority of adding indices.
	Priority int `json:"priority"`

	// SeqNum is used to identify the order of moving the job into DDL history; it's
	// not the order of the job execution. For jobs with dependencies, or jobs run in
	// the same session, their SeqNum will be in increasing order.
	// When using fast create table, there might be duplicate seq_num values, since any
	// TiDB instance can execute the DDL in this case.
	// Since 8.3, we only honor the previous semantics while the DDL owner is unchanged;
	// on owner change, the new owner starts from 1. The previous semantics forced the
	// 'moving jobs into DDL history' part to be serial, which hurts performance and has
	// very limited usage scenarios.
	SeqNum uint64 `json:"seq_num"`

	// Charset is the charset when the DDL Job is created.
	Charset string `json:"charset"`
	// Collate is the collation when the DDL Job is created.
	Collate string `json:"collate"`

	// InvolvingSchemaInfo indicates the schema info involved in the job.
	// nil means fallback to use job.SchemaName/TableName.
	// Keep unchanged after initialization.
	InvolvingSchemaInfo []InvolvingSchemaInfo `json:"involving_schema_info,omitempty"`

	// AdminOperator indicates where the Admin command comes, by the TiDB
	// itself (AdminCommandBySystem) or by user (AdminCommandByEndUser).
	AdminOperator AdminCommandOperator `json:"admin_operator"`

	// TraceInfo indicates the information for SQL tracing
	TraceInfo *TraceInfo `json:"trace_info"`

	// BDRRole indicates the role of BDR cluster when executing this DDL.
	BDRRole string `json:"bdr_role"`

	// CDCWriteSource indicates the source of CDC write.
	CDCWriteSource uint64 `json:"cdc_write_source"`

	// LocalMode = true means the job is running on the local TiDB that the client
	// connects to; otherwise it runs on the DDL owner.
	// It only happens when tidb_enable_fast_create_table = ON.
	// This field is unused since 8.3.
	LocalMode bool `json:"local_mode"`

	// SQLMode for executing DDL query.
	SQLMode mysql.SQLMode `json:"sql_mode"`
	// contains filtered or unexported fields
}

Job is for a DDL operation.

func (*Job) ClearDecodedArgs

func (job *Job) ClearDecodedArgs()

ClearDecodedArgs clears the decoded args.

func (*Job) Clone

func (job *Job) Clone() *Job

Clone returns a copy of the job. Note: private args fields are not copied.

func (*Job) Decode

func (job *Job) Decode(b []byte) error

Decode decodes the job from the JSON buffer. We must use decodeArgs later to decode the special args for this job.

func (*Job) Encode

func (job *Job) Encode(updateRawArgs bool) ([]byte, error)

Encode encodes the job in JSON format. updateRawArgs is used to determine whether to update the raw args.
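As a minimal self-contained sketch of the delayed-args pattern (json.RawMessage keeps the args unparsed until a typed getter reads them): the toyJob and toyIndexArgs types below are hypothetical stand-ins for Job and a typed args struct, not the actual package API.

package main

import (
	"encoding/json"
	"fmt"
)

// toyJob mirrors the idea of Job.RawArgs: the args stay as raw JSON so the
// generic envelope can be decoded without knowing the concrete arg type.
type toyJob struct {
	Type    string          `json:"type"`
	RawArgs json.RawMessage `json:"raw_args"`
}

// toyIndexArgs stands in for a typed args struct such as ModifyIndexArgs.
type toyIndexArgs struct {
	IndexName string `json:"index_name"`
}

func main() {
	// Encode: marshal the typed args first, then the envelope.
	raw, _ := json.Marshal(toyIndexArgs{IndexName: "idx_a"})
	buf, _ := json.Marshal(toyJob{Type: "add index", RawArgs: raw})

	// Decode: the envelope is parsed eagerly, the args stay raw...
	var job toyJob
	_ = json.Unmarshal(buf, &job)

	// ...until a type-specific getter (like GetModifyIndexArgs) decodes them.
	var args toyIndexArgs
	_ = json.Unmarshal(job.RawArgs, &args)
	fmt.Println(job.Type, args.IndexName)
}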

func (*Job) FillArgs

func (job *Job) FillArgs(args JobArgs)

FillArgs fills args for new job.

func (*Job) FillFinishedArgs

func (job *Job) FillFinishedArgs(args FinishedJobArgs)

FillFinishedArgs fills args for finished job.

func (*Job) FinishDBJob

func (job *Job) FinishDBJob(jobState JobState, schemaState SchemaState, ver int64, dbInfo *DBInfo)

FinishDBJob is called when a job is finished. It updates the job's state information and adds dbInfo to the binlog.

func (*Job) FinishMultipleTableJob

func (job *Job) FinishMultipleTableJob(jobState JobState, schemaState SchemaState, ver int64, tblInfos []*TableInfo)

FinishMultipleTableJob is called when a job is finished. It updates the job's state information and adds tblInfos to the binlog.

func (*Job) FinishTableJob

func (job *Job) FinishTableJob(jobState JobState, schemaState SchemaState, ver int64, tblInfo *TableInfo)

FinishTableJob is called when a job is finished. It updates the job's state information and adds tblInfo to the binlog.

func (*Job) GetInvolvingSchemaInfo

func (job *Job) GetInvolvingSchemaInfo() []InvolvingSchemaInfo

GetInvolvingSchemaInfo returns the schema info involved in the job.

func (*Job) GetRowCount

func (job *Job) GetRowCount() int64

GetRowCount gets the number of rows. Make sure it can pass `make race`.
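The `make race` note refers to guarding RowCount with the job's mutex instead of touching the field directly. A self-contained sketch of that pattern follows; the counter type is hypothetical, not the real Job implementation.

package main

import (
	"fmt"
	"sync"
)

// counter mirrors the Mu/RowCount pairing on Job: every access to the shared
// count goes through the mutex so the race detector stays happy.
type counter struct {
	mu  sync.Mutex
	row int64
}

func (c *counter) SetRowCount(n int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.row = n
}

func (c *counter) GetRowCount() int64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.row
}

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int64) {
			defer wg.Done()
			c.SetRowCount(n)
		}(int64(i))
	}
	wg.Wait()
	fmt.Println(c.GetRowCount())
}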

func (*Job) GetWarnings

func (job *Job) GetWarnings() (map[errors.ErrorID]*terror.Error, map[errors.ErrorID]int64)

GetWarnings gets the warnings of the rows handled.

func (*Job) InFinalState

func (job *Job) InFinalState() bool

InFinalState returns whether the job is in a final state of the job FSM. TODO: JobStateRollbackDone is not a final state; maybe we should add a JobStateRollbackSynced state to distinguish between entering JobStateRollbackDone and moving the job to history, where the job is in a final state.

func (*Job) IsAlterable

func (job *Job) IsAlterable() bool

IsAlterable checks whether the job type can be altered.

func (*Job) IsCancelled

func (job *Job) IsCancelled() bool

IsCancelled returns whether the job is cancelled or not.

func (*Job) IsCancelling

func (job *Job) IsCancelling() bool

IsCancelling returns whether the job is cancelling or not.

func (*Job) IsDone

func (job *Job) IsDone() bool

IsDone returns whether job is done.

func (*Job) IsFinished

func (job *Job) IsFinished() bool

IsFinished returns whether job is finished or not. If the job state is Done or Cancelled, it is finished.

func (*Job) IsPausable

func (job *Job) IsPausable() bool

IsPausable checks whether we can pause the job.

func (*Job) IsPaused

func (job *Job) IsPaused() bool

IsPaused returns whether the job is paused.

func (*Job) IsPausedBySystem

func (job *Job) IsPausedBySystem() bool

IsPausedBySystem returns whether the job is paused by system.

func (*Job) IsPausing

func (job *Job) IsPausing() bool

IsPausing indicates whether the job is pausing.

func (*Job) IsQueueing

func (job *Job) IsQueueing() bool

IsQueueing returns whether job is queuing or not.

func (*Job) IsResumable

func (job *Job) IsResumable() bool

IsResumable checks whether the job can be resumed.

func (*Job) IsRollbackDone

func (job *Job) IsRollbackDone() bool

IsRollbackDone returns whether the job is rolled back or not.

func (*Job) IsRollbackable

func (job *Job) IsRollbackable() bool

IsRollbackable checks whether the job can be rolled back. TODO(lance6716): should make sure it's the same as convertJob2RollbackJob

func (*Job) IsRollingback

func (job *Job) IsRollingback() bool

IsRollingback returns whether the job is rolling back or not.

func (*Job) IsRunning

func (job *Job) IsRunning() bool

IsRunning returns whether job is still running or not.

func (*Job) IsSynced

func (job *Job) IsSynced() bool

IsSynced returns whether the DDL modification is synced among all TiDB servers.

func (*Job) MarkNonRevertible

func (job *Job) MarkNonRevertible()

MarkNonRevertible marks the current job as non-revertible. It means the job cannot be cancelled or rolled back.

func (*Job) MayNeedReorg

func (job *Job) MayNeedReorg() bool

MayNeedReorg indicates that this job may need to reorganize the data.

func (*Job) NotStarted

func (job *Job) NotStarted() bool

NotStarted returns true if the job has never been run by a worker.

func (*Job) SetRowCount

func (job *Job) SetRowCount(count int64)

SetRowCount sets the number of rows. Make sure it can pass `make race`.

func (*Job) SetWarnings

func (job *Job) SetWarnings(warnings map[errors.ErrorID]*terror.Error, warningsCount map[errors.ErrorID]int64)

SetWarnings sets the warnings of rows handled.

func (*Job) Started

func (job *Job) Started() bool

Started returns true if the job is started.

func (*Job) String

func (job *Job) String() string

String implements fmt.Stringer interface.

type JobArgs

type JobArgs interface {
	// contains filtered or unexported methods
}

JobArgs is the interface for job arguments.

type JobMeta

type JobMeta struct {
	SchemaID int64 `json:"schema_id"`
	TableID  int64 `json:"table_id"`
	// Type is the DDL job's type.
	Type ActionType `json:"job_type"`
	// Query is the DDL job's SQL string.
	Query string `json:"query"`
	// Priority is only used to set the operation priority of adding indices.
	Priority int `json:"priority"`
}

JobMeta is meta info of Job.

type JobState

type JobState int32

JobState is for job state.

const (
	JobStateNone    JobState = 0
	JobStateRunning JobState = 1
	// JobStateRollingback is the state for rolling back the job.
	// When DDL encounters an unrecoverable error at the reorganization state,
	// some keys have been added already, and we need to remove them.
	JobStateRollingback  JobState = 2
	JobStateRollbackDone JobState = 3
	JobStateDone         JobState = 4
	// JobStateCancelled means the job is cancelled. This state is only
	// persisted to the history table and queue.
	JobStateCancelled JobState = 5
	// JobStateSynced means the job is done and has been synchronized to all servers.
	// A job in this state will not be written to the tidb_ddl_job table. When a job
	// is in the `done` state and its version is synchronized, the job is deleted from
	// the tidb_ddl_job table, and we insert a `synced` job to the history table and queue directly.
	JobStateSynced JobState = 6
	// JobStateCancelling is used to mark the DDL job is cancelled by the client, but
	// the DDL worker hasn't handled it.
	JobStateCancelling JobState = 7
	// JobStateQueueing means the job has not yet been started.
	JobStateQueueing JobState = 8

	JobStatePaused  JobState = 9
	JobStatePausing JobState = 10
)

List job states.
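A self-contained sketch of checking for terminal states, with values mirroring the constants above. It assumes Synced and Cancelled are the terminal states; the authoritative predicate is (*Job).InFinalState, which may cover more states.

package main

import "fmt"

// jobState mirrors the documented JobState values; it is not the real type.
type jobState int32

const (
	jobStateNone         jobState = 0
	jobStateRunning      jobState = 1
	jobStateRollingback  jobState = 2
	jobStateRollbackDone jobState = 3
	jobStateDone         jobState = 4
	jobStateCancelled    jobState = 5
	jobStateSynced       jobState = 6
	jobStateCancelling   jobState = 7
	jobStateQueueing     jobState = 8
	jobStatePaused       jobState = 9
	jobStatePausing      jobState = 10
)

// isTerminal is illustrative only: Synced and Cancelled are the states that
// only ever end up in the history table.
func isTerminal(s jobState) bool {
	return s == jobStateSynced || s == jobStateCancelled
}

func main() {
	fmt.Println(isTerminal(jobStateRunning)) // false
	fmt.Println(isTerminal(jobStateSynced))  // true
}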

func StrToJobState

func StrToJobState(s string) JobState

StrToJobState converts string to JobState.

func (JobState) String

func (s JobState) String() string

String implements fmt.Stringer interface.

type JobVersion

type JobVersion int64

JobVersion is the version of DDL job.

const (
	// JobVersion1 is the first version of DDL job where job args are stored as un-typed
	// array. Before v8.4.0, all DDL jobs are in this version.
	JobVersion1 JobVersion = 1
	// JobVersion2 is the second version of DDL job where job args are stored as
	// typed structs, we start to use this version from v8.4.0.
	// Note: this version is not enabled right now except in some test cases, will
	// enable it after we have CI to run both versions.
	JobVersion2 JobVersion = 2
)

func GetJobVerInUse

func GetJobVerInUse() JobVersion

GetJobVerInUse returns the version of DDL job used in the node.

func (JobVersion) String

func (v JobVersion) String() string

String implements fmt.Stringer interface.

type KeyRange

type KeyRange struct {
	StartKey []byte
	EndKey   []byte
}

KeyRange is copied from kv.KeyRange to avoid cycle import. Unused fields are removed.

type LockTablesArgs

type LockTablesArgs struct {
	LockTables    []TableLockTpInfo `json:"lock_tables,omitempty"`
	IndexOfLock   int               `json:"index_of_lock,omitempty"`
	UnlockTables  []TableLockTpInfo `json:"unlock_tables,omitempty"`
	IndexOfUnlock int               `json:"index_of_unlock,omitempty"`
	SessionInfo   SessionInfo       `json:"session_info,omitempty"`
	IsCleanup     bool              `json:"is_cleanup,omitempty"`
}

LockTablesArgs is the argument for LockTables.

func GetLockTablesArgs

func GetLockTablesArgs(job *Job) (*LockTablesArgs, error)

GetLockTablesArgs gets the LockTablesArgs argument.

type ModifyColumnArgs

type ModifyColumnArgs struct {
	Column           *ColumnInfo         `json:"column,omitempty"`
	OldColumnName    pmodel.CIStr        `json:"old_column_name,omitempty"`
	Position         *ast.ColumnPosition `json:"position,omitempty"`
	ModifyColumnType byte                `json:"modify_column_type,omitempty"`
	NewShardBits     uint64              `json:"new_shard_bits,omitempty"`
	// ChangingColumn is the temporary column derived from OldColumn
	ChangingColumn *ColumnInfo `json:"changing_column,omitempty"`
	// ChangingIdxs is only used in test, so don't persist it
	ChangingIdxs []*IndexInfo `json:"-"`
	// RedundantIdxs stores newly-created temp indexes which can be overwritten by other temp indexes.
	// These idxs will be added to finished args after job done.
	RedundantIdxs []int64 `json:"removed_idxs,omitempty"`

	// Finished args
	// IndexIDs stores index ids to be added to gc table.
	IndexIDs     []int64 `json:"index_ids,omitempty"`
	PartitionIDs []int64 `json:"partition_ids,omitempty"`
}

ModifyColumnArgs is the argument for modify column.

func GetFinishedModifyColumnArgs

func GetFinishedModifyColumnArgs(job *Job) (*ModifyColumnArgs, error)

GetFinishedModifyColumnArgs gets the finished modify column argument from the job.

func GetModifyColumnArgs

func GetModifyColumnArgs(job *Job) (*ModifyColumnArgs, error)

GetModifyColumnArgs gets the modify column argument from the job.

type ModifyIndexArgs

type ModifyIndexArgs struct {
	IndexArgs []*IndexArg `json:"index_args,omitempty"`

	// Belows is used for finished args.
	PartitionIDs []int64 `json:"partition_ids,omitempty"`

	// This is only used for getFinishedArgsV1 to distinguish different types of jobs in v1,
	// since they need different argument layouts.
	// TODO(joechenrh): remove this flag after totally switched to v2
	OpType IndexOp `json:"-"`
}

ModifyIndexArgs is the argument for add/drop/rename index jobs, which includes PK and vector index.

func GetDropIndexArgs

func GetDropIndexArgs(job *Job) (*ModifyIndexArgs, error)

GetDropIndexArgs is only used to get drop index arg. The logic is separated from ModifyIndexArgs.decodeV1. TODO(joechenrh): replace this function with GetModifyIndexArgs after totally switched to v2.

func GetFinishedModifyIndexArgs

func GetFinishedModifyIndexArgs(job *Job) (*ModifyIndexArgs, error)

GetFinishedModifyIndexArgs gets the add/drop index args.

func GetModifyIndexArgs

func GetModifyIndexArgs(job *Job) (*ModifyIndexArgs, error)

GetModifyIndexArgs gets the add/rename index args.

func (*ModifyIndexArgs) GetRenameIndexes

func (a *ModifyIndexArgs) GetRenameIndexes() (from, to pmodel.CIStr)

GetRenameIndexes gets the from/to names of the renamed index.

type ModifySchemaArgs

type ModifySchemaArgs struct {
	// below 2 are used for modify schema charset and collate.
	ToCharset string `json:"to_charset,omitempty"`
	ToCollate string `json:"to_collate,omitempty"`
	// used for modify schema placement policy.
	// might be nil, means set it to default.
	PolicyRef *PolicyRefInfo `json:"policy_ref,omitempty"`
}

ModifySchemaArgs is the arguments for modify schema job.

func GetModifySchemaArgs

func GetModifySchemaArgs(job *Job) (*ModifySchemaArgs, error)

GetModifySchemaArgs gets the modify schema args.

type ModifyTableAutoIDCacheArgs

type ModifyTableAutoIDCacheArgs struct {
	NewCache int64 `json:"new_cache,omitempty"`
}

ModifyTableAutoIDCacheArgs is the arguments for Modify Table AutoID Cache ddl job.

func GetModifyTableAutoIDCacheArgs

func GetModifyTableAutoIDCacheArgs(job *Job) (*ModifyTableAutoIDCacheArgs, error)

GetModifyTableAutoIDCacheArgs gets the args for modify table autoID cache ddl job.

type ModifyTableCharsetAndCollateArgs

type ModifyTableCharsetAndCollateArgs struct {
	ToCharset          string `json:"to_charset,omitempty"`
	ToCollate          string `json:"to_collate,omitempty"`
	NeedsOverwriteCols bool   `json:"needs_overwrite_cols,omitempty"`
}

ModifyTableCharsetAndCollateArgs is the arguments for ActionModifyTableCharsetAndCollate ddl.

func GetModifyTableCharsetAndCollateArgs

func GetModifyTableCharsetAndCollateArgs(job *Job) (*ModifyTableCharsetAndCollateArgs, error)

GetModifyTableCharsetAndCollateArgs gets the args for ActionModifyTableCharsetAndCollate ddl.

type ModifyTableCommentArgs

type ModifyTableCommentArgs struct {
	Comment string `json:"comment,omitempty"`
}

ModifyTableCommentArgs is the arguments for ActionModifyTableComment ddl.

func GetModifyTableCommentArgs

func GetModifyTableCommentArgs(job *Job) (*ModifyTableCommentArgs, error)

GetModifyTableCommentArgs gets the args for ActionModifyTableComment.

type MultiSchemaInfo

type MultiSchemaInfo struct {
	SubJobs    []*SubJob `json:"sub_jobs"`
	Revertible bool      `json:"revertible"`
	Seq        int32     `json:"seq"`

	// SkipVersion is used to control whether to generate a new schema version for a sub-job.
	SkipVersion bool `json:"-"`

	AddColumns    []model.CIStr `json:"-"`
	DropColumns   []model.CIStr `json:"-"`
	ModifyColumns []model.CIStr `json:"-"`
	AddIndexes    []model.CIStr `json:"-"`
	DropIndexes   []model.CIStr `json:"-"`
	AlterIndexes  []model.CIStr `json:"-"`

	AddForeignKeys []AddForeignKeyInfo `json:"-"`

	RelativeColumns []model.CIStr `json:"-"`
	PositionColumns []model.CIStr `json:"-"`
}

MultiSchemaInfo keeps some information for multi schema change.

func NewMultiSchemaInfo

func NewMultiSchemaInfo() *MultiSchemaInfo

NewMultiSchemaInfo creates a new MultiSchemaInfo.

type PartitionDefinition

type PartitionDefinition struct {
	ID                 int64          `json:"id"`
	Name               model.CIStr    `json:"name"`
	LessThan           []string       `json:"less_than"`
	InValues           [][]string     `json:"in_values"`
	PlacementPolicyRef *PolicyRefInfo `json:"policy_ref_info"`
	Comment            string         `json:"comment,omitempty"`
}

PartitionDefinition defines a single partition.

func (*PartitionDefinition) Clone

Clone clones PartitionDefinition.

func (*PartitionDefinition) MemoryUsage

func (ci *PartitionDefinition) MemoryUsage() (sum int64)

MemoryUsage returns the memory usage of PartitionDefinition.

type PartitionInfo

type PartitionInfo struct {
	Type    model.PartitionType `json:"type"`
	Expr    string              `json:"expr"`
	Columns []model.CIStr       `json:"columns"`

	// A user may have already created a partitioned table back when table
	// partitioning was not yet supported. When Enable is true, write/read need
	// to use tid rather than pid.
	Enable bool `json:"enable"`

	// IsEmptyColumns is for syntax like `partition by key()`.
	// When IsEmptyColumns is true, the column name will not be displayed in the `show create table` stmt.
	IsEmptyColumns bool `json:"is_empty_columns"`

	Definitions []PartitionDefinition `json:"definitions"`
	// AddingDefinitions is filled when adding partitions that are in the intermediate state.
	AddingDefinitions []PartitionDefinition `json:"adding_definitions"`
	// DroppingDefinitions is filled when dropping/truncating partitions that are in the intermediate state.
	DroppingDefinitions []PartitionDefinition `json:"dropping_definitions"`
	// NewPartitionIDs is filled when truncating partitions that are in the intermediate state.
	NewPartitionIDs []int64 `json:"new_partition_ids,omitempty"`
	// OriginalPartitionIDsOrder is only needed for rollback of Reorganize Partition for
	// LIST partitions, since in StateDeleteReorganize we don't know the old order any longer.
	OriginalPartitionIDsOrder []int64 `json:"original_partition_ids_order,omitempty"`

	States []PartitionState `json:"states"`
	Num    uint64           `json:"num"`
	// Indicates which DDL action is currently ongoing.
	DDLAction ActionType `json:"ddl_action,omitempty"`
	// Only used during ReorganizePartition so far
	DDLState SchemaState `json:"ddl_state"`
	// Set during ALTER TABLE ... if the table id needs to change,
	// e.g. if there is a global index or the table is going between
	// non-partitioned and partitioned, to make the data dropping / range delete
	// optimized.
	NewTableID int64 `json:"new_table_id,omitempty"`
	// Set during ALTER TABLE ... PARTITION BY ...
	// First as the new partition scheme, then in StateDeleteReorg as the old
	DDLType    model.PartitionType `json:"ddl_type,omitempty"`
	DDLExpr    string              `json:"ddl_expr,omitempty"`
	DDLColumns []model.CIStr       `json:"ddl_columns,omitempty"`
	// For ActionAlterTablePartitioning, UPDATE INDEXES
	DDLUpdateIndexes []UpdateIndexInfo `json:"ddl_update_indexes,omitempty"`
	// Simplified way to handle global index changes: instead of calculating
	// it every time, keep track of the changes here.
	// If index.ID exists in the map, then it has changed: true for the new copy,
	// false for the old copy (to be removed).
	DDLChangedIndex map[int64]bool `json:"ddl_changed_index,omitempty"`
}

PartitionInfo provides table partition info.

func (*PartitionInfo) CanHaveOverlappingDroppingPartition

func (pi *PartitionInfo) CanHaveOverlappingDroppingPartition() bool

CanHaveOverlappingDroppingPartition returns true if special handling is needed during DDL of partitioned tables, where RANGE or LIST with a DEFAULT partition can have overlapping partitions. Example: ... PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (20)); ALTER TABLE t DROP PARTITION p0. When p0 is gone, p1 can have values < 10, so if p0 is visible to one session while another session has dropped p0, a value '9' will then be in p1 instead of p0, i.e. an "overlapping" partition that needs special handling. The same can happen for LIST partitioning if there is a DEFAULT partition.
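A self-contained sketch of the p0/p1 example above, showing why the value 9 lands in a different partition once p0 is dropped (illustrative only; the real partition-locating logic lives elsewhere):

package main

import "fmt"

// locate returns the index of the first RANGE partition whose upper bound is
// greater than v, mirroring VALUES LESS THAN semantics.
func locate(upperBounds []int, v int) int {
	for i, b := range upperBounds {
		if v < b {
			return i
		}
	}
	return -1
}

func main() {
	// PARTITION BY RANGE (a): p0 VALUES LESS THAN (10), p1 VALUES LESS THAN (20)
	before := []int{10, 20}
	fmt.Println(locate(before, 9)) // 0: value 9 belongs to p0

	// After ALTER TABLE t DROP PARTITION p0, everything below 20 falls into p1,
	// so a session that no longer sees p0 places the value 9 in p1, the
	// "overlapping" partition that needs special handling during the DDL.
	after := []int{20}
	fmt.Println(locate(after, 9)) // 0: that slot is now p1
}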

func (*PartitionInfo) ClearReorgIntermediateInfo

func (pi *PartitionInfo) ClearReorgIntermediateInfo()

ClearReorgIntermediateInfo removes intermediate information used during reorganize partition.

func (*PartitionInfo) Clone

func (pi *PartitionInfo) Clone() *PartitionInfo

Clone clones itself.

func (*PartitionInfo) FindPartitionDefinitionByName

func (pi *PartitionInfo) FindPartitionDefinitionByName(partitionDefinitionName string) int

FindPartitionDefinitionByName finds PartitionDefinition by name.

func (*PartitionInfo) GCPartitionStates

func (pi *PartitionInfo) GCPartitionStates()

GCPartitionStates cleans up the partition state.

func (*PartitionInfo) GetDefaultListPartition

func (pi *PartitionInfo) GetDefaultListPartition() int

GetDefaultListPartition returns the index of Definitions that contains the LIST DEFAULT partition; otherwise it returns -1.

func (*PartitionInfo) GetNameByID

func (pi *PartitionInfo) GetNameByID(id int64) string

GetNameByID gets the partition name by ID. TODO: Remove the need for this function!

func (*PartitionInfo) GetOverlappingDroppingPartitionIdx

func (pi *PartitionInfo) GetOverlappingDroppingPartitionIdx(idx int) int

GetOverlappingDroppingPartitionIdx takes the index of Definitions and returns a possible overlapping partition to use instead. Only used during DROP PARTITION! For RANGE, DROP PARTITION must be a consecutive range of partitions. For LIST, it only takes effect if there is a DEFAULT partition. It returns the same idx if there is no overlapping partition, and returns -1 if the partition is being dropped with no overlapping partition, e.g. when the last range partition is dropped or there is no default list partition. See CanHaveOverlappingDroppingPartition() for more info about overlapping dropping partitions.

func (*PartitionInfo) GetPartitionIDByName

func (pi *PartitionInfo) GetPartitionIDByName(partitionDefinitionName string) int64

GetPartitionIDByName gets the partition ID by name.

func (*PartitionInfo) GetStateByID

func (pi *PartitionInfo) GetStateByID(id int64) SchemaState

GetStateByID gets the partition state by ID.

func (*PartitionInfo) IDsInDDLToIgnore

func (pi *PartitionInfo) IDsInDDLToIgnore() []int64

IDsInDDLToIgnore returns a list of IDs that the current session should not see (although there may be duplicate errors on insert/update), for example during truncate or drop partition.

func (*PartitionInfo) IsDropping

func (pi *PartitionInfo) IsDropping(idx int) bool

IsDropping returns true if the partition is being dropped (i.e. in DroppingDefinitions)

func (*PartitionInfo) ReplaceWithOverlappingPartitionIdx

func (pi *PartitionInfo) ReplaceWithOverlappingPartitionIdx(idx int, err error) (int, error)

ReplaceWithOverlappingPartitionIdx takes a partition index and a previous error, and returns the overlapping partition if there is one. Functions based on locatePartitionCommon, like GetPartitionIdxByRow, will return the found partition with an error, since it is being dropped. This function corrects the partition index and error if it can. For an example of an overlapping partition, see CanHaveOverlappingDroppingPartition. This function should not be used for writing, since we should block writes to partitions that are being dropped. But for reading, we should replace the dropping partition with the overlapping partition if it exists, so we can read new data from sessions one step ahead in the DDL state.

func (*PartitionInfo) SetOriginalPartitionIDs

func (pi *PartitionInfo) SetOriginalPartitionIDs()

SetOriginalPartitionIDs sets the order of the original partition IDs in case it needs to be rolled back. LIST Partitioning would not know otherwise.

func (*PartitionInfo) SetStateByID

func (pi *PartitionInfo) SetStateByID(id int64, state SchemaState)

SetStateByID sets the state of the partition by ID.

type PartitionState

type PartitionState struct {
	ID    int64       `json:"id"`
	State SchemaState `json:"state"`
}

PartitionState is the state of the partition.

type PlacementPolicyArgs

type PlacementPolicyArgs struct {
	Policy         *PolicyInfo  `json:"policy,omitempty"`
	ReplaceOnExist bool         `json:"replace_on_exist,omitempty"`
	PolicyName     pmodel.CIStr `json:"policy_name,omitempty"`

	// it's set for alter/drop policy in v2
	PolicyID int64 `json:"policy_id"`
}

PlacementPolicyArgs is the argument for create/alter/drop placement policy

func GetPlacementPolicyArgs

func GetPlacementPolicyArgs(job *Job) (*PlacementPolicyArgs, error)

GetPlacementPolicyArgs gets the placement policy args.

type PlacementSettings

type PlacementSettings struct {
	PrimaryRegion       string `json:"primary_region"`
	Regions             string `json:"regions"`
	Learners            uint64 `json:"learners"`
	Followers           uint64 `json:"followers"`
	Voters              uint64 `json:"voters"`
	Schedule            string `json:"schedule"`
	Constraints         string `json:"constraints"`
	LeaderConstraints   string `json:"leader_constraints"`
	LearnerConstraints  string `json:"learner_constraints"`
	FollowerConstraints string `json:"follower_constraints"`
	VoterConstraints    string `json:"voter_constraints"`
	SurvivalPreferences string `json:"survival_preferences"`
}

PlacementSettings is the settings of the placement

func (*PlacementSettings) Clone

Clone clones the placement settings.

func (*PlacementSettings) String

func (p *PlacementSettings) String() string

String implements fmt.Stringer interface.

type PolicyInfo

type PolicyInfo struct {
	*PlacementSettings
	ID    int64       `json:"id"`
	Name  model.CIStr `json:"name"`
	State SchemaState `json:"state"`
}

PolicyInfo is the struct to store the placement policy.

func (*PolicyInfo) Clone

func (p *PolicyInfo) Clone() *PolicyInfo

Clone clones PolicyInfo.

type PolicyRefInfo

type PolicyRefInfo struct {
	ID   int64       `json:"id"`
	Name model.CIStr `json:"name"`
}

PolicyRefInfo is the struct to refer the placement policy.

type RebaseAutoIDArgs

type RebaseAutoIDArgs struct {
	NewBase int64 `json:"new_base,omitempty"`
	Force   bool  `json:"force,omitempty"`
}

RebaseAutoIDArgs is the arguments for ActionRebaseAutoID DDL. It is also for ActionRebaseAutoRandomBase.

func GetRebaseAutoIDArgs

func GetRebaseAutoIDArgs(job *Job) (*RebaseAutoIDArgs, error)

GetRebaseAutoIDArgs gets the args for the ActionRebaseAutoID/ActionRebaseAutoRandomBase ddl.

type RecoverArgs

type RecoverArgs struct {
	RecoverInfo *RecoverSchemaInfo `json:"recover_info,omitempty"`
	CheckFlag   int64              `json:"check_flag,omitempty"`

	// used during runtime
	AffectedPhysicalIDs []int64 `json:"-"`
}

RecoverArgs is the argument for recover table/schema.

func GetRecoverArgs

func GetRecoverArgs(job *Job) (*RecoverArgs, error)

GetRecoverArgs gets the recover table/schema args.

func (*RecoverArgs) RecoverTableInfos

func (a *RecoverArgs) RecoverTableInfos() []*RecoverTableInfo

RecoverTableInfos gets all the recover infos.

type RecoverSchemaInfo

type RecoverSchemaInfo struct {
	*DBInfo
	RecoverTableInfos []*RecoverTableInfo
	// LoadTablesOnExecute is the new logic to avoid a RecoverTableInfos that is too
	// large to be persisted. If it's true, the DDL owner will load RecoverTableInfos
	// instead of the job submitting node.
	LoadTablesOnExecute bool
	DropJobID           int64
	SnapshotTS          uint64
	OldSchemaName       pmodel.CIStr
}

RecoverSchemaInfo contains information needed by DDL.RecoverSchema.

type RecoverTableInfo

type RecoverTableInfo struct {
	SchemaID      int64
	TableInfo     *TableInfo
	DropJobID     int64
	SnapshotTS    uint64
	AutoIDs       AutoIDGroup
	OldSchemaName string
	OldTableName  string
}

RecoverTableInfo contains information needed by DDL.RecoverTable.

type ReferredFKInfo

type ReferredFKInfo struct {
	Cols        []model.CIStr `json:"cols"`
	ChildSchema model.CIStr   `json:"child_schema"`
	ChildTable  model.CIStr   `json:"child_table"`
	ChildFKName model.CIStr   `json:"child_fk_name"`
}

ReferredFKInfo provides the cited foreign key in the child table.

type RenameTableArgs

type RenameTableArgs struct {
	// for Args
	OldSchemaID   int64        `json:"old_schema_id,omitempty"`
	OldSchemaName pmodel.CIStr `json:"old_schema_name,omitempty"`
	NewTableName  pmodel.CIStr `json:"new_table_name,omitempty"`

	// for rename tables
	OldTableName pmodel.CIStr `json:"old_table_name,omitempty"`
	NewSchemaID  int64        `json:"new_schema_id,omitempty"`
	TableID      int64        `json:"table_id,omitempty"`

	// runtime info
	OldSchemaIDForSchemaDiff int64 `json:"-"`
}

RenameTableArgs is the arguments for rename table DDL job. It's also used for rename tables.

func GetRenameTableArgs

func GetRenameTableArgs(job *Job) (*RenameTableArgs, error)

GetRenameTableArgs gets the arguments from the job.

func GetRenameTablesArgsFromV1

func GetRenameTablesArgsFromV1(
	oldSchemaIDs []int64,
	oldSchemaNames []pmodel.CIStr,
	oldTableNames []pmodel.CIStr,
	newSchemaIDs []int64,
	newTableNames []pmodel.CIStr,
	tableIDs []int64,
) []*RenameTableArgs

GetRenameTablesArgsFromV1 gets v2 args from v1 args.

type RenameTablesArgs

type RenameTablesArgs struct {
	RenameTableInfos []*RenameTableArgs `json:"rename_table_infos,omitempty"`
}

RenameTablesArgs is the arguments for rename tables job.

func GetRenameTablesArgs

func GetRenameTablesArgs(job *Job) (*RenameTablesArgs, error)

GetRenameTablesArgs gets the rename-tables args.

type ReorgType

type ReorgType int8

ReorgType indicates which process is used for the data reorganization.

const (
	// ReorgTypeNone means the backfill task is not started yet.
	ReorgTypeNone ReorgType = iota
	// ReorgTypeTxn means the index records are backfilled with transactions.
	// All the index KVs are written through the transaction interface.
	// This is the original backfill implementation.
	ReorgTypeTxn
	// ReorgTypeLitMerge means the index records are backfilled with lightning.
	// The index KVs are encoded to SST files and imported to the storage directly.
	// The incremental index KVs written by DML are redirected to a temporary index.
	// After the backfill is finished, the temporary index records are merged back to the original index.
	ReorgTypeLitMerge
	// ReorgTypeTxnMerge means backfilling with transactions and merging incremental changes.
	// The backfill index KVs are written through the transaction interface.
	// The incremental index KVs written by DML are redirected to a temporary index.
	// After the backfill is finished, the temporary index records are merged back to the original index.
	ReorgTypeTxnMerge
)

func (ReorgType) NeedMergeProcess

func (tp ReorgType) NeedMergeProcess() bool

NeedMergeProcess means the incremental changes need to be merged.
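Based on the constant comments above, the merge step applies to the two *Merge variants; a self-contained sketch of that check (mirroring, not reusing, the real type):

package main

import "fmt"

// reorgType mirrors the documented ReorgType values; it is not the real type.
type reorgType int8

const (
	reorgTypeNone reorgType = iota
	reorgTypeTxn
	reorgTypeLitMerge
	reorgTypeTxnMerge
)

// needMergeProcess sketches the documented behaviour: only the *Merge backfill
// variants redirect DML to a temporary index that must be merged back after
// the backfill finishes.
func needMergeProcess(tp reorgType) bool {
	return tp == reorgTypeLitMerge || tp == reorgTypeTxnMerge
}

func main() {
	fmt.Println(needMergeProcess(reorgTypeTxn))      // false
	fmt.Println(needMergeProcess(reorgTypeLitMerge)) // true
}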

func (ReorgType) String

func (tp ReorgType) String() string

String implements fmt.Stringer interface.

type RepairTableArgs

type RepairTableArgs struct {
	TableInfo *TableInfo `json:"table_info"`
}

RepairTableArgs is the argument for repair table

func GetRepairTableArgs

func GetRepairTableArgs(job *Job) (*RepairTableArgs, error)

GetRepairTableArgs gets the repair table args.

type ResourceGroupArgs

type ResourceGroupArgs struct {
	// For DropResourceGroup we only use it to store the name; other fields are invalid.
	RGInfo *ResourceGroupInfo `json:"rg_info,omitempty"`
}

ResourceGroupArgs is the arguments for resource group job.

func GetResourceGroupArgs

func GetResourceGroupArgs(job *Job) (*ResourceGroupArgs, error)

GetResourceGroupArgs gets the resource group args.

type ResourceGroupBackgroundSettings

type ResourceGroupBackgroundSettings struct {
	JobTypes          []string `json:"job_types"`
	ResourceUtilLimit uint64   `json:"utilization_limit"`
}

ResourceGroupBackgroundSettings is the background settings of the resource group.

type ResourceGroupInfo

type ResourceGroupInfo struct {
	*ResourceGroupSettings
	ID    int64       `json:"id"`
	Name  model.CIStr `json:"name"`
	State SchemaState `json:"state"`
}

ResourceGroupInfo is the struct to store the resource group.

func (*ResourceGroupInfo) Clone

Clone clones the ResourceGroupInfo.

type ResourceGroupRunawaySettings

type ResourceGroupRunawaySettings struct {
	ExecElapsedTimeMs uint64                  `json:"exec_elapsed_time_ms"`
	ProcessedKeys     int64                   `json:"processed_keys"`
	RequestUnit       int64                   `json:"request_unit"`
	Action            model.RunawayActionType `json:"action"`
	SwitchGroupName   string                  `json:"switch_group_name"`
	WatchType         model.RunawayWatchType  `json:"watch_type"`
	WatchDurationMs   int64                   `json:"watch_duration_ms"`
}

ResourceGroupRunawaySettings is the runaway settings of the resource group

type ResourceGroupSettings

type ResourceGroupSettings struct {
	RURate           uint64                           `json:"ru_per_sec"`
	Priority         uint64                           `json:"priority"`
	CPULimiter       string                           `json:"cpu_limit"`
	IOReadBandwidth  string                           `json:"io_read_bandwidth"`
	IOWriteBandwidth string                           `json:"io_write_bandwidth"`
	BurstLimit       int64                            `json:"burst_limit"`
	Runaway          *ResourceGroupRunawaySettings    `json:"runaway"`
	Background       *ResourceGroupBackgroundSettings `json:"background"`
}

ResourceGroupSettings is the settings of the resource group

func NewResourceGroupSettings

func NewResourceGroupSettings() *ResourceGroupSettings

NewResourceGroupSettings creates a new ResourceGroupSettings.

func (*ResourceGroupSettings) Adjust

func (p *ResourceGroupSettings) Adjust()

Adjust adjusts the resource group settings.

func (*ResourceGroupSettings) Clone

Clone clones the resource group settings.

func (*ResourceGroupSettings) String

func (p *ResourceGroupSettings) String() string

String implements the fmt.Stringer interface.

type SchemaDiff

type SchemaDiff struct {
	Version  int64      `json:"version"`
	Type     ActionType `json:"type"`
	SchemaID int64      `json:"schema_id"`
	TableID  int64      `json:"table_id"`

	// SubActionTypes is the list of action types done together within a multi-schema
	// change job. As the job might contain multiple steps that change the schema version,
	// if some step only contains one action, Type will be that action, and SubActionTypes
	// will be empty.
	// For other types of jobs, it will always be empty.
	SubActionTypes []ActionType `json:"sub_action_types,omitempty"`
	// OldTableID is the table ID before truncate, only used by truncate table DDL.
	OldTableID int64 `json:"old_table_id"`
	// OldSchemaID is the schema ID before rename table, only used by rename table DDL.
	OldSchemaID int64 `json:"old_schema_id"`
	// RegenerateSchemaMap means whether to rebuild the schema map when applying to the schema diff.
	RegenerateSchemaMap bool `json:"regenerate_schema_map"`
	// ReadTableFromMeta is set to avoid a diff that is too large to be saved in SchemaDiff;
	// infoschema should read the latest meta directly.
	ReadTableFromMeta bool `json:"read_table_from_meta,omitempty"`

	AffectedOpts []*AffectedOption `json:"affected_options"`
}

SchemaDiff contains the schema modification at a particular schema version. It is used to reduce schema reload cost.

type SchemaState

type SchemaState byte

SchemaState is the state for schema elements.

const (
	// StateNone means this schema element is absent and can't be used.
	StateNone SchemaState = iota
	// StateDeleteOnly means we can only delete items for this schema element.
	StateDeleteOnly
	// StateWriteOnly means we can use any write operation on this schema element,
	// but outer can't read the changed data.
	StateWriteOnly
	// StateWriteReorganization means we are re-organizing whole data after write only state.
	StateWriteReorganization
	// StateDeleteReorganization means we are re-organizing whole data after delete only state.
	StateDeleteReorganization
	// StatePublic means this schema element is ok for all write and read operations.
	StatePublic
	// StateReplicaOnly means we're waiting for the TiFlash replica to be finished.
	StateReplicaOnly
	// StateGlobalTxnOnly means we can only use global txn for operations on this schema element.
	StateGlobalTxnOnly
)
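For orientation, the sketch below walks the commonly cited forward progression of these states for an add-index style change (a self-contained illustration with mirrored values, not the real type):

package main

import "fmt"

// schemaState mirrors the documented SchemaState values; it is not the real type.
type schemaState byte

const (
	stateNone schemaState = iota
	stateDeleteOnly
	stateWriteOnly
	stateWriteReorganization
	stateDeleteReorganization
	statePublic
)

func main() {
	// Each step becomes visible to other sessions one schema version at a time,
	// which is what keeps the online schema change safe across servers.
	steps := []struct {
		s    schemaState
		name string
	}{
		{stateNone, "none"},
		{stateDeleteOnly, "delete only"},
		{stateWriteOnly, "write only"},
		{stateWriteReorganization, "write reorganization"},
		{statePublic, "public"},
	}
	for _, st := range steps {
		fmt.Println(int(st.s), st.name)
	}
}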

func (SchemaState) String

func (s SchemaState) String() string

String implements fmt.Stringer interface.

type SequenceInfo

type SequenceInfo struct {
	Start      int64  `json:"sequence_start"`
	Cache      bool   `json:"sequence_cache"`
	Cycle      bool   `json:"sequence_cycle"`
	MinValue   int64  `json:"sequence_min_value"`
	MaxValue   int64  `json:"sequence_max_value"`
	Increment  int64  `json:"sequence_increment"`
	CacheValue int64  `json:"sequence_cache_value"`
	Comment    string `json:"sequence_comment"`
}

SequenceInfo provides meta data describing a DB sequence.

type SessionInfo

type SessionInfo struct {
	ServerID  string
	SessionID uint64
}

SessionInfo contains the session ID and the server ID.

func (SessionInfo) String

func (s SessionInfo) String() string

String implements fmt.Stringer interface.

type SetDefaultValueArgs

type SetDefaultValueArgs struct {
	Col *ColumnInfo `json:"column_info,omitempty"`
}

SetDefaultValueArgs is the argument for setting default value ddl.

func GetSetDefaultValueArgs

func GetSetDefaultValueArgs(job *Job) (*SetDefaultValueArgs, error)

GetSetDefaultValueArgs gets the args for the set default value ddl.

type SetTiFlashReplicaArgs

type SetTiFlashReplicaArgs struct {
	TiflashReplica ast.TiFlashReplicaSpec `json:"tiflash_replica,omitempty"`
}

SetTiFlashReplicaArgs is the arguments for setting TiFlash replica ddl.

func GetSetTiFlashReplicaArgs

func GetSetTiFlashReplicaArgs(job *Job) (*SetTiFlashReplicaArgs, error)

GetSetTiFlashReplicaArgs gets the args for setting TiFlash replica ddl.

type ShardRowIDArgs

type ShardRowIDArgs struct {
	ShardRowIDBits uint64 `json:"shard_row_id_bits,omitempty"`
}

ShardRowIDArgs is the arguments for shard row ID ddl job.

func GetShardRowIDArgs

func GetShardRowIDArgs(job *Job) (*ShardRowIDArgs, error)

GetShardRowIDArgs gets the args for shard row ID ddl job.

type StatsLoadItem

type StatsLoadItem struct {
	TableItemID
	FullLoad bool
}

StatsLoadItem represents the load unit for statistics' memory loading.

func (StatsLoadItem) Key

func (s StatsLoadItem) Key() string

Key is used to generate a unique key for TableItemID to use in the syncload.

type StatsOptions

type StatsOptions struct {
	*StatsWindowSettings
	AutoRecalc   bool               `json:"auto_recalc"`
	ColumnChoice model.ColumnChoice `json:"column_choice"`
	ColumnList   []model.CIStr      `json:"column_list"`
	SampleNum    uint64             `json:"sample_num"`
	SampleRate   float64            `json:"sample_rate"`
	Buckets      uint64             `json:"buckets"`
	TopN         uint64             `json:"topn"`
	Concurrency  uint               `json:"concurrency"`
}

StatsOptions is the struct to store the stats options.

func NewStatsOptions

func NewStatsOptions() *StatsOptions

NewStatsOptions creates a new StatsOptions.

type StatsWindowSettings

type StatsWindowSettings struct {
	WindowStart    time.Time        `json:"window_start"`
	WindowEnd      time.Time        `json:"window_end"`
	RepeatType     WindowRepeatType `json:"repeat_type"`
	RepeatInterval uint             `json:"repeat_interval"`
}

StatsWindowSettings is the settings of the stats window.

type SubJob

type SubJob struct {
	Type    ActionType `json:"type"`
	JobArgs JobArgs    `json:"-"`

	RawArgs     json.RawMessage `json:"raw_args"`
	SchemaState SchemaState     `json:"schema_state"`
	SnapshotVer uint64          `json:"snapshot_ver"`
	RealStartTS uint64          `json:"real_start_ts"`
	Revertible  bool            `json:"revertible"`
	State       JobState        `json:"state"`
	RowCount    int64           `json:"row_count"`
	Warning     *terror.Error   `json:"warning"`
	CtxVars     []any           `json:"-"`
	SchemaVer   int64           `json:"schema_version"`
	ReorgTp     ReorgType       `json:"reorg_tp"`
	// contains filtered or unexported fields
}

SubJob is a representation of one DDL schema change. A Job may contain zero (when multi-schema change is not applicable) or more SubJobs.

func (*SubJob) Clone

func (sub *SubJob) Clone() *SubJob

Clone returns a copy of the sub-job. Note: private args fields are not copied.

func (*SubJob) FillArgs

func (sub *SubJob) FillArgs(jobVer JobVersion)

FillArgs fills args.

func (*SubJob) FromProxyJob

func (sub *SubJob) FromProxyJob(proxyJob *Job, ver int64)

FromProxyJob converts a proxy job to a sub-job.

func (*SubJob) IsFinished

func (sub *SubJob) IsFinished() bool

IsFinished returns true if the job is done.

func (*SubJob) IsNormal

func (sub *SubJob) IsNormal() bool

IsNormal returns true if the sub-job is normally running.

func (*SubJob) ToProxyJob

func (sub *SubJob) ToProxyJob(parentJob *Job, seq int) Job

ToProxyJob converts a sub-job to a proxy job.

type TTLInfo

type TTLInfo struct {
	ColumnName      model.CIStr `json:"column"`
	IntervalExprStr string      `json:"interval_expr"`
	// `IntervalTimeUnit` is actually ast.TimeUnitType. Use `int` to avoid cycle dependency
	IntervalTimeUnit int  `json:"interval_time_unit"`
	Enable           bool `json:"enable"`
	// JobInterval is the interval between two TTL scan jobs.
	// It's suggested to get a duration with `(*TTLInfo).GetJobInterval`
	JobInterval string `json:"job_interval"`
}

TTLInfo records the TTL config

func (*TTLInfo) Clone

func (t *TTLInfo) Clone() *TTLInfo

Clone clones TTLInfo

func (*TTLInfo) GetJobInterval

func (t *TTLInfo) GetJobInterval() (time.Duration, error)

GetJobInterval parses and returns the job interval. If the job interval is an empty string, "1h" will be returned, to keep compatibility with 6.5 (in which the TTL_JOB_INTERVAL attribute doesn't exist). We didn't set TTL_JOB_INTERVAL during upgrade and bootstrap because setting the default value here is much simpler and avoids bugs that would block users from upgrading or bootstrapping the cluster.
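A minimal sketch of the documented fallback, assuming plain Go duration syntax for the interval string (the real parser accepts TiDB duration syntax; parseJobInterval below is hypothetical):

package main

import (
	"fmt"
	"time"
)

// parseJobInterval applies the documented default: an empty interval means
// "1h" to stay compatible with 6.5 clusters that have no TTL_JOB_INTERVAL.
func parseJobInterval(s string) (time.Duration, error) {
	if s == "" {
		s = "1h"
	}
	return time.ParseDuration(s)
}

func main() {
	d, _ := parseJobInterval("")
	fmt.Println(d) // 1h0m0s
}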

type TableCacheStatusType

type TableCacheStatusType int

TableCacheStatusType is the type of the table cache status

const (
	TableCacheStatusDisable TableCacheStatusType = iota
	TableCacheStatusEnable
	TableCacheStatusSwitching
)

TableCacheStatusType values.

func (TableCacheStatusType) String

func (t TableCacheStatusType) String() string

String implements fmt.Stringer interface.

type TableColumnArgs

type TableColumnArgs struct {
	// follow items for add column.
	Col    *ColumnInfo         `json:"column_info,omitempty"`
	Pos    *ast.ColumnPosition `json:"position,omitempty"`
	Offset int                 `json:"offset,omitempty"`
	// it's shared by add/drop column.
	IgnoreExistenceErr bool `json:"ignore_existence_err,omitempty"`

	// for drop column.
	// below 2 fields are filled during running.
	IndexIDs     []int64 `json:"index_ids,omitempty"`
	PartitionIDs []int64 `json:"partition_ids,omitempty"`
}

TableColumnArgs is the arguments for the drop column or add column ddl.

func GetTableColumnArgs

func GetTableColumnArgs(job *Job) (*TableColumnArgs, error)

GetTableColumnArgs gets the args for the drop column or add column ddl.

type TableIDIndexID

type TableIDIndexID struct {
	TableID int64
	IndexID int64
}

TableIDIndexID contains TableID+IndexID of index ranges to be deleted

type TableInfo

type TableInfo struct {
	ID      int64       `json:"id"`
	Name    model.CIStr `json:"name"`
	Charset string      `json:"charset"`
	Collate string      `json:"collate"`
	// Columns are listed in the order in which they appear in the schema.
	Columns     []*ColumnInfo     `json:"cols"`
	Indices     []*IndexInfo      `json:"index_info"`
	Constraints []*ConstraintInfo `json:"constraint_info"`
	ForeignKeys []*FKInfo         `json:"fk_info"`
	State       SchemaState       `json:"state"`
	// PKIsHandle is true when primary key is a single integer column.
	PKIsHandle bool `json:"pk_is_handle"`
	// IsCommonHandle is true when clustered index feature is
	// enabled and the primary key is not a single integer column.
	IsCommonHandle bool `json:"is_common_handle"`
	// CommonHandleVersion is the version of the clustered index.
	// 0 for clustered indexes created in 5.0.0 RC.
	// 1 for clustered indexes created after 5.0.0 RC.
	CommonHandleVersion uint16 `json:"common_handle_version"`

	Comment   string `json:"comment"`
	AutoIncID int64  `json:"auto_inc_id"`

	// Only used by BR when:
	// 1. SepAutoInc() is true
	// 2. The table is nonclustered and has auto_increment column.
	// In that case, both auto_increment_id and tidb_rowid need to be backup & recover.
	// See also https://github.com/pingcap/tidb/issues/46093
	//
	// It should have been named TiDBRowID, but for historical reasons, we do not use a separate meta key for _tidb_rowid and auto_increment_id,
	// and the field `AutoIncID` is used to serve both _tidb_rowid and auto_increment_id.
	// If we introduced a TiDBRowID here, it could cause further misunderstanding:
	//	in most cases, AutoIncID is _tidb_rowid and TiDBRowID is null
	//	but in some cases, AutoIncID is auto_increment_id and TiDBRowID is _tidb_rowid
	// So let's just use another name, AutoIncIDExtra, to avoid the misconception.
	AutoIncIDExtra int64 `json:"auto_inc_id_extra,omitempty"`

	AutoIDCache     int64 `json:"auto_id_cache"`
	AutoRandID      int64 `json:"auto_rand_id"`
	MaxColumnID     int64 `json:"max_col_id"`
	MaxIndexID      int64 `json:"max_idx_id"`
	MaxForeignKeyID int64 `json:"max_fk_id"`
	MaxConstraintID int64 `json:"max_cst_id"`
	// UpdateTS is used to record the timestamp of updating the table's schema information.
	// These changing schema operations don't include 'truncate table' and 'rename table'.
	UpdateTS uint64 `json:"update_timestamp"`
	// AutoIDSchemaID:
	// Because the auto increment ID has schemaID as a prefix,
	// we need to save the original schemaID to keep the autoID unchanged
	// while renaming a table from one database to another.
	// Only set if the table has been renamed across schemas.
	// The old name 'old_schema_id' is kept for backwards compatibility.
	AutoIDSchemaID int64 `json:"old_schema_id,omitempty"`

	// ShardRowIDBits specifies whether the implicit row ID is sharded.
	ShardRowIDBits uint64
	// MaxShardRowIDBits records the max ShardRowIDBits used so far.
	MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"`
	// AutoRandomBits is used to set the bit number to shard automatically when PKIsHandle.
	AutoRandomBits uint64 `json:"auto_random_bits"`
	// AutoRandomRangeBits represents the bit number of the int primary key that will be used by TiDB.
	AutoRandomRangeBits uint64 `json:"auto_random_range_bits"`
	// PreSplitRegions specifies the pre-split regions when creating the table.
	// The pre-split region num is 2^(PreSplitRegions-1).
	// PreSplitRegions should be less than or equal to ShardRowIDBits.
	PreSplitRegions uint64 `json:"pre_split_regions"`

	Partition *PartitionInfo `json:"partition"`

	Compression string `json:"compression"`

	View *ViewInfo `json:"view"`

	Sequence *SequenceInfo `json:"sequence"`

	// Lock represent the table lock info.
	Lock *TableLockInfo `json:"Lock"`

	// Version means the version of the table info.
	Version uint16 `json:"version"`

	// TiFlashReplica means the TiFlash replica info.
	TiFlashReplica *TiFlashReplicaInfo `json:"tiflash_replica"`

	// IsColumnar means the table is column-oriented.
	// It's true when the engine of the table is TiFlash only.
	IsColumnar bool `json:"is_columnar"`

	TempTableType        `json:"temp_table_type"`
	TableCacheStatusType `json:"cache_table_status"`
	PlacementPolicyRef   *PolicyRefInfo `json:"policy_ref_info"`

	// StatsOptions is used when do analyze/auto-analyze for each table
	StatsOptions *StatsOptions `json:"stats_options"`

	ExchangePartitionInfo *ExchangePartitionInfo `json:"exchange_partition_info"`

	TTLInfo *TTLInfo `json:"ttl_info"`

	// Revision is the per-table schema version; it is increased when the schema changes.
	Revision uint64 `json:"revision"`

	DBID int64 `json:"-"`
}

TableInfo provides meta data describing a DB table.

func (*TableInfo) ClearPlacement

func (t *TableInfo) ClearPlacement()

ClearPlacement clears all table and partitions' placement settings

func (*TableInfo) Clone

func (t *TableInfo) Clone() *TableInfo

Clone clones TableInfo.

func (*TableInfo) Cols

func (t *TableInfo) Cols() []*ColumnInfo

Cols returns the columns of the table in public state.

func (*TableInfo) ColumnIsInIndex

func (t *TableInfo) ColumnIsInIndex(c *ColumnInfo) bool

ColumnIsInIndex checks whether c is included in any indices of t.

func (*TableInfo) ContainsAutoRandomBits

func (t *TableInfo) ContainsAutoRandomBits() bool

ContainsAutoRandomBits indicates whether a table contains auto_random column.

func (*TableInfo) Equals

func (t *TableInfo) Equals(other any) bool

Equals implements HashEquals interface.

func (*TableInfo) FindColumnByID

func (t *TableInfo) FindColumnByID(id int64) *ColumnInfo

FindColumnByID finds ColumnInfo by id.

func (*TableInfo) FindColumnNameByID

func (t *TableInfo) FindColumnNameByID(id int64) string

FindColumnNameByID finds column name by id.

func (*TableInfo) FindConstraintInfoByName

func (t *TableInfo) FindConstraintInfoByName(constrName string) *ConstraintInfo

FindConstraintInfoByName finds constraintInfo by name.

func (*TableInfo) FindIndexByID

func (t *TableInfo) FindIndexByID(id int64) *IndexInfo

FindIndexByID finds index by id.

func (*TableInfo) FindIndexByName

func (t *TableInfo) FindIndexByName(idxName string) *IndexInfo

FindIndexByName finds index by name.

func (*TableInfo) FindIndexNameByID

func (t *TableInfo) FindIndexNameByID(id int64) string

FindIndexNameByID finds index name by id.

func (*TableInfo) FindPublicColumnByName

func (t *TableInfo) FindPublicColumnByName(colNameL string) *ColumnInfo

FindPublicColumnByName finds the public column by name.

func (*TableInfo) GetAutoIncrementColInfo

func (t *TableInfo) GetAutoIncrementColInfo() *ColumnInfo

GetAutoIncrementColInfo gets the ColumnInfo of auto_increment column if exists.

func (*TableInfo) GetColumnByID

func (t *TableInfo) GetColumnByID(id int64) *ColumnInfo

GetColumnByID finds the column by ID.

func (*TableInfo) GetPartitionInfo

func (t *TableInfo) GetPartitionInfo() *PartitionInfo

GetPartitionInfo returns the partition information.

func (*TableInfo) GetPkColInfo

func (t *TableInfo) GetPkColInfo() *ColumnInfo

GetPkColInfo gets the ColumnInfo of the pk if it exists. Make sure PkIsHandle is checked before calling this method.

func (*TableInfo) GetPkName

func (t *TableInfo) GetPkName() model.CIStr

GetPkName will return the pk name if pk exists.

func (*TableInfo) GetPrimaryKey

func (t *TableInfo) GetPrimaryKey() *IndexInfo

GetPrimaryKey extracts the primary key of a table and returns its `IndexInfo`. The returned primary key could be explicit or implicit. If there is no explicit primary key in the table, the first UNIQUE INDEX on NOT NULL columns will be the implicit primary key. For more information about implicit primary keys, see https://dev.mysql.com/doc/refman/8.0/en/invisible-indexes.html

func (*TableInfo) GetUpdateTime

func (t *TableInfo) GetUpdateTime() time.Time

GetUpdateTime gets the table's updating time.

func (*TableInfo) HasClusteredIndex

func (t *TableInfo) HasClusteredIndex() bool

HasClusteredIndex checks whether the table has a clustered index.

func (*TableInfo) Hash64

func (t *TableInfo) Hash64(h base.Hasher)

Hash64 implement HashEquals interface.

func (*TableInfo) IsAutoIncColUnsigned

func (t *TableInfo) IsAutoIncColUnsigned() bool

IsAutoIncColUnsigned checks whether the auto increment column is unsigned.

func (*TableInfo) IsAutoRandomBitColUnsigned

func (t *TableInfo) IsAutoRandomBitColUnsigned() bool

IsAutoRandomBitColUnsigned indicates whether the auto_random column is unsigned. Make sure the table contains auto_random before calling this method.

func (*TableInfo) IsBaseTable

func (t *TableInfo) IsBaseTable() bool

IsBaseTable checks that the table is neither a view nor a sequence.

func (*TableInfo) IsLocked

func (t *TableInfo) IsLocked() bool

IsLocked checks whether the table was locked.

func (*TableInfo) IsSequence

func (t *TableInfo) IsSequence() bool

IsSequence checks if TableInfo is a sequence.

func (*TableInfo) IsView

func (t *TableInfo) IsView() bool

IsView checks if TableInfo is a view.

func (*TableInfo) MoveColumnInfo

func (t *TableInfo) MoveColumnInfo(from, to int)

MoveColumnInfo moves a column to another offset. It maintains the offsets of all affected columns and index columns.

func (*TableInfo) SepAutoInc

func (t *TableInfo) SepAutoInc() bool

SepAutoInc decides whether _rowid and auto_increment id use separate allocator.

type TableItemID

type TableItemID struct {
	TableID          int64
	ID               int64
	IsIndex          bool
	IsSyncLoadFailed bool
}

TableItemID is composed of a table ID and a column/index ID.

func (TableItemID) Key

func (t TableItemID) Key() string

Key is used to generate a unique key for TableItemID to use in the syncload.

type TableLockInfo

type TableLockInfo struct {
	Tp model.TableLockType
	// Use array because there may be multiple sessions holding the same read lock.
	Sessions []SessionInfo
	State    TableLockState
	// TS is used to record the timestamp when this table lock was locked.
	TS uint64
}

TableLockInfo provides meta data describing a table lock.

type TableLockState

type TableLockState byte

TableLockState is the state for table lock.

const (
	// TableLockStateNone means this table lock is absent.
	TableLockStateNone TableLockState = iota
	// TableLockStatePreLock means this table lock is in the pre-lock state. Other sessions that don't hold this lock shouldn't do the corresponding operation according to the lock type.
	TableLockStatePreLock
	// TableLockStatePublic means this table lock is public state.
	TableLockStatePublic
)

func (TableLockState) String

func (t TableLockState) String() string

String implements fmt.Stringer interface.

type TableLockTpInfo

type TableLockTpInfo struct {
	SchemaID int64
	TableID  int64
	Tp       model.TableLockType
}

TableLockTpInfo is composed of a schema ID, a table ID and a table lock type.

type TableNameInfo

type TableNameInfo struct {
	ID   int64       `json:"id"`
	Name model.CIStr `json:"name"`
}

TableNameInfo provides meta data describing a table name info.

type TablePartitionArgs

type TablePartitionArgs struct {
	PartNames []string       `json:"part_names,omitempty"`
	PartInfo  *PartitionInfo `json:"part_info,omitempty"`

	// set on finished
	OldPhysicalTblIDs []int64          `json:"old_physical_tbl_ids,omitempty"`
	OldGlobalIndexes  []TableIDIndexID `json:"old_global_indexes,omitempty"`

	// runtime info
	NewPartitionIDs []int64 `json:"-"`
}

TablePartitionArgs is the arguments for table partition related jobs, including:

  • ActionAlterTablePartitioning
  • ActionRemovePartitioning
  • ActionReorganizePartition
  • ActionAddTablePartition: doesn't have finished args on success.
  • ActionDropTablePartition

When rolling back, the args of ActionAddTablePartition will be changed to be the same as those of ActionDropTablePartition, and it will have finished args, although they are not used anywhere. For the other types, their args will be decoded as if they were the same as those of ActionDropTablePartition.

func GetFinishedTablePartitionArgs

func GetFinishedTablePartitionArgs(job *Job) (*TablePartitionArgs, error)

GetFinishedTablePartitionArgs gets the table partition args after the job is finished.

func GetTablePartitionArgs

func GetTablePartitionArgs(job *Job) (*TablePartitionArgs, error)

GetTablePartitionArgs gets the table partition args.

type TempTableType

type TempTableType byte

TempTableType is the type of the temp table

const (
	TempTableNone TempTableType = iota
	TempTableGlobal
	TempTableLocal
)

TempTableType values.

func (TempTableType) String

func (t TempTableType) String() string

String implements fmt.Stringer interface.

type TiFlashReplicaInfo

type TiFlashReplicaInfo struct {
	Count                 uint64
	LocationLabels        []string
	Available             bool
	AvailablePartitionIDs []int64
}

TiFlashReplicaInfo means the TiFlash replica info.

func (*TiFlashReplicaInfo) IsPartitionAvailable

func (tr *TiFlashReplicaInfo) IsPartitionAvailable(pid int64) bool

IsPartitionAvailable checks whether the replica of the partition is available.

type TimeZoneLocation

type TimeZoneLocation struct {
	Name   string `json:"name"`
	Offset int    `json:"offset"` // seconds east of UTC
	// contains filtered or unexported fields
}

TimeZoneLocation represents a single time zone.

func (*TimeZoneLocation) GetLocation

func (tz *TimeZoneLocation) GetLocation() (*time.Location, error)

GetLocation gets the timezone location.

type TraceInfo

type TraceInfo struct {
	// ConnectionID is the id of the connection
	ConnectionID uint64 `json:"connection_id"`
	// SessionAlias is the alias of session
	SessionAlias string `json:"session_alias"`
}

TraceInfo is the information for trace.

type TruncateTableArgs

type TruncateTableArgs struct {
	FKCheck         bool    `json:"fk_check,omitempty"`
	NewTableID      int64   `json:"new_table_id,omitempty"`
	NewPartitionIDs []int64 `json:"new_partition_ids,omitempty"`
	OldPartitionIDs []int64 `json:"old_partition_ids,omitempty"`

	// context vars
	NewPartIDsWithPolicy           []int64 `json:"-"`
	OldPartIDsWithPolicy           []int64 `json:"-"`
	ShouldUpdateAffectedPartitions bool    `json:"-"`
}

TruncateTableArgs is the arguments for truncate table/partition job.

func GetFinishedTruncateTableArgs

func GetFinishedTruncateTableArgs(job *Job) (*TruncateTableArgs, error)

GetFinishedTruncateTableArgs gets the truncate table args after the job is finished.

func GetTruncateTableArgs

func GetTruncateTableArgs(job *Job) (*TruncateTableArgs, error)

GetTruncateTableArgs gets the truncate table args.

type UpdateIndexInfo

type UpdateIndexInfo struct {
	IndexName string `json:"index_name"`
	Global    bool   `json:"global"`
}

UpdateIndexInfo carries the entries in the list of indexes in UPDATE INDEXES during ALTER TABLE t PARTITION BY ... UPDATE INDEXES (idx_a GLOBAL, idx_b LOCAL, ...).

type UpdateTiFlashReplicaStatusArgs

type UpdateTiFlashReplicaStatusArgs struct {
	Available  bool  `json:"available,omitempty"`
	PhysicalID int64 `json:"physical_id,omitempty"`
}

UpdateTiFlashReplicaStatusArgs is the arguments for updating TiFlash replica status ddl.

func GetUpdateTiFlashReplicaStatusArgs

func GetUpdateTiFlashReplicaStatusArgs(job *Job) (*UpdateTiFlashReplicaStatusArgs, error)

GetUpdateTiFlashReplicaStatusArgs gets the args for updating TiFlash replica status ddl.

type VectorIndexInfo

type VectorIndexInfo struct {
	// Dimension is the dimension of the vector.
	Dimension uint64 `json:"dimension"`
	// DistanceMetric is the distance metric used by the index.
	DistanceMetric DistanceMetric `json:"distance_metric"`
}

VectorIndexInfo is the information of vector index of a column.

type ViewInfo

type ViewInfo struct {
	Algorithm   model.ViewAlgorithm   `json:"view_algorithm"`
	Definer     *auth.UserIdentity    `json:"view_definer"`
	Security    model.ViewSecurity    `json:"view_security"`
	SelectStmt  string                `json:"view_select"`
	CheckOption model.ViewCheckOption `json:"view_checkoption"`
	Cols        []model.CIStr         `json:"view_cols"`
}

ViewInfo provides meta data describing a DB view.

type WindowRepeatType

type WindowRepeatType byte

WindowRepeatType is the type of the window repeat.

const (
	Never WindowRepeatType = iota
	Day
	Week
	Month
)

WindowRepeatType values.

func (WindowRepeatType) String

func (s WindowRepeatType) String() string

String implements fmt.Stringer interface.
