Documentation ¶
Index ¶
- Constants
- func DecryptFullBackupMetaIfNeeded(metaData []byte, cipherInfo *backuppb.CipherInfo) ([]byte, error)
- func Encrypt(content []byte, cipher *backuppb.CipherInfo) (encryptedContent, iv []byte, err error)
- func LoadBackupTables(ctx context.Context, reader *MetaReader, loadStats bool) (map[string]*Database, error)
- func NewSizedMetaFile(sizeLimit int) *sizedMetaFile
- func PitrIDMapsFilename(clusterID, restoreTS uint64) string
- func RestoreStats(ctx context.Context, storage storage.ExternalStorage, ...) error
- func SkipFiles(conf *readSchemaConfig)
- func SkipStats(conf *readSchemaConfig)
- type AppendOp
- type Database
- type MetaReader
- func (*MetaReader) ArchiveSize(_ context.Context, files []*backuppb.File) uint64
- func (reader *MetaReader) GetBasic() backuppb.BackupMeta
- func (reader *MetaReader) ReadDDLs(ctx context.Context) ([]byte, error)
- func (reader *MetaReader) ReadSchemasFiles(ctx context.Context, output chan<- *Table, opts ...ReadSchemaOption) error
- type MetaWriter
- func (writer *MetaWriter) ArchiveSize() uint64
- func (writer *MetaWriter) Backupmeta() *backuppb.BackupMeta
- func (writer *MetaWriter) FinishWriteMetas(ctx context.Context, op AppendOp) error
- func (writer *MetaWriter) FlushBackupMeta(ctx context.Context) error
- func (writer *MetaWriter) MetaFilesSize() uint64
- func (writer *MetaWriter) NewStatsWriter() *StatsWriter
- func (writer *MetaWriter) Send(m any, _ AppendOp) error
- func (writer *MetaWriter) StartWriteMetasAsync(ctx context.Context, op AppendOp)
- func (writer *MetaWriter) Update(f func(m *backuppb.BackupMeta))
- type ReadSchemaOption
- type StatsWriter
- type Table
Constants ¶
const (
    // LockFile represents the backup lock file name.
    LockFile = "backup.lock"
    // MetaFile represents the backup meta file name.
    MetaFile = "backupmeta"
    // MetaJSONFile represents the backup meta JSON file name.
    MetaJSONFile = "backupmeta.json"
    // MaxBatchSize represents the internal channel buffer size of MetaWriter and MetaReader.
    MaxBatchSize = 1024
    // MetaFileSize represents the size limit of one MetaFile.
    MetaFileSize = 128 * units.MiB
    // CrypterIvLen represents the length of the IV used by the crypter method.
    CrypterIvLen = 16
)
const (
    // MetaV1 represents the old version of backupmeta.
    // Because the old version doesn't have a version field, it is set to 0 for compatibility.
    MetaV1 = iota
    // MetaV2 represents the new version of backupmeta.
    MetaV2
)
Variables ¶
This section is empty.
Functions ¶
func DecryptFullBackupMetaIfNeeded ¶
func DecryptFullBackupMetaIfNeeded(metaData []byte, cipherInfo *backuppb.CipherInfo) ([]byte, error)
func Encrypt ¶
func Encrypt(content []byte, cipher *backuppb.CipherInfo) (encryptedContent, iv []byte, err error)
Encrypt encrypts the content according to CipherInfo.
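A minimal usage sketch, assuming the package import path github.com/pingcap/tidb/br/pkg/metautil and the kvproto brpb/encryptionpb packages for CipherInfo; the cipher method and key below are purely illustrative.

package main

import (
    "fmt"
    "log"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/kvproto/pkg/encryptionpb"
    "github.com/pingcap/tidb/br/pkg/metautil"
)

func main() {
    // Illustrative cipher configuration: AES-256-CTR with a 32-byte key.
    cipher := &backuppb.CipherInfo{
        CipherType: encryptionpb.EncryptionMethod_AES256_CTR,
        CipherKey:  []byte("0123456789abcdef0123456789abcdef"),
    }

    // Encrypt returns the ciphertext together with the IV it used;
    // both must be kept to decrypt the content later.
    encrypted, iv, err := metautil.Encrypt([]byte("backupmeta payload"), cipher)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("encrypted %d bytes, iv length %d\n", len(encrypted), len(iv))
}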
func LoadBackupTables ¶
func LoadBackupTables(ctx context.Context, reader *MetaReader, loadStats bool) (map[string]*Database, error)
LoadBackupTables loads schemas from BackupMeta.
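A hedged sketch of the usual call path: build a MetaReader (see NewMetaReader under MetaReader below) and pass it to LoadBackupTables. The helper name loadDatabases and the import paths are assumptions, not part of this package.

import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/tidb/br/pkg/metautil"
    "github.com/pingcap/tidb/br/pkg/storage"
)

// loadDatabases is a hypothetical helper that reads every database schema
// (including statistics) out of an already-parsed backupmeta.
func loadDatabases(
    ctx context.Context,
    backupMeta *backuppb.BackupMeta,
    extStorage storage.ExternalStorage,
    cipher *backuppb.CipherInfo,
) (map[string]*metautil.Database, error) {
    reader := metautil.NewMetaReader(backupMeta, extStorage, cipher)
    // loadStats=true also loads the table statistics referenced by the meta.
    return metautil.LoadBackupTables(ctx, reader, true)
}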
func NewSizedMetaFile ¶
func NewSizedMetaFile(sizeLimit int) *sizedMetaFile
NewSizedMetaFile creates a sizedMetaFile with the given size limit.
func PitrIDMapsFilename ¶
func PitrIDMapsFilename(clusterID, restoreTS uint64) string
PitrIDMapsFilename is the filename used to save ID maps in PITR.
func RestoreStats ¶
func RestoreStats(
    ctx context.Context,
    storage storage.ExternalStorage,
    cipher *backuppb.CipherInfo,
    statsHandler *handle.Handle,
    newTableInfo *model.TableInfo,
    statsFileIndexes []*backuppb.StatsFileIndex,
    rewriteIDMap map[int64]int64,
) error
Types ¶
type AppendOp ¶
type AppendOp int
AppendOp represents the operation type of meta.
const (
    // AppendMetaFile represents the MetaFile type.
    AppendMetaFile AppendOp = 0
    // AppendDataFile represents the DataFile type.
    // It records the file meta from TiKV.
    AppendDataFile AppendOp = 1
    // AppendSchema represents the schema from TiDB.
    AppendSchema AppendOp = 2
    // AppendDDL represents the DDLs before the last backup.
    AppendDDL AppendOp = 3
)
type MetaReader ¶
type MetaReader struct {
// contains filtered or unexported fields
}
MetaReader wraps a reader to read both the old and new versions of backupmeta.
func NewMetaReader ¶
func NewMetaReader(
    backupMeta *backuppb.BackupMeta,
    storage storage.ExternalStorage,
    cipher *backuppb.CipherInfo,
) *MetaReader
NewMetaReader creates MetaReader.
func (*MetaReader) ArchiveSize ¶
func (*MetaReader) ArchiveSize(_ context.Context, files []*backuppb.File) uint64
ArchiveSize returns the size of the archive data.
func (*MetaReader) GetBasic ¶
func (reader *MetaReader) GetBasic() backuppb.BackupMeta
GetBasic returns a basic copy of the backup meta.
func (*MetaReader) ReadDDLs ¶
func (reader *MetaReader) ReadDDLs(ctx context.Context) ([]byte, error)
ReadDDLs reads the DDLs from the backupmeta. This function is compatible with the old backupmeta.
func (*MetaReader) ReadSchemasFiles ¶
func (reader *MetaReader) ReadSchemasFiles(ctx context.Context, output chan<- *Table, opts ...ReadSchemaOption) error
ReadSchemasFiles reads the schemas and data files from the backupmeta. This function is compatible with the old backupmeta.
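A hedged sketch of draining the output channel. Whether the reader closes the channel is not stated here, so the sketch closes it itself after the call returns; the helper name readTableNames and the use of Table.Info.Name.O (the standard TiDB model.TableInfo layout) are assumptions.

import (
    "context"

    "github.com/pingcap/tidb/br/pkg/metautil"
)

// readTableNames is a hypothetical helper that collects table names while
// skipping statistics; drop SkipStats if the statistics are needed.
func readTableNames(ctx context.Context, reader *metautil.MetaReader) ([]string, error) {
    ch := make(chan *metautil.Table, 64)
    errCh := make(chan error, 1)
    go func() {
        // Close the channel once ReadSchemasFiles returns so the range below ends.
        errCh <- reader.ReadSchemasFiles(ctx, ch, metautil.SkipStats)
        close(ch)
    }()

    var names []string
    for tbl := range ch {
        if tbl.Info != nil {
            names = append(names, tbl.Info.Name.O)
        }
    }
    return names, <-errCh
}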
type MetaWriter ¶
type MetaWriter struct {
// contains filtered or unexported fields
}
MetaWriter wraps a writer and stays compatible with the old version of backupmeta.
func NewMetaWriter ¶
func NewMetaWriter(
    storage storage.ExternalStorage,
    metafileSizeLimit int,
    useV2Meta bool,
    metaFileName string,
    cipher *backuppb.CipherInfo,
) *MetaWriter
NewMetaWriter creates MetaWriter.
func (*MetaWriter) ArchiveSize ¶
func (writer *MetaWriter) ArchiveSize() uint64
ArchiveSize returns the total size of the archived backup data.
func (*MetaWriter) Backupmeta ¶
func (writer *MetaWriter) Backupmeta() *backuppb.BackupMeta
Backupmeta clones a backupmeta.
func (*MetaWriter) FinishWriteMetas ¶
func (writer *MetaWriter) FinishWriteMetas(ctx context.Context, op AppendOp) error
FinishWriteMetas closes the channel opened by StartWriteMetasAsync and flushes the buffered data.
func (*MetaWriter) FlushBackupMeta ¶
func (writer *MetaWriter) FlushBackupMeta(ctx context.Context) error
FlushBackupMeta flushes the `backupMeta` to the `ExternalStorage`.
func (*MetaWriter) MetaFilesSize ¶
func (writer *MetaWriter) MetaFilesSize() uint64
MetaFilesSize returns the size of the meta files from backupmeta v2. It must be called after all writes have been finished by `FinishWriteMetas`.
func (*MetaWriter) NewStatsWriter ¶
func (writer *MetaWriter) NewStatsWriter() *StatsWriter
NewStatsWriter creates a new StatsWriter from this MetaWriter.
func (*MetaWriter) Send ¶
func (writer *MetaWriter) Send(m any, _ AppendOp) error
Send sends the item to the internal buffer.
func (*MetaWriter) StartWriteMetasAsync ¶
func (writer *MetaWriter) StartWriteMetasAsync(ctx context.Context, op AppendOp)
StartWriteMetasAsync writes four kinds of meta into the backupmeta:
1. file
2. schema
3. ddl
4. rawRange (raw kv)
When useBackupMetaV2 is enabled, it generates a multi-level-index backupmetav2; otherwise it generates the backupmeta as before for compatibility. The caller should call FinishWriteMetas after StartWriteMetasAsync.
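The flow described above might look like the sketch below. The import paths, the helper name writeSchemas, and the assumption that a *backuppb.Schema is the payload Send expects for AppendSchema are not taken from this documentation; Send is only documented as accepting `any`.

import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/tidb/br/pkg/metautil"
    "github.com/pingcap/tidb/br/pkg/storage"
)

// writeSchemas is a hypothetical helper showing the async write flow:
// StartWriteMetasAsync -> Send -> FinishWriteMetas, then FlushBackupMeta.
func writeSchemas(
    ctx context.Context,
    extStorage storage.ExternalStorage,
    cipher *backuppb.CipherInfo,
    schemas []*backuppb.Schema,
) error {
    // MetaFileSize (128 MiB) as the per-file limit; useV2Meta=true enables the
    // multi-level backupmeta v2 index; MetaFile is the top-level meta name.
    writer := metautil.NewMetaWriter(extStorage, metautil.MetaFileSize, true, metautil.MetaFile, cipher)

    writer.StartWriteMetasAsync(ctx, metautil.AppendSchema)
    for _, s := range schemas {
        if err := writer.Send(s, metautil.AppendSchema); err != nil {
            return err
        }
    }
    // FinishWriteMetas closes the channel opened by StartWriteMetasAsync and
    // flushes the buffered schemas into meta files.
    if err := writer.FinishWriteMetas(ctx, metautil.AppendSchema); err != nil {
        return err
    }

    // Record extra top-level properties, then persist the backupmeta itself.
    writer.Update(func(m *backuppb.BackupMeta) {
        m.ClusterVersion = "v7.5.0" // illustrative value
    })
    return writer.FlushBackupMeta(ctx)
}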
func (*MetaWriter) Update ¶
func (writer *MetaWriter) Update(f func(m *backuppb.BackupMeta))
Update updates properties of the backupmeta through the given function.
type ReadSchemaOption ¶
type ReadSchemaOption func(*readSchemaConfig)
ReadSchemaOption describes an extra option for reading schemas, such as SkipFiles or SkipStats.
type StatsWriter ¶
type StatsWriter struct {
// contains filtered or unexported fields
}
StatsWriter is a lightweight wrapper used to dump table statistics.
func (*StatsWriter) BackupStats ¶
func (*StatsWriter) BackupStatsDone ¶
func (s *StatsWriter) BackupStatsDone(ctx context.Context) ([]*backuppb.StatsFileIndex, error)
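A hedged sketch of tying StatsWriter back to MetaWriter: NewStatsWriter produces the writer and BackupStatsDone finalizes it. The helper name finishStats is an assumption, and the step that feeds statistics in (BackupStats) is omitted because its signature is not shown above.

import (
    "context"

    backuppb "github.com/pingcap/kvproto/pkg/brpb"
    "github.com/pingcap/tidb/br/pkg/metautil"
)

// finishStats is a hypothetical helper: it obtains a StatsWriter from a
// MetaWriter and finalizes it, returning the stats file indexes that can be
// attached to a table (see Table.StatsFileIndexes below).
func finishStats(ctx context.Context, writer *metautil.MetaWriter) ([]*backuppb.StatsFileIndex, error) {
    sw := writer.NewStatsWriter()
    // Statistics would be written via BackupStats here; that step is omitted.
    return sw.BackupStatsDone(ctx)
}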
type Table ¶
type Table struct {
    DB               *model.DBInfo
    Info             *model.TableInfo
    Crc64Xor         uint64
    TotalKvs         uint64
    TotalBytes       uint64
    Files            []*backuppb.File
    TiFlashReplicas  int
    Stats            *util.JSONTable
    StatsFileIndexes []*backuppb.StatsFileIndex
}
Table wraps the schema and files of a table.
func (*Table) NoChecksum ¶
NoChecksum checks whether the table has a calculated checksum.