Documentation ¶
Index ¶
- func BytesToString(b []byte) string
- func CheckAllKeysExistInSecret(secret *corev1.Secret, keys ...string) (string, bool)
- func GenStorageArgsForFlag(provider v1alpha1.StorageProvider, flag string) ([]string, error)
- func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, ...) ([]corev1.EnvVar, string, error)
- func GenerateTidbPasswordEnv(ns, tcName, tidbSecretName string, useKMS bool, ...) ([]corev1.EnvVar, string, error)
- func GetBackupBucketName(backup *v1alpha1.Backup) (string, string, error)
- func GetBackupDataPath(provider v1alpha1.StorageProvider) (string, string, error)
- func GetBackupPrefixName(backup *v1alpha1.Backup) (string, string, error)
- func GetOptions(provider v1alpha1.StorageProvider) []string
- func GetStoragePath(provider v1alpha1.StorageProvider) (string, error)
- func GetStorageType(provider v1alpha1.StorageProvider) v1alpha1.BackupStorageType
- func ParseImage(image string) (string, string)
- func StringToBytes(s string) []byte
- func ValidateBackup(backup *v1alpha1.Backup, tikvImage string, acrossK8s bool) error
- func ValidateRestore(restore *v1alpha1.Restore, tikvImage string, acrossK8s bool) error
- type BatchDeleteObjectsResult
- type ClusterInfo
- type EBSBasedBRMeta
- type EBSSession
- type EBSStore
- type EBSVolume
- type EBSVolumeType
- type EC2Session
- type KubernetesBackup
- type MockDriver
- func (d *MockDriver) As(i interface{}) bool
- func (d *MockDriver) Delete(_ context.Context, key string) error
- func (d *MockDriver) ErrorCode(err error) gcerrors.ErrorCode
- func (d *MockDriver) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error)
- func (d *MockDriver) SetDelete(fn func(key string) error)
- func (d *MockDriver) SetListPaged(objs []*driver.ListObject, rerr error)
- type ObjectError
- type PDComponent
- type PageIterator
- type StorageBackend
- func (b *StorageBackend) AsGCS() (*storage.Client, bool)
- func (b *StorageBackend) AsS3() (*s3.S3, bool)
- func (b *StorageBackend) BatchDeleteObjects(ctx context.Context, objs []*blob.ListObject, opt v1alpha1.BatchDeleteOption) *BatchDeleteObjectsResult
- func (b *StorageBackend) GetBucket() string
- func (b *StorageBackend) GetPrefix() string
- func (b *StorageBackend) ListPage(opts *blob.ListOptions) *PageIterator
- func (b *StorageBackend) StorageType() v1alpha1.BackupStorageType
- type StorageCredential
- type TagMap
- type TiDBComponent
- type TiKVComponent
- type Worker
- type WorkerPool
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func BytesToString ¶ added in v1.4.0
func BytesToString(b []byte) string
func CheckAllKeysExistInSecret ¶
func CheckAllKeysExistInSecret(secret *corev1.Secret, keys ...string) (string, bool)
CheckAllKeysExistInSecret checks whether all the given keys exist in the specified secret and returns the missing keys joined by ",".
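A minimal usage sketch, assuming this package is imported as util with fmt and a corelisterv1.SecretLister available; the secret name and key names are illustrative:

// checkBackupSecret is a hypothetical helper showing the call pattern.
func checkBackupSecret(ns string, secretLister corelisterv1.SecretLister) error {
	secret, err := secretLister.Secrets(ns).Get("backup-secret") // illustrative secret name
	if err != nil {
		return err
	}
	// "access_key" and "secret_key" are illustrative key names.
	missing, ok := util.CheckAllKeysExistInSecret(secret, "access_key", "secret_key")
	if !ok {
		return fmt.Errorf("secret %s/%s is missing keys: %s", ns, secret.Name, missing)
	}
	return nil
}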
func GenStorageArgsForFlag ¶ added in v1.4.0
func GenStorageArgsForFlag(provider v1alpha1.StorageProvider, flag string) ([]string, error)
GenStorageArgsForFlag returns the arguments for the --flag option and the remote/local path for BR; the default flag is storage. TODO: add unit test
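A hedged sketch of assembling a BR command line from a StorageProvider; the helper name, base arguments, and the assumption that an empty flag name selects the default "storage" flag are illustrative:

// buildBRBackupArgs is a hypothetical helper.
func buildBRBackupArgs(provider v1alpha1.StorageProvider) ([]string, error) {
	args := []string{"backup", "full"}
	// Passing "" is assumed to fall back to the default "storage" flag described above.
	storageArgs, err := util.GenStorageArgsForFlag(provider, "")
	if err != nil {
		return nil, err
	}
	return append(args, storageArgs...), nil
}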
func GenerateStorageCertEnv ¶
func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, secretLister corelisterv1.SecretLister) ([]corev1.EnvVar, string, error)
GenerateStorageCertEnv generates the environment variables needed to access the backend backup storage.
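A minimal sketch of wiring the generated variables into a container spec; treating the second return value as a short reason string for error reporting is an assumption, and the helper name is illustrative:

// addStorageEnv is a hypothetical helper.
func addStorageEnv(ns string, provider v1alpha1.StorageProvider, secretLister corelisterv1.SecretLister, container *corev1.Container) error {
	envVars, reason, err := util.GenerateStorageCertEnv(ns, false /* useKMS */, provider, secretLister)
	if err != nil {
		return fmt.Errorf("%s: %v", reason, err)
	}
	container.Env = append(container.Env, envVars...)
	return nil
}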
func GenerateTidbPasswordEnv ¶
func GenerateTidbPasswordEnv(ns, tcName, tidbSecretName string, useKMS bool, secretLister corelisterv1.SecretLister) ([]corev1.EnvVar, string, error)
GenerateTidbPasswordEnv generates the password EnvVar.
func GetBackupBucketName ¶
func GetBackupBucketName(backup *v1alpha1.Backup) (string, string, error)
GetBackupBucketName returns the bucket name used by the remote storage.
func GetBackupDataPath ¶
func GetBackupDataPath(provider v1alpha1.StorageProvider) (string, string, error)
GetBackupDataPath returns the full path of the backup data.
func GetBackupPrefixName ¶
func GetBackupPrefixName(backup *v1alpha1.Backup) (string, string, error)
GetBackupPrefixName returns the prefix used by the remote storage.
func GetOptions ¶ added in v1.4.0
func GetOptions(provider v1alpha1.StorageProvider) []string
GetOptions gets the rclone options
func GetStoragePath ¶ added in v1.4.0
func GetStoragePath(provider v1alpha1.StorageProvider) (string, error)
GetStoragePath generates the path of a specific storage from the given StorageProvider.
func GetStorageType ¶
func GetStorageType(provider v1alpha1.StorageProvider) v1alpha1.BackupStorageType
GetStorageType returns the backup storage type according to the specified StorageProvider.
func ParseImage ¶
func ParseImage(image string) (string, string)
ParseImage returns the image name and the tag from the input image string.
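A tiny illustrative call; the expected split shown in the comment is an assumption based on the description above:

name, tag := util.ParseImage("pingcap/tikv:v6.5.0") // illustrative image string
// name is expected to be "pingcap/tikv" and tag to be "v6.5.0"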
func StringToBytes ¶ added in v1.4.0
func StringToBytes(s string) []byte
func ValidateBackup ¶
func ValidateBackup(backup *v1alpha1.Backup, tikvImage string, acrossK8s bool) error
ValidateBackup validates the backup spec.
func ValidateRestore ¶
func ValidateRestore(restore *v1alpha1.Restore, tikvImage string, acrossK8s bool) error
Types ¶
type BatchDeleteObjectsResult ¶ added in v1.4.0
type BatchDeleteObjectsResult struct {
	Deleted []string
	Errors  []ObjectError
}
func BatchDeleteObjectsConcurrently ¶ added in v1.4.0
func BatchDeleteObjectsConcurrently(ctx context.Context, bucket *blob.Bucket, objs []*blob.ListObject, concurrency int) *BatchDeleteObjectsResult
BatchDeleteObjectsConcurrently deletes objects concurrently with multiple goroutines.
func BatchDeleteObjectsOfS3 ¶ added in v1.4.0
func BatchDeleteObjectsOfS3(ctx context.Context, s3cli s3iface.S3API, objs []*blob.ListObject, bucket string, prefix string, concurrency int) *BatchDeleteObjectsResult
BatchDeleteObjectsOfS3 deletes objects using the S3 batch delete API.
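Both helpers return a *BatchDeleteObjectsResult. A minimal sketch using the concurrent variant; the helper name and concurrency value are illustrative, and gocloud.dev/blob is assumed to be imported as blob:

// deletePage is a hypothetical helper that deletes one listed page of objects.
func deletePage(ctx context.Context, bucket *blob.Bucket, objs []*blob.ListObject) error {
	result := util.BatchDeleteObjectsConcurrently(ctx, bucket, objs, 8) // concurrency of 8 is illustrative
	if len(result.Errors) > 0 {
		return fmt.Errorf("deleted %d objects, but %d deletions failed", len(result.Deleted), len(result.Errors))
	}
	return nil
}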
type ClusterInfo ¶ added in v1.4.4
type ClusterInfo struct {
	Version        string            `json:"cluster_version" toml:"cluster_version"`
	FullBackupType string            `json:"full_backup_type" toml:"full_backup_type"`
	ResolvedTS     uint64            `json:"resolved_ts" toml:"resolved_ts"`
	Replicas       map[string]uint64 `json:"replicas" toml:"replicas"`
}
ClusterInfo represents the TiDB cluster-level meta information, such as the PD cluster ID/alloc ID, the cluster resolved TS, and the TiKV configuration.
type EBSBasedBRMeta ¶ added in v1.4.4
type EBSBasedBRMeta struct {
	ClusterInfo    *ClusterInfo           `json:"cluster_info" toml:"cluster_info"`
	TiKVComponent  *TiKVComponent         `json:"tikv" toml:"tikv"`
	TiDBComponent  *TiDBComponent         `json:"tidb" toml:"tidb"`
	PDComponent    *PDComponent           `json:"pd" toml:"pd"`
	KubernetesMeta *KubernetesBackup      `json:"kubernetes" toml:"kubernetes"`
	Options        map[string]interface{} `json:"options" toml:"options"`
	Region         string                 `json:"region" toml:"region"`
}
func GetVolSnapBackupMetaData ¶ added in v1.4.4
func GetVolSnapBackupMetaData(r *v1alpha1.Restore, secretLister corelisterv1.SecretLister) (*EBSBasedBRMeta, error)
GetVolSnapBackupMetaData gets the backup metadata from cloud storage.
type EBSSession ¶ added in v1.4.4
func NewEBSSession ¶ added in v1.4.4
func NewEBSSession(concurrency uint) (*EBSSession, error)
type EBSVolume ¶ added in v1.4.4
type EBSVolume struct {
	ID              string `json:"volume_id" toml:"volume_id"`
	Type            string `json:"type" toml:"type"`
	SnapshotID      string `json:"snapshot_id" toml:"snapshot_id"`
	RestoreVolumeId string `json:"restore_volume_id" toml:"restore_volume_id"`
	VolumeAZ        string `json:"volume_az" toml:"volume_az"`
	Status          string `json:"status" toml:"status"`
}
EBSVolume is passed in by TiDB deployment tools: TiDB Operator and TiUP (in the future). Snapshots should be taken inside BR, because some logic is needed to determine the order in which snapshots are started. TODO: finish the info with the TiDB Operator developers.
type EBSVolumeType ¶ added in v1.4.4
type EBSVolumeType string
TODO: should this structure be refactored or reserved for future use?
const (
	GP3Volume EBSVolumeType = "gp3"
	IO1Volume EBSVolumeType = "io1"
	IO2Volume EBSVolumeType = "io2"
	CloudAPIConcurrency     = 3
)
func (EBSVolumeType) Valid ¶ added in v1.4.4
func (t EBSVolumeType) Valid() bool
type EC2Session ¶ added in v1.4.4
func NewEC2Session ¶ added in v1.4.4
func NewEC2Session(concurrency uint) (*EC2Session, error)
func (*EC2Session) AddTags ¶ added in v1.5.0
func (e *EC2Session) AddTags(resourcesTags map[string]TagMap) error
func (*EC2Session) DeleteSnapshots ¶ added in v1.4.4
func (e *EC2Session) DeleteSnapshots(snapIDMap map[string]string) error
type KubernetesBackup ¶ added in v1.4.4
type KubernetesBackup struct {
	PVCs         []*corev1.PersistentVolumeClaim `json:"pvcs"`
	PVs          []*corev1.PersistentVolume      `json:"pvs"`
	TiDBCluster  *v1alpha1.TidbCluster           `json:"crd_tidb_cluster"`
	Unstructured *unstructured.Unstructured      `json:"options"`
}
type MockDriver ¶ added in v1.4.0
type MockDriver struct {
	driver.Bucket
	Type v1alpha1.BackupStorageType
	// contains filtered or unexported fields
}
MockDriver implements driver.Bucket.
func (*MockDriver) As ¶ added in v1.4.0
func (d *MockDriver) As(i interface{}) bool
func (*MockDriver) Delete ¶ added in v1.4.0
func (d *MockDriver) Delete(_ context.Context, key string) error
func (*MockDriver) ErrorCode ¶ added in v1.4.0
func (d *MockDriver) ErrorCode(err error) gcerrors.ErrorCode
func (*MockDriver) ListPaged ¶ added in v1.4.0
func (d *MockDriver) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error)
func (*MockDriver) SetDelete ¶ added in v1.4.0
func (d *MockDriver) SetDelete(fn func(key string) error)
func (*MockDriver) SetListPaged ¶ added in v1.4.0
func (d *MockDriver) SetListPaged(objs []*driver.ListObject, rerr error)
type ObjectError ¶ added in v1.4.0
type PDComponent ¶ added in v1.4.4
type PDComponent struct {
Replicas int `json:"replicas"`
}
type PageIterator ¶ added in v1.4.0
type PageIterator struct {
// contains filtered or unexported fields
}
PageIterator iterates over pages of objects via 'ListIterator'.
func (*PageIterator) Next ¶ added in v1.4.0
func (i *PageIterator) Next(ctx context.Context, pageSize int) ([]*blob.ListObject, error)
Next lists a page of objects.
If err == io.EOF, all objects in the bucket have been read. If err == nil, a page of objects has been read. Otherwise, an error occurred, and the objects read so far are returned.
TODO: use blob.ListPage after upgrading the gocloud.dev dependency to 0.21
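A sketch of the read loop implied by the io.EOF contract above; the helper name and page size are illustrative, and io plus gocloud.dev/blob are assumed to be imported:

// drainIterator is a hypothetical helper that reads every page.
func drainIterator(ctx context.Context, iter *util.PageIterator) ([]*blob.ListObject, error) {
	var all []*blob.ListObject
	for {
		page, err := iter.Next(ctx, 1000) // illustrative page size
		all = append(all, page...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return all, err
		}
	}
}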
type StorageBackend ¶ added in v1.4.0
StorageBackend provides a generic storage backend.
func NewStorageBackend ¶ added in v1.4.0
func NewStorageBackend(provider v1alpha1.StorageProvider, cred *StorageCredential) (*StorageBackend, error)
NewStorageBackend creates a new storage backend; S3/GCS/Azblob/Local are supported now. The function is called by both the controller and backup/restore. Since BR already has the env config in the BR pod, cred can be nil.
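A minimal construction sketch; it assumes the Backup spec embeds a StorageProvider, and the helper name and log line are illustrative:

// openBackend is a hypothetical helper.
func openBackend(backup *v1alpha1.Backup, cred *util.StorageCredential) (*util.StorageBackend, error) {
	backend, err := util.NewStorageBackend(backup.Spec.StorageProvider, cred)
	if err != nil {
		return nil, err
	}
	fmt.Printf("type=%s bucket=%s prefix=%s\n", backend.StorageType(), backend.GetBucket(), backend.GetPrefix())
	return backend, nil
}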
func (*StorageBackend) AsGCS ¶ added in v1.4.0
func (b *StorageBackend) AsGCS() (*storage.Client, bool)
func (*StorageBackend) AsS3 ¶
func (b *StorageBackend) AsS3() (*s3.S3, bool)
func (*StorageBackend) BatchDeleteObjects ¶ added in v1.4.0
func (b *StorageBackend) BatchDeleteObjects(ctx context.Context, objs []*blob.ListObject, opt v1alpha1.BatchDeleteOption) *BatchDeleteObjectsResult
BatchDeleteObjects deletes multiple objects.
Depending on the storage type, it uses either 'BatchDeleteObjectsOfS3' or 'BatchDeleteObjectsConcurrently'.
func (*StorageBackend) GetBucket ¶ added in v1.4.0
func (b *StorageBackend) GetBucket() string
GetBucket returns the bucket name.
If the provider is S3/GCS/Azblob, it returns the bucket. Otherwise it returns an empty string.
func (*StorageBackend) GetPrefix ¶ added in v1.4.0
func (b *StorageBackend) GetPrefix() string
GetPrefix returns the prefix.
func (*StorageBackend) ListPage ¶ added in v1.4.0
func (b *StorageBackend) ListPage(opts *blob.ListOptions) *PageIterator
func (*StorageBackend) StorageType ¶ added in v1.4.0
func (b *StorageBackend) StorageType() v1alpha1.BackupStorageType
type StorageCredential ¶ added in v1.4.0
type StorageCredential struct {
// contains filtered or unexported fields
}
func GetStorageCredential ¶ added in v1.4.0
func GetStorageCredential(ns string, provider v1alpha1.StorageProvider, secretLister corelisterv1.SecretLister) *StorageCredential
type TiDBComponent ¶ added in v1.4.4
type TiDBComponent struct {
Replicas int `json:"replicas"`
}
type TiKVComponent ¶ added in v1.4.4
type WorkerPool ¶ added in v1.4.4
type WorkerPool struct {
// contains filtered or unexported fields
}
WorkerPool contains a pool of workers.
func NewWorkerPool ¶ added in v1.4.4
func NewWorkerPool(limit uint, name string) *WorkerPool
NewWorkerPool returns a WorkerPool.
func (*WorkerPool) Apply ¶ added in v1.4.4
func (pool *WorkerPool) Apply(fn taskFunc)
Apply executes a task.
func (*WorkerPool) ApplyOnErrorGroup ¶ added in v1.4.4
func (pool *WorkerPool) ApplyOnErrorGroup(eg *errgroup.Group, fn func() error)
ApplyOnErrorGroup executes a task in an errorgroup.
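A hedged sketch of the WorkerPool/errgroup pattern; the pool size, pool name, helper name, and task body are illustrative, and golang.org/x/sync/errgroup is assumed to be imported:

// deleteSnapshotsConcurrently is a hypothetical helper.
func deleteSnapshotsConcurrently(snapIDs []string) error {
	pool := util.NewWorkerPool(4, "delete-snapshots") // illustrative limit and name
	eg := new(errgroup.Group)
	for _, id := range snapIDs {
		id := id // capture the loop variable
		pool.ApplyOnErrorGroup(eg, func() error {
			// ... call the cloud API to delete snapshot `id` here ...
			_ = id
			return nil
		})
	}
	return eg.Wait()
}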
func (*WorkerPool) ApplyWorker ¶ added in v1.4.4
func (pool *WorkerPool) ApplyWorker() *Worker
ApplyWorker applies for a worker from the pool.
func (*WorkerPool) IdleCount ¶ added in v1.4.4
func (pool *WorkerPool) IdleCount() int
IdleCount counts how many idle workers are in the pool.
func (*WorkerPool) Limit ¶ added in v1.4.4
func (pool *WorkerPool) Limit() int
Limit returns the concurrency limit of the pool.
func (*WorkerPool) RecycleWorker ¶ added in v1.4.4
func (pool *WorkerPool) RecycleWorker(worker *Worker)
RecycleWorker recycles a worker back into the pool.