volsync

package
v0.0.0-...-14ccd47
Published: Feb 28, 2025 License: Apache-2.0 Imports: 30 Imported by: 0

Documentation

Constants

const (
	ManagedClusterAddOnKind    string = "ManagedClusterAddOn"
	ManagedClusterAddOnGroup   string = "addon.open-cluster-management.io"
	ManagedClusterAddOnVersion string = "v1alpha1"

	VolsyncManagedClusterAddOnName string = "volsync" // Needs to have this name
)
const (
	ServiceExportKind    string = "ServiceExport"
	ServiceExportGroup   string = "multicluster.x-k8s.io"
	ServiceExportVersion string = "v1alpha1"

	VolumeSnapshotKind                     string = "VolumeSnapshot"
	VolumeSnapshotIsDefaultAnnotation      string = "snapshot.storage.kubernetes.io/is-default-class"
	VolumeSnapshotIsDefaultAnnotationValue string = "true"

	PodVolumePVCClaimIndexName    string = "spec.volumes.persistentVolumeClaim.claimName"
	VolumeAttachmentToPVIndexName string = "spec.source.persistentVolumeName"

	VRGOwnerNameLabel      string = "volumereplicationgroups-owner"
	VRGOwnerNamespaceLabel string = "volumereplicationgroups-owner-namespace"

	FinalSyncTriggerString           string = "vrg-final-sync"
	PrepareForFinalSyncTriggerString string = "PREPARE-FOR-FINAL-SYNC-STOP-SCHEDULING"

	SchedulingIntervalMinLength int = 2
	CronSpecMaxDayOfMonth       int = 28

	VolSyncDoNotDeleteLabel    = "volsync.backube/do-not-delete" // TODO: point to volsync constant once it is available
	VolSyncDoNotDeleteLabelVal = "true"

	// See: https://issues.redhat.com/browse/ACM-1256
	// https://github.com/stolostron/backlog/issues/21824
	ACMAppSubDoNotDeleteAnnotation    = "apps.open-cluster-management.io/do-not-delete"
	ACMAppSubDoNotDeleteAnnotationVal = "true"

	OwnerNameAnnotation      = "ramendr.openshift.io/owner-name"
	OwnerNamespaceAnnotation = "ramendr.openshift.io/owner-namespace"

	// StorageClass label
	StorageIDLabel = "ramendr.openshift.io/storageid"

	PVAnnotationRetentionKey   = "volumereplicationgroups.ramendr.openshift.io/volsync-retained"
	PVAnnotationRetentionValue = "retained"

	PVCFinalizerProtected = "volumereplicationgroups.ramendr.openshift.io/pvc-volsync-protection"
)

Variables

var DefaultRsyncServiceType corev1.ServiceType = corev1.ServiceTypeClusterIP
var DefaultScheduleCronSpec = "*/10 * * * *" // Every 10 mins

Functions

func CleanupSecretPropagation

func CleanupSecretPropagation(ctx context.Context, k8sClient client.Client,
	ownerObject metav1.Object, log logr.Logger,
) error

Cleans up the Policy, PlacementRule, and PlacementBinding used to replicate the VolSync secret (if they exist); does not return an error if they do not exist.
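
A minimal cleanup sketch (illustrative, not part of the package documentation; the helper name and the volsync import alias for this package are assumptions):

// Hypothetical hub-side cleanup when the owning object is being deleted.
func cleanupSecretArtifacts(ctx context.Context, hubClient client.Client,
	owner metav1.Object, log logr.Logger,
) error {
	// Removes the Policy, PlacementRule, and PlacementBinding created for
	// secret replication; missing resources are not treated as errors.
	return volsync.CleanupSecretPropagation(ctx, hubClient, owner, log)
}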

func ConvertSchedulingIntervalToCronSpec

func ConvertSchedulingIntervalToCronSpec(schedulingInterval string) (*string, error)

Converts from schedulingInterval, which is in the format <num><m,h,d>, to the format VolSync expects, which is a cronspec: https://en.wikipedia.org/wiki/Cron#Overview
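
For example, a "5m" interval yields a cronspec that runs every five minutes. A small sketch (illustrative; the helper name and the fallback behavior are assumptions):

// Hypothetical helper converting a scheduling interval such as "5m", "2h",
// or "1d" into the cronspec VolSync expects.
func cronSpecForInterval(schedulingInterval string) (string, error) {
	cronSpec, err := volsync.ConvertSchedulingIntervalToCronSpec(schedulingInterval)
	if err != nil {
		return "", err
	}
	if cronSpec == nil {
		// Defensive fallback to the package default ("*/10 * * * *").
		return volsync.DefaultScheduleCronSpec, nil
	}

	return *cronSpec, nil
}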

func DeployVolSyncToCluster

func DeployVolSyncToCluster(ctx context.Context, k8sClient client.Client,
	managedClusterName string, log logr.Logger,
) error

Deploys VolSync from ACM to a managed cluster via a ManagedClusterAddOn.

Calling this function requires a ClusterRole that can create/update ManagedClusterAddOns.

Should be called from the hub cluster.
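
A hub-side usage sketch (illustrative; the helper name, cluster names, and RBAC setup are assumptions):

// Hypothetically called from a hub controller whose ServiceAccount is bound
// to a ClusterRole permitting create/update of ManagedClusterAddOns.
func ensureVolSyncOnManagedClusters(ctx context.Context, hubClient client.Client,
	clusterNames []string, log logr.Logger,
) error {
	for _, clusterName := range clusterNames {
		if err := volsync.DeployVolSyncToCluster(ctx, hubClient, clusterName, log); err != nil {
			return fmt.Errorf("failed to deploy VolSync to cluster %s: %w", clusterName, err)
		}
	}

	return nil
}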

func GetVolSyncPSKSecretNameFromVRGName

func GetVolSyncPSKSecretNameFromVRGName(vrgName string) string

func PropagateSecretToClusters

func PropagateSecretToClusters(ctx context.Context, k8sClient client.Client, sourceSecret *corev1.Secret,
	ownerObject metav1.Object, destClusters []string, destSecretName, destSecretNamespace string,
	log logr.Logger,
) error

Should be run from the hub; assumes the source secret exists on the hub cluster and should be propagated to destClusters. Creates a Policy/PlacementRule/PlacementBinding on the hub in the same namespace as the source secret.
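
A hub-side sketch (illustrative; the secret name, namespace, cluster names, and helper are assumptions):

// Hypothetical propagation of a VolSync secret from the hub to two managed clusters.
func propagatePSKSecret(ctx context.Context, hubClient client.Client,
	owner metav1.Object, log logr.Logger,
) error {
	sourceSecret := &corev1.Secret{}
	key := types.NamespacedName{Name: "my-vrg-vs-secret", Namespace: "my-app-namespace"}
	if err := hubClient.Get(ctx, key, sourceSecret); err != nil {
		return err
	}

	destClusters := []string{"cluster-east", "cluster-west"}

	// Creates the Policy/PlacementRule/PlacementBinding in the source secret's namespace.
	return volsync.PropagateSecretToClusters(ctx, hubClient, sourceSecret,
		owner, destClusters, sourceSecret.GetName(), sourceSecret.GetNamespace(), log)
}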

func RDStatusReady

func RDStatusReady(rd *volsyncv1alpha1.ReplicationDestination, log logr.Logger) bool

A ReplicationDestination is considered ready when a sync has completed: the rsync address should be filled out in the status and the latest image should be set properly in the status (at least one sync cycle has completed and a snapshot exists).
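
A sketch of gating on readiness before reading the rsync address (illustrative; the helper is an assumption, status fields per the VolSync v1alpha1 API):

// Hypothetical check before using the RD's rsync address.
func rsyncAddressIfReady(rd *volsyncv1alpha1.ReplicationDestination, log logr.Logger) (string, bool) {
	if !volsync.RDStatusReady(rd, log) {
		return "", false // no completed sync yet; caller should requeue
	}
	if rd.Status == nil || rd.Status.Rsync == nil || rd.Status.Rsync.Address == nil {
		return "", false
	}

	return *rd.Status.Rsync.Address, true
}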

func ReconcileVolSyncReplicationSecret

func ReconcileVolSyncReplicationSecret(ctx context.Context, k8sClient client.Client, ownerObject metav1.Object,
	secretName, secretNamespace string, log logr.Logger) (*corev1.Secret, error,
)

Creates a new VolSync replication secret on the cluster (should be called on the hub cluster). If the secret already exists, this is a no-op.
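
A sketch combining this with the secret-name helper (illustrative; the helper name is an assumption):

// Hypothetical hub-side ensure of the VolSync replication secret for a VRG;
// a no-op if the secret already exists.
func ensureReplicationSecret(ctx context.Context, hubClient client.Client,
	owner metav1.Object, vrgName, vrgNamespace string, log logr.Logger,
) (*corev1.Secret, error) {
	secretName := volsync.GetVolSyncPSKSecretNameFromVRGName(vrgName)

	return volsync.ReconcileVolSyncReplicationSecret(ctx, hubClient, owner,
		secretName, vrgNamespace, log)
}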

func ValidateObjectExists

func ValidateObjectExists(ctx context.Context, c client.Client, obj client.Object) error

ValidateObjectExists indicates whether a Kubernetes resource exists in the API server.
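
A minimal sketch (illustrative; the helper is an assumption):

// Hypothetical existence check for a PVC; a nil error means it exists.
func pvcExists(ctx context.Context, c client.Client, name, namespace string) error {
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}

	return volsync.ValidateObjectExists(ctx, c, pvc)
}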

Types

type VSHandler

type VSHandler struct {
	// contains filtered or unexported fields
}

func NewVSHandler

func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, owner metav1.Object,
	asyncSpec *ramendrv1alpha1.VRGAsyncSpec, defaultCephFSCSIDriverName string, copyMethod string,
	adminNamespaceVRG bool,
) *VSHandler

func (*VSHandler) CleanupLocalResources

func (v *VSHandler) CleanupLocalResources(lrs *volsyncv1alpha1.ReplicationSource) error

func (*VSHandler) CleanupRDNotInSpecList

func (v *VSHandler) CleanupRDNotInSpecList(rdSpecList []ramendrv1alpha1.VolSyncReplicationDestinationSpec) error

func (*VSHandler) DeleteLocalRD

func (v *VSHandler) DeleteLocalRD(lrdName, lrdNamespace string) error

func (*VSHandler) DeleteRD

func (v *VSHandler) DeleteRD(pvcName string, pvcNamespace string) error

func (*VSHandler) DeleteRS

func (v *VSHandler) DeleteRS(pvcName string, pvcNamespace string) error

func (*VSHandler) DeleteSnapshots

func (v *VSHandler) DeleteSnapshots(pvcNamespace string) error

func (*VSHandler) DisownVolSyncManagedPVC

func (v *VSHandler) DisownVolSyncManagedPVC(pvc *corev1.PersistentVolumeClaim) error

func (*VSHandler) EnsurePVCforDirectCopy

func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context,
	rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
) error

func (*VSHandler) EnsurePVCfromRD

func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, failoverAction bool,
) error

func (*VSHandler) GetVolumeSnapshotClassFromPVCStorageClass

func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error)

func (*VSHandler) GetVolumeSnapshotClasses

func (v *VSHandler) GetVolumeSnapshotClasses() ([]snapv1.VolumeSnapshotClass, error)

func (*VSHandler) GetWorkloadStatus

func (v *VSHandler) GetWorkloadStatus() string

func (*VSHandler) IsActiveJobPresent

func (v *VSHandler) IsActiveJobPresent(name, namespace string) (bool, error)

func (*VSHandler) IsCopyMethodDirect

func (v *VSHandler) IsCopyMethodDirect() bool

func (*VSHandler) IsRDDataProtected

func (v *VSHandler) IsRDDataProtected(pvcName, pvcNamespace string) (bool, error)

Returns true if at least one sync has completed (we'll consider this "data protected")
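
A sketch rolling this up across protected PVCs (illustrative; the helper is an assumption):

// Hypothetical aggregation used when reporting a DataProtected-style condition.
func allPVCsProtected(v *volsync.VSHandler, pvcs []types.NamespacedName) (bool, error) {
	for _, pvc := range pvcs {
		protected, err := v.IsRDDataProtected(pvc.Name, pvc.Namespace)
		if err != nil {
			return false, err
		}
		if !protected {
			return false, nil
		}
	}

	return true, nil
}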

func (*VSHandler) IsRSDataProtected

func (v *VSHandler) IsRSDataProtected(pvcName, pvcNamespace string) (bool, error)

func (*VSHandler) ModifyRSSpecForCephFS

func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec,
	storageClass *storagev1.StorageClass,
)

Workaround for a CephFS issue (FIXME): for CephFS only, restoring a PVC from a snapshot can be very slow when there are many files, and on every replication cycle a PVC must be created from a snapshot in order to get a point-in-time copy of the source PVC to sync with the ReplicationDestination. If the PVC is CephFS, modify the rsSpec AccessModes to use 'ReadOnlyMany'.
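
A sketch applying the workaround before reconciling an RS (illustrative; the StorageClass lookup and helper are assumptions):

// Hypothetically called before ReconcileRS: for a CephFS StorageClass, the
// rsSpec access modes are switched to ReadOnlyMany per the workaround above.
func applyCephFSWorkaround(ctx context.Context, c client.Client, v *volsync.VSHandler,
	rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec, storageClassName string,
) error {
	storageClass := &storagev1.StorageClass{}
	if err := c.Get(ctx, types.NamespacedName{Name: storageClassName}, storageClass); err != nil {
		return err
	}

	v.ModifyRSSpecForCephFS(rsSpec, storageClass)

	return nil
}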

func (*VSHandler) PrecreateDestPVCIfEnabled

func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
) (*string, error)

func (*VSHandler) PreparePVC

func (v *VSHandler) PreparePVC(pvcNamespacedName types.NamespacedName,
	copyMethodDirect,
	prepFinalSync,
	runFinalSync bool,
) error

func (*VSHandler) ReconcileRD

Returns a ReplicationDestination only if create/update is successful and the RD is considered available. Callers should assume that getting a nil ReplicationDestination back means they should retry/requeue.
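
A sketch of the retry semantics (illustrative; the method's parameter and return types are assumptions, since the signature is not shown above):

// Hypothetical caller: a nil RD with a nil error is treated as "not available
// yet" and requeued.
func reconcileOneRD(v *volsync.VSHandler,
	rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
) (requeue bool, err error) {
	rd, err := v.ReconcileRD(rdSpec)
	if err != nil {
		return true, err
	}
	if rd == nil {
		return true, nil // retry/requeue until the RD is available
	}

	return false, nil
}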

func (*VSHandler) ReconcileRS

Returns true only if runFinalSync is true and the final sync is done. Returns a ReplicationSource only if create/update is successful; callers should assume that getting a nil ReplicationSource back means they should retry/requeue. In short, it returns whether the final sync is complete, along with the RS if one was reconciled.
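
A sketch of the final-sync semantics (illustrative; the method's parameter and return types are assumptions, since the signature is not shown above):

// Hypothetical caller during final sync: a nil RS means retry/requeue.
func reconcileOneRS(v *volsync.VSHandler,
	rsSpec ramendrv1alpha1.VolSyncReplicationSourceSpec, runFinalSync bool,
) (finalSyncDone bool, requeue bool, err error) {
	finalSyncDone, rs, err := v.ReconcileRS(rsSpec, runFinalSync)
	if err != nil {
		return false, true, err
	}
	if rs == nil {
		return false, true, nil // create/update did not complete; retry later
	}

	return finalSyncDone, false, nil
}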

func (*VSHandler) ReconcileServiceExportForRD

func (v *VSHandler) ReconcileServiceExportForRD(rd *volsyncv1alpha1.ReplicationDestination) error

Makes sure a ServiceExport exists to export the service for this RD to remote clusters. See: https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.4/html/services/services-overview#enable-service-discovery-submariner
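
A sketch exporting the RD's service once the RD exists (illustrative; the helper is an assumption):

// Hypothetically called after ReconcileRD so remote clusters can reach the
// rsync service via Submariner service discovery.
func exportRDService(v *volsync.VSHandler, rd *volsyncv1alpha1.ReplicationDestination) error {
	if rd == nil {
		return nil // nothing to export yet; RD not available
	}

	return v.ReconcileServiceExportForRD(rd)
}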

func (*VSHandler) ReleasePVCOwnership

func (v *VSHandler) ReleasePVCOwnership(pvcNamespacedName types.NamespacedName) (*corev1.PersistentVolumeClaim, error)

func (*VSHandler) TakePVCOwnership

func (v *VSHandler) TakePVCOwnership(pvcNamespacedName types.NamespacedName) (bool, error)

TakePVCOwnership adds the do-not-delete annotation to indicate that ACM should not delete/clean up this PVC when the AppSub is removed, and adds the VRG as owner so the PVC is garbage collected when the VRG is deleted.
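
A sketch of taking ownership before protecting a PVC (illustrative; the helper and requeue handling are assumptions):

// Hypothetically called before creating the ReplicationSource for a PVC.
func protectPVC(v *volsync.VSHandler, pvcName, pvcNamespace string) error {
	ownershipTaken, err := v.TakePVCOwnership(types.NamespacedName{
		Name:      pvcName,
		Namespace: pvcNamespace,
	})
	if err != nil {
		return err
	}
	if !ownershipTaken {
		return fmt.Errorf("ownership of PVC %s/%s not yet taken; requeue", pvcNamespace, pvcName)
	}

	return nil
}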

func (*VSHandler) ValidateSecretAndAddVRGOwnerRef

func (v *VSHandler) ValidateSecretAndAddVRGOwnerRef(secretName string) (bool, error)

func (*VSHandler) ValidateSnapshotAndEnsurePVC

func (v *VSHandler) ValidateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec,
	snapshotRef corev1.TypedLocalObjectReference, failoverAction bool,
) error
