controller

package
v1.16.0 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 17, 2024 License: Apache-2.0 Imports: 55 Imported by: 50

Documentation

Overview

Copyright 2024 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Package config to provide conditions for CephCluster

Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons

Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons

Index

Constants

View Source
const (
	// CleanupAppName is the app name used for resource cleanup jobs.
	CleanupAppName              = "resource-cleanup"
	// RESOURCE_CLEANUP_ANNOTATION is the annotation that requests forced deletion of a resource
	// (see ForceDeleteRequested).
	RESOURCE_CLEANUP_ANNOTATION = "rook.io/force-deletion"

	// CephFSSubVolumeGroup env resources
	CephFSSubVolumeGroupNameEnv = "SUB_VOLUME_GROUP_NAME"
	CephFSNameEnv               = "FILESYSTEM_NAME"
	// NOTE(review): "Namesace" is a typo for "Namespace", but this is the published exported
	// identifier — renaming it would break external callers. Consider adding a correctly
	// spelled alias and deprecating this name in a future release.
	CSICephFSRadosNamesaceEnv   = "CSI_CEPHFS_RADOS_NAMESPACE"
	CephFSMetaDataPoolNameEnv   = "METADATA_POOL_NAME"

	// cephblockpoolradosnamespace env resources
	CephBlockPoolNameEnv           = "BLOCKPOOL_NAME"
	CephBlockPoolRadosNamespaceEnv = "RADOS_NAMESPACE"
)
View Source
const (
	// OperatorCreds is the name of the secret
	//nolint:gosec // since this is not leaking any hardcoded credentials, it's just the secret name
	OperatorCreds = "rook-ceph-operator-creds"

	// MonSecretNameKey is the key of the mon secret within the cluster secret
	MonSecretNameKey = "mon-secret"
	// AdminSecretName is the name of the admin secret
	AdminSecretNameKey = "admin-secret"
	// CephUsernameKey is the key holding the Ceph username
	CephUsernameKey    = "ceph-username"
	// CephUserSecretKey is the key holding the Ceph user secret
	CephUserSecretKey  = "ceph-secret"
	// EndpointConfigMapName is the name of the configmap with mon endpoints
	EndpointConfigMapName = "rook-ceph-mon-endpoints"
	// EndpointDataKey is the name of the key inside the mon configmap to get the endpoints
	EndpointDataKey = "data"
	// OutOfQuorumKey is the name of the key for tracking mons detected out of quorum
	OutOfQuorumKey = "outOfQuorum"
	// MaxMonIDKey is the name of the max mon id used
	MaxMonIDKey = "maxMonId"
	// MappingKey is the name of the mapping for the mon->node and node->port
	MappingKey = "mapping"
	// AppName is the name of the secret storing cluster mon.admin key, fsid and name
	AppName                         = "rook-ceph-mon"
	// DisasterProtectionFinalizerName is the finalizer protecting critical resources from deletion
	DisasterProtectionFinalizerName = cephv1.CustomResourceGroup + "/disaster-protection"
)
View Source
const (
	// OperatorSettingConfigMapName refers to the ConfigMap that configures the rook ceph operator
	OperatorSettingConfigMapName string = "rook-ceph-operator-config"

	// UninitializedCephConfigError refers to the error message printed by the Ceph CLI when there is no ceph configuration file.
	// This typically is raised when the operator has not finished initializing.
	UninitializedCephConfigError = "error calling conf_read_file"

	// OperatorNotInitializedMessage is the message we print when the Operator is not ready to reconcile,
	// typically because the ceph.conf has not been generated yet.
	OperatorNotInitializedMessage = "skipping reconcile since operator is still initializing"
)
View Source
const (

	// RBDMirrorBootstrapPeerSecretName is the key under which the RBD mirror bootstrap peer secret name is referenced
	//nolint:gosec // since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
	RBDMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName"
	// FSMirrorBootstrapPeerSecretName is the key under which the filesystem mirror bootstrap peer secret name is referenced
	//nolint:gosec // since this is not leaking any hardcoded credentials, it's just the prefix of the secret name
	FSMirrorBootstrapPeerSecretName = "fsMirrorBootstrapPeerSecretName"
)
View Source
const (
	// ConfigInitContainerName is the name which is given to the config initialization container
	// in all Ceph pods.
	ConfigInitContainerName = "config-init"

	// DaemonIDLabel is the label key reporting the Ceph daemon ID (e.g. "a" for mon.a).
	DaemonIDLabel = "ceph_daemon_id"

	// ExternalMgrAppName is the app name for the external manager service.
	ExternalMgrAppName        = "rook-ceph-mgr-external"
	// ExternalCephExporterName is the app name for the external ceph-exporter service.
	ExternalCephExporterName  = "rook-ceph-exporter-external"
	// ServiceExternalMetricName is the port name used for external metrics.
	ServiceExternalMetricName = "http-external-metrics"
	// CephUserID is the uid of the ceph user — presumably the uid/gid baked into Ceph
	// container images (167); TODO confirm against the image definition.
	CephUserID                = int64(167)
)
View Source
const (
	// CephVersionLabelKey is the key used for reporting the Ceph version which Rook has detected is
	// configured for the labeled resource. Values are serialized/deserialized with
	// GetCephVersionLabel and ExtractCephVersionFromLabel.
	CephVersionLabelKey = "ceph-version"
)
View Source
const (
	// DoNotReconcileLabelName is the label key checked by IsDoNotReconcile to skip
	// reconciliation of the labeled resource.
	DoNotReconcileLabelName = "do_not_reconcile"
)

Variables

View Source
var (
	// ImmediateRetryResult Return this for an immediate retry of the reconciliation loop with the same request object.
	ImmediateRetryResult = reconcile.Result{Requeue: true}

	// ImmediateRetryResultNoBackoff Return this for an immediate retry of the reconciliation loop with the same request object.
	// Override the exponential backoff behavior by setting the RequeueAfter time explicitly.
	ImmediateRetryResultNoBackoff = reconcile.Result{Requeue: true, RequeueAfter: time.Second}

	// WaitForRequeueIfCephClusterNotReady waits for the CephCluster to be ready
	WaitForRequeueIfCephClusterNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}

	// WaitForRequeueIfCephClusterIsUpgrading waits until the upgrade is complete
	WaitForRequeueIfCephClusterIsUpgrading = reconcile.Result{Requeue: true, RequeueAfter: time.Minute}

	// WaitForRequeueIfFinalizerBlocked waits for resources to be cleaned up before the finalizer can be removed
	WaitForRequeueIfFinalizerBlocked = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}

	// WaitForRequeueIfOperatorNotInitialized waits for the operator to finish its initialization
	// before reconciling (previous comment was copy-pasted from the finalizer case above).
	WaitForRequeueIfOperatorNotInitialized = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}

	// OperatorCephBaseImageVersion is the ceph version in the operator image
	OperatorCephBaseImageVersion string
)
View Source
var (
	// ClusterInfoNoClusterNoSecret is the sentinel error returned when cluster info is requested
	// but no existing secret was found and creating new cluster info is not expected.
	ClusterInfoNoClusterNoSecret = errors.New("not expected to create new cluster info and did not find existing secret")
)
View Source
// ClusterResource is the operator-kit CustomResource definition for the CephCluster CRD.
var ClusterResource = k8sutil.CustomResource{
	Name:       "cephcluster",
	Plural:     "cephclusters",
	Group:      cephv1.CustomResourceGroup,
	Version:    cephv1.Version,
	Kind:       reflect.TypeOf(cephv1.CephCluster{}).Name(),
	APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version),
}

ClusterResource is the operator-kit Custom Resource Definition for the CephCluster resource.

Functions

func AddCephVersionLabelToDaemonSet

func AddCephVersionLabelToDaemonSet(cephVersion version.CephVersion, d *apps.DaemonSet)

AddCephVersionLabelToDaemonSet adds a label reporting the Ceph version which Rook has detected is running in the DaemonSet's pods.

func AddCephVersionLabelToDeployment

func AddCephVersionLabelToDeployment(cephVersion version.CephVersion, d *apps.Deployment)

AddCephVersionLabelToDeployment adds a label reporting the Ceph version which Rook has detected is running in the Deployment's pods.

func AddCephVersionLabelToJob

func AddCephVersionLabelToJob(cephVersion version.CephVersion, j *batch.Job)

AddCephVersionLabelToJob adds a label reporting the Ceph version which Rook has detected is running in the Job's pods.

func AddCephVersionLabelToObjectMeta

func AddCephVersionLabelToObjectMeta(cephVersion version.CephVersion, meta *metav1.ObjectMeta)

func AddFinalizerIfNotPresent

func AddFinalizerIfNotPresent(ctx context.Context, client client.Client, obj client.Object) error

AddFinalizerIfNotPresent adds a finalizer to an object to avoid instant deletion of the object without finalizing it.

func AddVolumeMountSubPath

func AddVolumeMountSubPath(podSpec *v1.PodSpec, volumeMountName string)

AddVolumeMountSubPath updates each init and regular container of the podspec such that each volume mount attached to a container is mounted under a subpath in the source volume. This is important because some daemons may not start if the volume mount directory is non-empty. When the volume is the root of an ext4 file system, one may find a "lost+found" directory.

func AdminFlags

func AdminFlags(cluster *client.ClusterInfo) []string

AdminFlags returns the command line flags used for Ceph commands requiring admin authentication.

func AppLabels

func AppLabels(appName, namespace string) map[string]string

AppLabels returns labels common for all Rook-Ceph applications which may be useful for admins. App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc.

func ApplyCephNetworkSettings added in v1.12.4

func ApplyCephNetworkSettings(
	ctx context.Context,
	rookImage string,
	clusterdContext *clusterd.Context,
	clusterSpec *cephv1.ClusterSpec,
	clusterInfo *cephclient.ClusterInfo,
) error

func ApplyNetworkEnv added in v1.12.4

func ApplyNetworkEnv(cephClusterSpec *cephv1.ClusterSpec) []v1.EnvVar

func CephDaemonAppLabels added in v1.4.0

func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, parentName, resourceKind string, includeNewLabels bool) map[string]string

CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw" Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c" ParentName is the resource metadata.name: "rook-ceph", "my-cluster", etc ResourceKind is the CR type: "CephCluster", "CephFilesystem", etc

func CephSecurityContext added in v1.9.13

func CephSecurityContext() *v1.SecurityContext

CephSecurityContext detects if the pod needs privileges to run.

func CephVolumeMounts

func CephVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount

CephVolumeMounts returns the common list of Kubernetes volume mounts for Ceph containers. This function is only used for OSDs.

func CheckPodMemory

func CheckPodMemory(name string, resources v1.ResourceRequirements, cephPodMinimumMemory uint64) error

CheckPodMemory verifies that the pod's memory limit is valid.

func ChownCephDataDirsInitContainer

func ChownCephDataDirsInitContainer(
	dpm config.DataPathMap,
	containerImage string,
	containerImagePullPolicy v1.PullPolicy,
	volumeMounts []v1.VolumeMount,
	resources v1.ResourceRequirements,
	securityContext *v1.SecurityContext,
	configDir string,
) v1.Container

ChownCephDataDirsInitContainer returns an init container which `chown`s the given data directories as the `ceph:ceph` user in the container. It also `chown`s the Ceph log dir in the container automatically. Doing a chown in a post start lifecycle hook does not reliably complete before the OSD process starts, which can cause the pod to fail without the lifecycle hook's chown command completing. It can take an arbitrarily long time for a pod restart to successfully chown the directory. This is a race condition for all daemons; therefore, do this in an init container. See more discussion here: https://github.com/rook/rook/pull/3594#discussion_r312279176

func ClusterOwnerRef added in v1.4.0

func ClusterOwnerRef(clusterName, clusterID string) metav1.OwnerReference

ClusterOwnerRef represents the owner reference of the CephCluster CR

func ConfGeneratedInPodVolumeAndMount added in v1.5.0

func ConfGeneratedInPodVolumeAndMount() (v1.Volume, v1.VolumeMount)

ConfGeneratedInPodVolumeAndMount generates an emptyDir volume and mount for /etc/ceph.

func ConfigureExternalMetricsEndpoint added in v1.6.3

func ConfigureExternalMetricsEndpoint(ctx *clusterd.Context, monitoringSpec cephv1.MonitoringSpec, clusterInfo *client.ClusterInfo, ownerInfo *k8sutil.OwnerInfo) error

func ContainerEnvVarReference

func ContainerEnvVarReference(envVarName string) string

ContainerEnvVarReference returns a reference to a Kubernetes container env var of the given name which can be used in command or argument fields.

func CreateBootstrapPeerSecret added in v1.7.1

func CreateBootstrapPeerSecret(ctx *clusterd.Context, clusterInfo *cephclient.ClusterInfo, object client.Object, ownerInfo *k8sutil.OwnerInfo) (reconcile.Result, error)

func CreateOrUpdateObject

func CreateOrUpdateObject(ctx context.Context, client client.Client, obj client.Object) error

CreateOrUpdateObject updates an object with a given status

func CurrentAndDesiredCephVersion added in v1.8.0

func CurrentAndDesiredCephVersion(ctx context.Context, rookImage, namespace, jobName string, ownerInfo *k8sutil.OwnerInfo, context *clusterd.Context, cephClusterSpec *cephv1.ClusterSpec, clusterInfo *cephclient.ClusterInfo) (*cephver.CephVersion, *cephver.CephVersion, error)

func DaemonEnvVars

func DaemonEnvVars(cephClusterSpec *cephv1.ClusterSpec) []v1.EnvVar

DaemonEnvVars returns the container environment variables used by all Ceph daemons.

func DaemonFlags

func DaemonFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec, daemonID string) []string

DaemonFlags returns the command line flags used by all Ceph daemons.

func DaemonVolumeMounts

func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string, dataDirHostPath string) []v1.VolumeMount

DaemonVolumeMounts returns volume mounts which correspond to the DaemonVolumes. These volume mounts are shared by most all Ceph daemon containers, both init and standard. If keyring resource name is empty, there will be no keyring mounted in the container.

func DaemonVolumes

func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string, dataDirHostPath string) []v1.Volume

DaemonVolumes returns the pod volumes used by all Ceph daemons. If keyring resource name is empty, there will be no keyring volume created from a secret.

func DaemonVolumesBase

func DaemonVolumesBase(dataPaths *config.DataPathMap, keyringResourceName string, dataDirHostPath string) []v1.Volume

DaemonVolumesBase returns the common / static set of volumes.

func DaemonVolumesContainsPVC

func DaemonVolumesContainsPVC(volumes []v1.Volume) bool

DaemonVolumesContainsPVC returns true if a volume exists with a volume source configured with a persistent volume claim.

func DaemonVolumesDataHostPath

func DaemonVolumesDataHostPath(dataPaths *config.DataPathMap) []v1.Volume

DaemonVolumesDataHostPath returns HostPath volume source for daemon container data.

func DaemonVolumesDataPVC

func DaemonVolumesDataPVC(pvcName string) v1.Volume

DaemonVolumesDataPVC returns a PVC volume source for daemon container data.

func DetectCephVersion added in v1.8.0

func DetectCephVersion(ctx context.Context, rookImage, namespace, jobName string, ownerInfo *k8sutil.OwnerInfo, clientset kubernetes.Interface, cephClusterSpec *cephv1.ClusterSpec) (*cephver.CephVersion, error)

DetectCephVersion loads the ceph version from the image and checks that it meets the version requirements to run in the cluster

func DiscoveryDaemonEnabled added in v1.6.0

func DiscoveryDaemonEnabled(data map[string]string) bool

func DuplicateCephClusters added in v1.8.2

func DuplicateCephClusters(ctx context.Context, c client.Client, object client.Object, log bool) bool

DuplicateCephClusters determines whether a similar object exists in the same namespace. It is mainly used for the CephCluster, of which only a single instance per namespace is supported.

func EnforceHostNetwork added in v1.15.2

func EnforceHostNetwork() bool

func ErrorCephUpgradingRequeue added in v1.8.0

func ErrorCephUpgradingRequeue(runningCephVersion, desiredCephVersion *cephver.CephVersion) error

func ExtractCephVersionFromLabel

func ExtractCephVersionFromLabel(labelVersion string) (*version.CephVersion, error)

ExtractCephVersionFromLabel returns a CephVersion struct deserialized from a provided version label.

func ExtractKey added in v1.9.2

func ExtractKey(contents string) (string, error)

ExtractKey retrieves mon secret key from the keyring file

func ForceDeleteRequested added in v1.14.1

func ForceDeleteRequested(annotations map[string]string) bool

ForceDeleteRequested returns true if `rook.io/force-deletion:true` annotation is available on the resource

func GenerateBootstrapPeerSecret added in v1.7.1

func GenerateBootstrapPeerSecret(object client.Object, token []byte) *v1.Secret

GenerateBootstrapPeerSecret generates a Kubernetes Secret for the mirror bootstrap peer token

func GenerateLivenessProbeExecDaemon

func GenerateLivenessProbeExecDaemon(daemonType, daemonID string) *v1.Probe

GenerateLivenessProbeExecDaemon generates a liveness probe that makes sure a daemon has a socket, that it can be called, and that it returns 0

func GenerateLivenessProbeTcpPort added in v1.12.8

func GenerateLivenessProbeTcpPort(port, failureThreshold int32) *v1.Probe

GenerateLivenessProbeTcpPort generates a liveness probe that makes sure a daemon has a TCP socket bound to a specific port and can accept a new connection.

func GenerateLivenessProbeViaRpcinfo added in v1.12.8

func GenerateLivenessProbeViaRpcinfo(port uint16, failureThreshold int32) *v1.Probe

GenerateLivenessProbeViaRpcinfo creates a liveness probe using the 'rpcinfo' shell command, which checks that the local NFS daemon has a TCP socket bound to a specific port and gives a valid reply to a NULL RPC request.

func GenerateMinimalCephConfInitContainer

func GenerateMinimalCephConfInitContainer(
	username, keyringPath string,
	containerImage string,
	containerImagePullPolicy v1.PullPolicy,
	volumeMounts []v1.VolumeMount,
	resources v1.ResourceRequirements,
	securityContext *v1.SecurityContext,
) v1.Container

GenerateMinimalCephConfInitContainer returns an init container that will generate the most basic Ceph config for connecting non-Ceph daemons to a Ceph cluster (e.g., nfs-ganesha). Effectively what this means is that it generates '/etc/ceph/ceph.conf' with 'mon_host' populated and a keyring path associated with the user given. 'mon_host' is determined by the 'ROOK_CEPH_MON_HOST' env var present in other Ceph daemon pods, and the keyring is expected to be mounted into the container with a Kubernetes pod volume+mount.

func GenerateStartupProbeExecDaemon added in v1.8.2

func GenerateStartupProbeExecDaemon(daemonType, daemonID string) *v1.Probe

GenerateStartupProbeExecDaemon generates a startup probe that makes sure a daemon has a socket, that it can be called, and that it returns 0

func GenerateStatusInfo added in v1.7.1

func GenerateStatusInfo(object client.Object) map[string]string

func GetCephVersionLabel

func GetCephVersionLabel(cephVersion version.CephVersion) string

GetCephVersionLabel returns a formatted serialization of a provided CephVersion for use in resource labels.

func GetContainerImagePullPolicy added in v1.10.3

func GetContainerImagePullPolicy(containerImagePullPolicy v1.PullPolicy) v1.PullPolicy

func GetDaemonsToSkipReconcile added in v1.13.0

func GetDaemonsToSkipReconcile(ctx context.Context, clusterd *clusterd.Context, namespace, daemonName, label string) (sets.Set[string], error)

func GetImageVersion added in v1.3.6

func GetImageVersion(cephCluster cephv1.CephCluster) (*cephver.CephVersion, error)

GetImageVersion returns the CephVersion registered for a specified image (if any) and whether any image was found.

func GetLogRotateConfig added in v1.15.0

func GetLogRotateConfig(c cephv1.ClusterSpec) (resource.Quantity, string)

func HostPathRequiresPrivileged added in v1.7.8

func HostPathRequiresPrivileged() bool

func IsDoNotReconcile added in v1.4.8

func IsDoNotReconcile(labels map[string]string) bool

func IsReadyToReconcile

func IsReadyToReconcile(ctx context.Context, c client.Client, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result)

IsReadyToReconcile determines if a controller is ready to reconcile or not

func LogCollectorContainer added in v1.5.2

func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec, additionalLogFiles ...string) *v1.Container

LogCollectorContainer returns a container that rotates logs.

func LoopDevicesAllowed added in v1.10.6

func LoopDevicesAllowed() bool

func NetworkBindingFlags added in v1.6.0

func NetworkBindingFlags(cluster *client.ClusterInfo, spec *cephv1.ClusterSpec) []string

func ObjectToCRMapper added in v1.4.0

func ObjectToCRMapper(ctx context.Context, c client.Client, ro runtime.Object, scheme *runtime.Scheme) (handler.MapFunc, error)

ObjectToCRMapper returns the list of a given object type metadata It is used to trigger a reconcile object Kind A when watching object Kind B So we reconcile Kind A instead of Kind B For instance, we watch for CephCluster CR changes but want to reconcile CephFilesystem based on a Spec change

func ParseMonEndpoints added in v1.9.2

func ParseMonEndpoints(input string) map[string]*cephclient.MonInfo

ParseMonEndpoints parses a flattened representation of mons and endpoints in the form <mon-name>=<mon-endpoint> and returns a list of Ceph mon configs.

func PodSecurityContext added in v1.5.2

func PodSecurityContext() *v1.SecurityContext

PodSecurityContext detects if the pod needs privileges to run

func PodVolumes

func PodVolumes(dataPaths *config.DataPathMap, dataDirHostPath string, exporterHostPath string, confGeneratedInPod bool) []v1.Volume

PodVolumes fills in the volumes parameter with the common list of Kubernetes volumes for use in Ceph pods. This function is only used for OSDs.

func PopulateExternalClusterInfo added in v1.9.2

func PopulateExternalClusterInfo(cephClusterSpec *cephv1.ClusterSpec, context *clusterd.Context, ctx context.Context, namespace string, ownerInfo *k8sutil.OwnerInfo) (*cephclient.ClusterInfo, error)

PopulateExternalClusterInfo populates the cluster info for an external cluster. Validation fails — and it keeps waiting — while the external cluster has no OSDs.

func PrivilegedContext added in v1.8.1

func PrivilegedContext(runAsRoot bool) *v1.SecurityContext

PrivilegedContext returns a privileged Pod security context

func ReloadManager added in v1.8.0

func ReloadManager()

func RemoveFinalizer

func RemoveFinalizer(ctx context.Context, client client.Client, obj client.Object) error

RemoveFinalizer removes a finalizer from an object

func RemoveFinalizerWithName added in v1.8.0

func RemoveFinalizerWithName(ctx context.Context, client client.Client, obj client.Object, finalizerName string) error

RemoveFinalizerWithName removes finalizer passed as an argument from an object

func RevisionHistoryLimit added in v1.15.3

func RevisionHistoryLimit() *int32

func RgwOpsLogSidecarContainer added in v1.16.0

func RgwOpsLogSidecarContainer(opsLogFile, ns string, c cephv1.ClusterSpec, Resources v1.ResourceRequirements) *v1.Container

RgwOpsLogSidecarContainer returns a sidecar container in which RGW operations are logged (ops-log).

func RookVolumeMounts

func RookVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount

RookVolumeMounts returns the common list of Kubernetes volume mounts for Rook containers. This function is only used by OSDs.

func SetAllowLoopDevices added in v1.10.6

func SetAllowLoopDevices(data map[string]string)

func SetCephCommandsTimeout added in v1.7.1

func SetCephCommandsTimeout(data map[string]string)

SetCephCommandsTimeout sets the timeout value of Ceph commands which are executed from Rook

func SetEnforceHostNetwork added in v1.15.2

func SetEnforceHostNetwork(data map[string]string)

func SetRevisionHistoryLimit added in v1.15.3

func SetRevisionHistoryLimit(data map[string]string)

func StoredLogAndCrashVolume

func StoredLogAndCrashVolume(hostLogDir, hostCrashDir string) []v1.Volume

StoredLogAndCrashVolume returns a pod volume sourced from the stored log and crashes files.

func StoredLogAndCrashVolumeMount

func StoredLogAndCrashVolumeMount(varLogCephDir, varLibCephCrashDir string) []v1.VolumeMount

StoredLogAndCrashVolumeMount returns a pod volume sourced from the stored log and crashes files.

func UpdateClusterCondition added in v1.5.9

func UpdateClusterCondition(c *clusterd.Context, cluster *cephv1.CephCluster, namespaceName types.NamespacedName, observedGeneration int64, conditionType cephv1.ConditionType, status v1.ConditionStatus,
	reason cephv1.ConditionReason, message string, preserveAllConditions bool)

UpdateClusterCondition function will export each condition into the cluster custom resource

func UpdateCondition added in v1.5.9

func UpdateCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, observedGeneration int64, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string)

UpdateCondition function will export each condition into the cluster custom resource

func UpdateMonsOutOfQuorum added in v1.10.6

func UpdateMonsOutOfQuorum(clientset kubernetes.Interface, namespace string, monsOutOfQuorum []string) error

func ValidateCephVersionsBetweenLocalAndExternalClusters

func ValidateCephVersionsBetweenLocalAndExternalClusters(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) (cephver.CephVersion, error)

ValidateCephVersionsBetweenLocalAndExternalClusters makes sure an external cluster can be connected by checking the external ceph versions available and comparing it with the local image provided

func ValidatePeerToken added in v1.7.1

func ValidatePeerToken(object client.Object, data map[string][]byte) error

func WatchControllerPredicate

func WatchControllerPredicate() predicate.Funcs

WatchControllerPredicate is a special update filter for update events do not reconcile if the status changes, this avoids a reconcile storm loop

returning 'true' means triggering a reconciliation returning 'false' means do NOT trigger a reconciliation

func WatchPredicateForNonCRDObject

func WatchPredicateForNonCRDObject(owner runtime.Object, scheme *runtime.Scheme) predicate.Funcs

WatchPredicateForNonCRDObject is a special filter for create events It only applies to non-CRD objects, meaning, for instance a cephv1.CephBlockPool{} object will not have this filter Only for objects like &v1.Secret{} etc...

We return 'false' on a create event so we don't overstep with the main watcher on cephv1.CephBlockPool{} This avoids a double reconcile when the secret gets deleted.

Types

type ClusterHealth added in v1.8.8

// ClusterHealth is passed to the various monitoring go routines to stop them when the
// context is cancelled.
type ClusterHealth struct {
	// InternalCtx is the context monitored by the health-check go routines.
	InternalCtx    context.Context
	// InternalCancel cancels InternalCtx to stop the monitoring go routines.
	InternalCancel context.CancelFunc
}

ClusterHealth is passed to the various monitoring go routines to stop them when the context is cancelled

type Mapping added in v1.9.2

// Mapping is the mon node and port mapping.
type Mapping struct {
	// This isn't really node info since it could also be for zones, but we leave it as "node" for backward compatibility.
	Schedule map[string]*MonScheduleInfo `json:"node"`
}

Mapping is mon node and port mapping

func CreateOrLoadClusterInfo added in v1.9.2

func CreateOrLoadClusterInfo(clusterdContext *clusterd.Context, context context.Context, namespace string, ownerInfo *k8sutil.OwnerInfo, cephClusterSpec *cephv1.ClusterSpec) (*cephclient.ClusterInfo, int, *Mapping, error)

CreateOrLoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID

func LoadClusterInfo added in v1.9.2

func LoadClusterInfo(ctx *clusterd.Context, context context.Context, namespace string, cephClusterSpec *cephv1.ClusterSpec) (*cephclient.ClusterInfo, int, *Mapping, error)

LoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID

type MonScheduleInfo added in v1.9.2

// MonScheduleInfo contains the name and address of a node (or zone) a mon is scheduled on.
type MonScheduleInfo struct {
	// Name of the node. **json names are capitalized for backwards compat**
	Name     string `json:"Name,omitempty"`
	Hostname string `json:"Hostname,omitempty"`
	Address  string `json:"Address,omitempty"`
	Zone     string `json:"zone,omitempty"`
}

MonScheduleInfo contains name and address of a node.

type OperatorConfig added in v1.8.0

// OperatorConfig represents the configuration of the operator.
type OperatorConfig struct {
	// OperatorNamespace is the namespace the operator runs in.
	OperatorNamespace string
	// Image is the operator container image.
	Image             string
	// ServiceAccount is the service account the operator uses.
	ServiceAccount    string
	// NamespaceToWatch is the namespace watched for Ceph CRs (empty presumably means all
	// namespaces — TODO confirm).
	NamespaceToWatch  string
	// Parameters holds the raw operator setting key/value pairs.
	Parameters        map[string]string
}

OperatorConfig represents the configuration of the operator

type OwnerMatcher

// OwnerMatcher is a struct representing the controller owner reference to use for
// comparison with child objects; see Match.
type OwnerMatcher struct {
	// contains filtered or unexported fields
}

OwnerMatcher is a struct representing the controller owner reference to use for comparison with child objects

func NewOwnerReferenceMatcher

func NewOwnerReferenceMatcher(owner runtime.Object, scheme *runtime.Scheme) (*OwnerMatcher, error)

NewOwnerReferenceMatcher initializes a new owner reference matcher

func (*OwnerMatcher) Match

func (e *OwnerMatcher) Match(object runtime.Object) (bool, metav1.Object, error)

Match checks whether a given object matches the parent controller owner reference It is used in the predicate functions for non-CRD objects to ensure we only watch resources that have the parent Kind in its owner reference AND the same UID

So we won't reconcile other objects if we have multiple CRs.

For example, for CephObjectStore we will only watch "secrets" that have an owner reference referencing the 'CephObjectStore' Kind

type ResourceCleanup added in v1.14.1

type ResourceCleanup struct {
	// contains filtered or unexported fields
}

ResourceCleanup defines a Rook Ceph resource to be cleaned up.

func NewResourceCleanup added in v1.14.1

func NewResourceCleanup(obj k8sClient.Object, cluster *cephv1.CephCluster, rookImage string, config map[string]string) *ResourceCleanup

func (*ResourceCleanup) StartJob added in v1.14.1

func (c *ResourceCleanup) StartJob(ctx context.Context, clientset kubernetes.Interface, jobName string) error

StartJob starts a new job to perform cleanup of the Ceph resources. It returns a non-nil error if the cleanup job could not be started (the signature returns error, not bool).

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL