Documentation ¶
Overview ¶
Package osd for the Ceph OSDs.
Index ¶
- Constants
- Variables
- func DriveGroupPlacementMatchesNode(dg cephv1.DriveGroup, n *v1.Node) (bool, error)
- func DriveGroupsWithPlacementMatchingNode(dgs cephv1.DriveGroupsSpec, n *v1.Node) (cephv1.DriveGroupsSpec, error)
- func ExtractOSDTopologyFromLabels(labels map[string]string) map[string]string
- func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]*util.Set, error)
- func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, ...) (string, error)
- func MarshalAsDriveGroupBlobs(dgs cephv1.DriveGroupsSpec) (string, error)
- func PrivilegedContext() *v1.SecurityContext
- func SanitizeDriveGroups(dgs cephv1.DriveGroupsSpec) cephv1.DriveGroupsSpec
- func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)
- func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus)
- type Cluster
- type OSDHealthMonitor
- type OSDInfo
- type OrchestrationStatus
Constants ¶
const (
    // EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not
    EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE"
    PVCNameEnvVarName         = "ROOK_PVC_NAME"
    // CephVolumeEncryptedKeyEnvVarName is the env variable used by ceph-volume to encrypt the OSD (raw mode)
    // Hardcoded in ceph-volume do NOT touch
    CephVolumeEncryptedKeyEnvVarName = "CEPH_VOLUME_DMCRYPT_SECRET"
    CrushDeviceClassVarName          = "ROOK_OSD_CRUSH_DEVICE_CLASS"
    CrushRootVarName                 = "ROOK_CRUSHMAP_ROOT"
)
const (
    // CephDeviceSetLabelKey is the Rook device set label key
    CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
    // CephSetIndexLabelKey is the Rook label key index
    CephSetIndexLabelKey = "ceph.rook.io/setIndex"
    // CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
    CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
    // OSDOverPVCLabelKey is the Rook PVC label key
    OSDOverPVCLabelKey = "ceph.rook.io/pvc"
    // TopologyLocationLabel is the crush location label added to OSD deployments
    TopologyLocationLabel = "topology-location-%s"
)
const (
    // AppName is the "app" label on osd pods
    AppName = "rook-ceph-osd"
    // FailureDomainKey is the label key whose value is the failure domain of the OSD
    FailureDomainKey = "failure-domain"
    // OsdIdLabelKey is the OSD label key
    OsdIdLabelKey = "ceph-osd-id"
)
const (
    // DmcryptBlockType is a portion of the device mapper name for the encrypted OSD on PVC block
    DmcryptBlockType = "block-dmcrypt"
    // DmcryptMetadataType is a portion of the device mapper name for the encrypted OSD on PVC block.db (rocksdb db)
    DmcryptMetadataType = "db-dmcrypt"
    // DmcryptWalType is a portion of the device mapper name for the encrypted OSD on PVC wal
    DmcryptWalType = "wal-dmcrypt"
)
const (
    OrchestrationStatusStarting      = "starting"
    OrchestrationStatusComputingDiff = "computingDiff"
    OrchestrationStatusOrchestrating = "orchestrating"
    OrchestrationStatusCompleted     = "completed"
    OrchestrationStatusFailed        = "failed"
)
const (
    // OsdEncryptionSecretNameKeyName is the key name of the Secret that contains the OSD encryption key
    // #nosec G101 since this is not leaking any hardcoded credentials, it's just the secret key name
    OsdEncryptionSecretNameKeyName = "dmcrypt-key"
)
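As a minimal, hedged sketch of how the label-key constants above might be used (the osd import path and the PVC name are assumptions, not stated on this page), TopologyLocationLabel is a printf-style format string and OSDOverPVCLabelKey can be combined with a PVC name to build a label selector:

// Assumed imports: "fmt" and osd "github.com/rook/rook/pkg/operator/ceph/cluster/osd".
rackLabelKey := fmt.Sprintf(osd.TopologyLocationLabel, "rack")                 // "topology-location-rack"
pvcSelector := fmt.Sprintf("%s=%s", osd.OSDOverPVCLabelKey, "set1-data-0-xyz") // hypothetical PVC name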
Variables ¶
var (
    // The labels that can be specified with the K8s labels such as topology.kubernetes.io/zone
    // These are all at the top layers of the CRUSH map.
    KubernetesTopologyLabels = []string{"zone", "region"}
    // The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack
    CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}
    // The list of supported failure domains in the CRUSH map, ordered from lowest to highest
    CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
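A short sketch of walking the supported failure domains in order; the import path for the osd package is an assumption:

// Prints "host", then the topology.rook.io levels, then "zone" and "region".
for _, level := range osd.CRUSHMapLevelsOrdered {
    fmt.Println(level)
}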
Functions ¶
func DriveGroupPlacementMatchesNode ¶ added in v1.4.0
func DriveGroupPlacementMatchesNode(dg cephv1.DriveGroup, n *v1.Node) (bool, error)
DriveGroupPlacementMatchesNode returns true if the Drive Group's placement matches the given node. It returns false if the placement does not match the given node. It returns an error if the placement match cannot be determined.
func DriveGroupsWithPlacementMatchingNode ¶ added in v1.4.0
func DriveGroupsWithPlacementMatchingNode(dgs cephv1.DriveGroupsSpec, n *v1.Node) (cephv1.DriveGroupsSpec, error)
DriveGroupsWithPlacementMatchingNode returns a subset of the Drive Groups Spec with placement that matches the given node. It returns an error if placement cannot be determined for the node for any Drive Group in the spec.
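A minimal sketch of filtering a spec down to one node, assuming imports cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" and corev1 "k8s.io/api/core/v1" alongside the osd package:

// driveGroupsForNode is a hypothetical helper that keeps only the Drive Groups
// whose placement matches the given node.
func driveGroupsForNode(dgs cephv1.DriveGroupsSpec, node *corev1.Node) (cephv1.DriveGroupsSpec, error) {
    matching, err := osd.DriveGroupsWithPlacementMatchingNode(dgs, node)
    if err != nil {
        return cephv1.DriveGroupsSpec{}, err
    }
    // For a single group, DriveGroupPlacementMatchesNode(dg, node) answers the same question.
    return matching, nil
}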
func ExtractOSDTopologyFromLabels ¶ added in v1.2.6
func ExtractOSDTopologyFromLabels(labels map[string]string) map[string]string
ExtractOSDTopologyFromLabels extracts the Rook topology from node labels and returns a map from topology type to value
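A hedged sketch of the call; the exact keys and values returned are an assumption about the output shape:

labels := map[string]string{
    "topology.kubernetes.io/zone": "zone-a", // standard Kubernetes topology label
    "topology.rook.io/rack":       "rack-2", // Rook-prefixed CRUSH label
}
topology := osd.ExtractOSDTopologyFromLabels(labels)
// topology maps topology type to value, e.g. "zone" -> "zone-a" and "rack" -> "rack-2".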
func GetExistingPVCs ¶ added in v1.5.6
func GetExistingPVCs(clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]*util.Set, error)
GetExistingPVCs fetches the list of OSD PVCs
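A minimal sketch, assuming clusterdContext is an initialized *clusterd.Context and "rook-ceph" is the namespace holding the PVCs:

pvcs, deviceSetMembers, err := osd.GetExistingPVCs(clusterdContext, "rook-ceph")
if err != nil {
    return err
}
for name := range pvcs {
    fmt.Printf("found OSD PVC %q\n", name)
}
_ = deviceSetMembers // the second return value groups PVC names per device set (assumed meaning)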
func GetLocationWithNode ¶ added in v1.2.2
func MarshalAsDriveGroupBlobs ¶ added in v1.4.0
func MarshalAsDriveGroupBlobs(dgs cephv1.DriveGroupsSpec) (string, error)
MarshalAsDriveGroupBlobs converts a Ceph CRD <Drive Group Name> => <Drive Group Spec> mapping into a JSON-marshalled <Drive Group Name> => <Drive Group JSON blob> mapping.
func PrivilegedContext ¶ added in v1.3.0
func PrivilegedContext() *v1.SecurityContext
PrivilegedContext returns a privileged Pod security context
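A minimal sketch of attaching the returned security context to a container, assuming corev1 "k8s.io/api/core/v1"; the container name and image are placeholders:

container := corev1.Container{
    Name:            "osd",
    Image:           "ceph/ceph:v15", // placeholder image
    SecurityContext: osd.PrivilegedContext(),
}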
func SanitizeDriveGroups ¶ added in v1.4.0
func SanitizeDriveGroups(dgs cephv1.DriveGroupsSpec) cephv1.DriveGroupsSpec
SanitizeDriveGroups processes the drive groups to remove or correct invalid specs.
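A minimal sketch that chains SanitizeDriveGroups with MarshalAsDriveGroupBlobs above; how the resulting JSON string is consumed is not stated on this page:

func driveGroupsAsBlobs(dgs cephv1.DriveGroupsSpec) (string, error) {
    // Remove or correct invalid specs before serializing them.
    clean := osd.SanitizeDriveGroups(dgs)
    return osd.MarshalAsDriveGroupBlobs(clean)
}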
func UpdateLocationWithNodeLabels ¶ added in v1.2.2
func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)
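No description is published for this function; the sketch below relies only on the signature plus the assumption that it appends CRUSH location entries derived from the node's topology labels:

location := []string{"root=default", "host=node-a"}      // hypothetical starting location
osd.UpdateLocationWithNodeLabels(&location, node.Labels) // node is an assumed *corev1.Node
// location may now also contain entries such as "zone=..." or "rack=..." (assumed behavior).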
func UpdateNodeStatus ¶
func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus)
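A hedged sketch of recording a node's orchestration state; only the UpdateNodeStatus signature and the status constants come from this page, while the Status field name on OrchestrationStatus is an assumption:

status := osd.OrchestrationStatus{Status: osd.OrchestrationStatusOrchestrating} // field name assumed
osd.UpdateNodeStatus(kv, "node-a", status)                                      // kv is an assumed *k8sutil.ConfigMapKVStore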
Types ¶
type Cluster ¶
type Cluster struct {
    ValidStorage rookv1.StorageScopeSpec // valid subset of `Storage`, computed at runtime
    // contains filtered or unexported fields
}
Cluster keeps track of the OSDs
func New ¶
func New(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec, rookVersion string) *Cluster
New creates an instance of the OSD manager
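A minimal sketch of constructing the manager, assuming imports clusterd "github.com/rook/rook/pkg/clusterd", cephclient "github.com/rook/rook/pkg/daemon/ceph/client", and cephv1; the Rook version string is a placeholder:

func newOSDManager(ctx *clusterd.Context, info *cephclient.ClusterInfo, spec cephv1.ClusterSpec) *osd.Cluster {
    return osd.New(ctx, info, spec, "v1.5.6") // rookVersion placeholder
}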
type OSDHealthMonitor ¶ added in v1.3.3
type OSDHealthMonitor struct {
// contains filtered or unexported fields
}
OSDHealthMonitor defines OSD process monitoring
func NewOSDHealthMonitor ¶ added in v1.3.3
func NewOSDHealthMonitor(context *clusterd.Context, clusterInfo *client.ClusterInfo, removeOSDsIfOUTAndSafeToRemove bool, healthCheck cephv1.CephClusterHealthCheckSpec) *OSDHealthMonitor
NewOSDHealthMonitor instantiates OSD monitoring
func (*OSDHealthMonitor) Start ¶ added in v1.3.3
func (m *OSDHealthMonitor) Start(stopCh chan struct{})
Start runs the monitoring logic for OSD status at set intervals
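A minimal sketch of running the monitor; whether closing the stop channel is the intended shutdown signal is an assumption:

monitor := osd.NewOSDHealthMonitor(ctx, clusterInfo, false, healthCheck) // healthCheck is an assumed cephv1.CephClusterHealthCheckSpec
stopCh := make(chan struct{})
go monitor.Start(stopCh)
// ... later, signal the monitor to stop:
close(stopCh)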
func (*OSDHealthMonitor) Update ¶ added in v1.3.3
func (m *OSDHealthMonitor) Update(removeOSDsIfOUTAndSafeToRemove bool)
Update updates the removeOSDsIfOUTAndSafeToRemove setting
type OSDInfo ¶
type OSDInfo struct {
    ID             int    `json:"id"`
    Cluster        string `json:"cluster"`
    UUID           string `json:"uuid"`
    DevicePartUUID string `json:"device-part-uuid"`
    // BlockPath is the logical Volume path for an OSD created by Ceph-volume with format '/dev/<Volume Group>/<Logical Volume>' or simply /dev/vdb if block mode is used
    BlockPath     string `json:"lv-path"`
    MetadataPath  string `json:"metadata-path"`
    WalPath       string `json:"wal-path"`
    SkipLVRelease bool   `json:"skip-lv-release"`
    Location      string `json:"location"`
    LVBackedPV    bool   `json:"lv-backed-pv"`
    CVMode        string `json:"lv-mode"`
    Store         string `json:"store"`
}
OSDInfo represents all the properties of a given OSD
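A small sketch showing the JSON keys implied by the struct tags above, assuming "encoding/json" and "fmt"; all values are placeholders:

info := osd.OSDInfo{
    ID:        0,
    Cluster:   "ceph",
    UUID:      "00000000-0000-0000-0000-000000000000",
    BlockPath: "/dev/vdb",
    CVMode:    "raw",
    Store:     "bluestore",
}
b, err := json.Marshal(info)
if err == nil {
    fmt.Println(string(b)) // keys appear as "id", "cluster", "uuid", "lv-path", "lv-mode", "store", ...
}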