Documentation ¶
Overview ¶
Package osd for the Ceph OSDs.
Index ¶
- Constants
- Variables
- func ExtractOSDTopologyFromLabels(labels map[string]string) (map[string]string, string)
- func GetExistingPVCs(ctx context.Context, clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]sets.String, error)
- func GetLocationWithNode(ctx context.Context, clientset kubernetes.Interface, nodeName string, ...) (string, string, error)
- func UpdateNodeOrPVCStatus(ctx context.Context, kv *k8sutil.ConfigMapKVStore, nodeOrPVC string, ...) string
- type Cluster
- type OSDHealthMonitor
- type OSDInfo
- type OrchestrationStatus
Constants ¶
const (
	// EncryptedDeviceEnvVarName is used in the pod spec to indicate whether the OSD is encrypted or not
	EncryptedDeviceEnvVarName = "ROOK_ENCRYPTED_DEVICE"
	PVCNameEnvVarName         = "ROOK_PVC_NAME"
	// CephVolumeEncryptedKeyEnvVarName is the env variable used by ceph-volume to encrypt the OSD (raw mode)
	// Hardcoded in ceph-volume do NOT touch
	CephVolumeEncryptedKeyEnvVarName = "CEPH_VOLUME_DMCRYPT_SECRET"
	// PVCBackedOSDVarName indicates whether the OSD is on PVC ("true") or not ("false")
	PVCBackedOSDVarName       = "ROOK_PVC_BACKED_OSD"
	CrushDeviceClassVarName   = "ROOK_OSD_CRUSH_DEVICE_CLASS"
	CrushInitialWeightVarName = "ROOK_OSD_CRUSH_INITIAL_WEIGHT"
	CrushRootVarName          = "ROOK_CRUSHMAP_ROOT"
)
const (
	// CephDeviceSetLabelKey is the Rook device set label key
	CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
	// CephSetIndexLabelKey is the Rook label key index
	CephSetIndexLabelKey = "ceph.rook.io/setIndex"
	// CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
	CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
	// OSDOverPVCLabelKey is the Rook PVC label key
	OSDOverPVCLabelKey = "ceph.rook.io/pvc"
	// TopologyLocationLabel is the crush location label added to OSD deployments
	TopologyLocationLabel = "topology-location-%s"
)
const (
	// AppName is the "app" label on osd pods
	AppName = "rook-ceph-osd"
	// FailureDomainKey is the label key whose value is the failure domain of the OSD
	FailureDomainKey = "failure-domain"
	// OsdIdLabelKey is the OSD label key
	OsdIdLabelKey = "ceph-osd-id"
)
const (
	// DmcryptBlockType is a portion of the device mapper name for the encrypted OSD on PVC block.db (rocksdb db)
	DmcryptBlockType = "block-dmcrypt"
	// DmcryptMetadataType is a portion of the device mapper name for the encrypted OSD on PVC block
	DmcryptMetadataType = "db-dmcrypt"
	// DmcryptWalType is a portion of the device mapper name for the encrypted OSD on PVC wal
	DmcryptWalType = "wal-dmcrypt"
)
const (
	// OrchestrationStatusStarting denotes the OSD provisioning is beginning.
	OrchestrationStatusStarting = "starting"
	// OrchestrationStatusOrchestrating denotes the OSD provisioning has begun and is running.
	OrchestrationStatusOrchestrating = "orchestrating"
	// OrchestrationStatusCompleted denotes the OSD provisioning has completed. This does not imply
	// the provisioning completed successfully in whole or in part.
	OrchestrationStatusCompleted = "completed"
	// OrchestrationStatusFailed denotes the OSD provisioning has failed.
	OrchestrationStatusFailed = "failed"
)
Variables ¶
var (
	// The labels that can be specified with the K8s labels such as topology.kubernetes.io/zone
	// These are all at the top layers of the CRUSH map.
	KubernetesTopologyLabels = []string{"zone", "region"}
	// The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack
	// The labels are in order from lowest to highest in the CRUSH hierarchy
	CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}
	// The list of supported failure domains in the CRUSH map, ordered from lowest to highest
	CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
Functions ¶
func ExtractOSDTopologyFromLabels ¶ added in v1.2.6
func ExtractOSDTopologyFromLabels(labels map[string]string) (map[string]string, string)

ExtractOSDTopologyFromLabels extracts rook topology from labels and returns a map from topology type to value
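A minimal sketch of calling this function; the node label values below are illustrative assumptions, and the import path is the rook osd package documented here:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	// Hypothetical node labels; both the well-known Kubernetes topology labels
	// and the topology.rook.io/* labels are recognized (see Variables below).
	labels := map[string]string{
		"topology.kubernetes.io/region": "us-east-1",
		"topology.kubernetes.io/zone":   "us-east-1a",
		"topology.rook.io/rack":         "rack-2",
	}

	topology, affinity := osd.ExtractOSDTopologyFromLabels(labels)
	fmt.Println(topology) // map from topology type (e.g. "region", "zone", "rack") to value
	fmt.Println(affinity) // topology affinity label for the OSD daemon
}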
func GetExistingPVCs ¶ added in v1.5.6
func GetExistingPVCs(ctx context.Context, clusterdContext *clusterd.Context, namespace string) (map[string]*v1.PersistentVolumeClaim, map[string]sets.String, error)
GetExistingPVCs fetches the list of OSD PVCs
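A hedged sketch of listing the existing OSD PVCs; the clientset and the "rook-ceph" namespace are assumptions for illustration, and the snippet assumes imports of context, fmt, k8s.io/client-go/kubernetes, github.com/rook/rook/pkg/clusterd, and this osd package:

// Hedged sketch; clientset is an assumed, already-initialized kubernetes.Interface.
func listOSDPVCs(clientset kubernetes.Interface) error {
	clusterdCtx := &clusterd.Context{Clientset: clientset}
	pvcs, pvcSets, err := osd.GetExistingPVCs(context.TODO(), clusterdCtx, "rook-ceph")
	if err != nil {
		return err
	}
	for name := range pvcs {
		fmt.Println("found OSD PVC:", name) // keys of the first returned map
	}
	_ = pvcSets // second return value, map[string]sets.String per the signature above
	return nil
}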
func GetLocationWithNode ¶ added in v1.2.2
func GetLocationWithNode(ctx context.Context, clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error)
GetLocationWithNode gets the topology information about the node. The return values are:
location: The CRUSH properties for the OSD to apply
topologyAffinity: The label to be applied to the OSD daemon to guarantee it will start in the same topology as the OSD prepare job.
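A minimal sketch of a call, assuming an already-initialized clientset; the node name and crush root are illustrative:

// Hedged sketch; clientset is an assumed kubernetes.Interface.
func nodeLocation(clientset kubernetes.Interface) error {
	location, topologyAffinity, err := osd.GetLocationWithNode(
		context.TODO(), clientset, "worker-node-1", "default", "worker-node-1")
	if err != nil {
		return err
	}
	fmt.Println(location)         // CRUSH properties for the OSD (see description above)
	fmt.Println(topologyAffinity) // label used to co-locate the OSD daemon with its prepare job
	return nil
}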
func UpdateNodeOrPVCStatus ¶ added in v1.7.3
func UpdateNodeOrPVCStatus(ctx context.Context, kv *k8sutil.ConfigMapKVStore, nodeOrPVC string, status OrchestrationStatus) string
UpdateNodeOrPVCStatus updates the status ConfigMap for the OSD on the given node or PVC. It returns the name of the ConfigMap used.
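A hedged sketch of recording a provisioning status; the kv store is assumed to already exist, and the Status field of OrchestrationStatus is an assumption, since its fields are not shown on this page:

// Hedged sketch; kv is an assumed, existing *k8sutil.ConfigMapKVStore.
status := osd.OrchestrationStatus{Status: osd.OrchestrationStatusStarting}
cmName := osd.UpdateNodeOrPVCStatus(context.TODO(), kv, "worker-node-1", status)
fmt.Println("status recorded in ConfigMap:", cmName)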
Types ¶
type Cluster ¶
type Cluster struct {
	ValidStorage cephv1.StorageScopeSpec // valid subset of `Storage`, computed at runtime
	// contains filtered or unexported fields
}
Cluster keeps track of the OSDs
func New ¶
func New(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, spec cephv1.ClusterSpec, rookVersion string) *Cluster
New creates an instance of the OSD manager
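A minimal sketch of constructing the OSD manager; the inputs are assumed to be prepared elsewhere and the Rook version string is illustrative:

// Hedged sketch; clusterdCtx, clusterInfo, and spec are assumed to be initialized elsewhere
// (*clusterd.Context, *cephclient.ClusterInfo, cephv1.ClusterSpec).
c := osd.New(clusterdCtx, clusterInfo, spec, "v1.9.0") // version string is illustrative
_ = c // *Cluster keeps track of the OSDs for this Ceph cluster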
func (*Cluster) PrepareStorageClassDeviceSets ¶ added in v1.8.7
PrepareStorageClassDeviceSets is only exposed for testing purposes
type OSDHealthMonitor ¶ added in v1.3.3
type OSDHealthMonitor struct {
// contains filtered or unexported fields
}
OSDHealthMonitor defines OSD process monitoring
func NewOSDHealthMonitor ¶ added in v1.3.3
func NewOSDHealthMonitor(context *clusterd.Context, clusterInfo *client.ClusterInfo, removeOSDsIfOUTAndSafeToRemove bool, healthCheck cephv1.CephClusterHealthCheckSpec) *OSDHealthMonitor
NewOSDHealthMonitor instantiates OSD monitoring
func (*OSDHealthMonitor) Start ¶ added in v1.3.3
func (m *OSDHealthMonitor) Start(monitoringRoutines map[string]*opcontroller.ClusterHealth, daemon string)
Start runs monitoring logic for OSD status at set intervals
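A hedged sketch of wiring up the monitor; the inputs are assumed to come from the operator, and the "osd" daemon key is an illustrative assumption:

// Hedged sketch; clusterdCtx, clusterInfo, and monitoringRoutines (the shared
// map[string]*opcontroller.ClusterHealth) are assumed to be provided by the operator.
monitor := osd.NewOSDHealthMonitor(clusterdCtx, clusterInfo, false, cephv1.CephClusterHealthCheckSpec{})
go monitor.Start(monitoringRoutines, "osd") // periodic OSD status checks, per the description above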
func (*OSDHealthMonitor) Update ¶ added in v1.3.3
func (m *OSDHealthMonitor) Update(removeOSDsIfOUTAndSafeToRemove bool)
Update updates the removeOSDsIfOUTAndSafeToRemove setting
type OSDInfo ¶
type OSDInfo struct {
	ID             int    `json:"id"`
	Cluster        string `json:"cluster"`
	UUID           string `json:"uuid"`
	DevicePartUUID string `json:"device-part-uuid"`
	DeviceClass    string `json:"device-class"`
	// BlockPath is the logical Volume path for an OSD created by Ceph-volume with format '/dev/<Volume Group>/<Logical Volume>' or simply /dev/vdb if block mode is used
	BlockPath     string `json:"lv-path"`
	MetadataPath  string `json:"metadata-path"`
	WalPath       string `json:"wal-path"`
	SkipLVRelease bool   `json:"skip-lv-release"`
	Location      string `json:"location"`
	LVBackedPV    bool   `json:"lv-backed-pv"`
	CVMode        string `json:"lv-mode"`
	Store         string `json:"store"`
	// Ensure the OSD daemon has affinity with the same topology from the OSD prepare pod
	TopologyAffinity string `json:"topologyAffinity"`
	Encrypted        bool   `json:"encrypted"`
}
OSDInfo represents all the properties of a given OSD
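Since OSDInfo carries JSON tags, a small sketch of serializing it; the field values below are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	info := osd.OSDInfo{
		ID:        3,
		Cluster:   "ceph",
		UUID:      "d9f07a2e-0000-0000-0000-000000000000", // illustrative
		BlockPath: "/dev/vdb",
		CVMode:    "raw",
		Store:     "bluestore",
	}
	b, err := json.Marshal(info)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // fields are emitted under the tags shown above, e.g. "lv-path" for BlockPath
}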