Documentation ¶
Overview ¶
Package osd for the Ceph OSDs.
Package config provides methods for generating the Ceph config for a Ceph cluster and for producing a "ceph.conf" compatible file from the config as well as Ceph command line-compatible flags.
Index ¶
- Constants
- Variables
- func ExtractOSDTopologyFromLabels(labels map[string]string) map[string]string
- func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushHostname string) (string, error)
- func PrivilegedContext() *v1.SecurityContext
- func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)
- func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus)
- type Cluster
- type Monitor
- type OSDInfo
- type OrchestrationStatus
Constants ¶
const (
    // AppName is the "app" label on osd pods
    AppName = "rook-ceph-osd"

    // FailureDomainKey is the label key whose value is the failure domain of the OSD
    FailureDomainKey = "failure-domain"

    // OsdIdLabelKey is the OSD label key
    OsdIdLabelKey = "ceph-osd-id"
)
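These constants are handy when selecting OSD pods by label. A minimal sketch, assuming the package is imported as osd from github.com/rook/rook/pkg/operator/ceph/cluster/osd; the helper name is illustrative:

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
    )

    // osdPodListOptions builds list options that match the pod backing a single OSD ID,
    // using only the exported label constants above.
    func osdPodListOptions(osdID int) metav1.ListOptions {
        selector := fmt.Sprintf("app=%s,%s=%d", osd.AppName, osd.OsdIdLabelKey, osdID)
        return metav1.ListOptions{LabelSelector: selector}
    }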
const (
    CrushDeviceClassVarName = "ROOK_OSD_CRUSH_DEVICE_CLASS"

    // CephDeviceSetLabelKey is the Rook device set label key
    CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"

    // CephSetIndexLabelKey is the Rook device set index label key
    CephSetIndexLabelKey = "ceph.rook.io/setIndex"

    // CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
    CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"

    // OSDOverPVCLabelKey is the Rook PVC label key
    OSDOverPVCLabelKey = "ceph.rook.io/pvc"
)
const (
    OrchestrationStatusStarting      = "starting"
    OrchestrationStatusComputingDiff = "computingDiff"
    OrchestrationStatusOrchestrating = "orchestrating"
    OrchestrationStatusCompleted     = "completed"
    OrchestrationStatusFailed        = "failed"
)
Variables ¶
var (
    // KubernetesTopologyLabels are the labels that can be specified with the K8s labels such as
    // topology.kubernetes.io/zone. These are all at the top layers of the CRUSH map.
    KubernetesTopologyLabels = []string{"zone", "region"}

    // CRUSHTopologyLabels are the node labels that are supported with the topology.rook.io prefix
    // such as topology.rook.io/rack
    CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}

    // CRUSHMapLevelsOrdered is the list of supported failure domains in the CRUSH map, ordered
    // from lowest to highest
    CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
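The ordering of CRUSHMapLevelsOrdered also makes it easy to check whether a requested failure domain is one of the supported CRUSH levels. A minimal sketch; the helper name is hypothetical:

    // isSupportedFailureDomain reports whether the given failure domain appears in the
    // list of CRUSH levels supported by Rook, from "host" up to "region".
    func isSupportedFailureDomain(domain string) bool {
        for _, level := range osd.CRUSHMapLevelsOrdered {
            if level == domain {
                return true
            }
        }
        return false
    }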
Functions ¶
func ExtractOSDTopologyFromLabels ¶ added in v1.2.6
func ExtractOSDTopologyFromLabels(labels map[string]string) map[string]string

ExtractOSDTopologyFromLabels extracts rook topology from labels and returns a map from topology type to value
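A minimal usage sketch, assuming the labels map has already been read from a Kubernetes node object; the label keys and values are illustrative:

    // Topology labels as they might appear on a Kubernetes node.
    nodeLabels := map[string]string{
        "topology.kubernetes.io/zone": "zone1",
        "topology.rook.io/rack":       "rack3",
    }

    // The result maps topology type to value, e.g. a "zone" and a "rack" entry
    // that can be used to build the OSD's CRUSH location.
    topology := osd.ExtractOSDTopologyFromLabels(nodeLabels)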
func GetLocationWithNode ¶ added in v1.2.2

func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushHostname string) (string, error)
func PrivilegedContext ¶ added in v1.3.0
func PrivilegedContext() *v1.SecurityContext
PrivilegedContext returns a privileged Pod security context
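A minimal sketch of attaching the returned security context to a container spec; the container name and image are illustrative:

    import v1 "k8s.io/api/core/v1"

    container := v1.Container{
        Name:            "osd",
        Image:           "ceph/ceph:v14.2.8",
        SecurityContext: osd.PrivilegedContext(),
    }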
func UpdateLocationWithNodeLabels ¶ added in v1.2.2

func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)
func UpdateNodeStatus ¶
func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus)
Types ¶
type Cluster ¶
type Cluster struct {
    Namespace      string
    Keyring        string
    DesiredStorage rookv1.StorageScopeSpec // user-defined storage scope spec
    ValidStorage   rookv1.StorageScopeSpec // valid subset of `Storage`, computed at runtime
    Network        cephv1.NetworkSpec
    // contains filtered or unexported fields
}
Cluster keeps track of the OSDs
func New ¶
func New(
    clusterInfo *cephconfig.ClusterInfo,
    context *clusterd.Context,
    namespace string,
    rookVersion string,
    cephVersion cephv1.CephVersionSpec,
    storageSpec rookv1.StorageScopeSpec,
    dataDirHostPath string,
    placement rookv1.Placement,
    annotations rookv1.Annotations,
    network cephv1.NetworkSpec,
    resources v1.ResourceRequirements,
    prepareResources v1.ResourceRequirements,
    priorityClassName string,
    ownerRef metav1.OwnerReference,
    skipUpgradeChecks bool,
    continueUpgradeAfterChecksEvenIfNotHealthy bool,
) *Cluster
New creates an instance of the OSD manager
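A trimmed construction sketch. The clusterInfo, context, cephVersionSpec, storageSpec, and ownerRef values are assumed to come from the operator's existing cluster reconciliation, and the string literals are illustrative:

    osds := osd.New(
        clusterInfo,               // *cephconfig.ClusterInfo for the running cluster
        context,                   // *clusterd.Context with clientset and executor
        "rook-ceph",               // namespace
        "v1.3.0",                  // rook version
        cephVersionSpec,           // cephv1.CephVersionSpec pointing at the Ceph image
        storageSpec,               // rookv1.StorageScopeSpec from the CephCluster CR
        "/var/lib/rook",           // dataDirHostPath
        rookv1.Placement{},
        rookv1.Annotations{},
        cephv1.NetworkSpec{},
        v1.ResourceRequirements{}, // OSD daemon resources
        v1.ResourceRequirements{}, // prepare job resources
        "",                        // priorityClassName
        ownerRef,                  // metav1.OwnerReference of the CephCluster
        false,                     // skipUpgradeChecks
        false,                     // continueUpgradeAfterChecksEvenIfNotHealthy
    )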
type Monitor ¶
type Monitor struct {
// contains filtered or unexported fields
}
Monitor defines OSD process monitoring
func NewMonitor ¶
func NewMonitor(context *clusterd.Context, clusterName string, removeOSDsIfOUTAndSafeToRemove bool, cephVersion cephver.CephVersion) *Monitor
NewMonitor instantiates OSD monitoring
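A minimal construction sketch; the clusterd context and Ceph version are assumed to come from the operator, and the boolean enables removing OSDs that are OUT and safe to remove:

    // mon can then be used for the cluster's OSD health monitoring.
    mon := osd.NewMonitor(context, "rook-ceph", true, cephVersion)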
type OSDInfo ¶
type OSDInfo struct {
    ID             int    `json:"id"`
    Cluster        string `json:"cluster"`
    UUID           string `json:"uuid"`
    DevicePartUUID string `json:"device-part-uuid"`
    // BlockPath is the logical volume path for an OSD created by ceph-volume, with the format
    // '/dev/<Volume Group>/<Logical Volume>', or simply /dev/vdb if block mode is used
    BlockPath     string `json:"lv-path"`
    MetadataPath  string `json:"metadata-path"`
    SkipLVRelease bool   `json:"skip-lv-release"`
    Location      string `json:"location"`
    LVBackedPV    bool   `json:"lv-backed-pv"`
    CVMode        string `json:"lv-mode"`
    Store         string `json:"store"`
}
OSDInfo represents all the properties of a given OSD
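Because every field carries a JSON tag, an OSDInfo can be serialized, for example when recording provisioning results. A minimal sketch with illustrative values:

    import "encoding/json"

    info := osd.OSDInfo{
        ID:        0,
        Cluster:   "rook-ceph",
        UUID:      "c03e1216-0000-0000-0000-000000000000",
        BlockPath: "/dev/ceph-block-vg/osd-block-lv",
        CVMode:    "lvm",
        Store:     "bluestore",
    }

    // raw contains JSON using the tag names shown above, e.g. "lv-path" for BlockPath.
    raw, err := json.Marshal(info)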
Source Files ¶