Documentation ¶
Overview ¶
Package osd for the Ceph OSDs.
Package config provides methods for generating the Ceph config for a Ceph cluster, for producing a "ceph.conf"-compatible file from that config, and for generating Ceph command line-compatible flags.
Index ¶
Constants ¶
const (
    // AppName is the "app" label on osd pods
    AppName = "rook-ceph-osd"
    // FailureDomainKey is the label key whose value is the failure domain of the OSD
    FailureDomainKey = "failure-domain"
    OsdIdLabelKey    = "ceph-osd-id"
)
const (
    CephDeviceSetLabelKey      = "ceph.rook.io/DeviceSet"
    CephSetIndexLabelKey       = "ceph.rook.io/setIndex"
    CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
    OSDOverPVCLabelKey         = "ceph.rook.io/pvc"
)
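The following is a minimal, self-contained sketch (not taken from the package) of how these label constants could be combined into a Kubernetes label selector for the OSD pods of one device set. The constants are mirrored locally so the sketch compiles on its own, and the device set name "set1" is an illustrative assumption.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
)

// Local mirrors of the documented label values, re-declared for a standalone sketch.
const (
    appName               = "rook-ceph-osd"          // value of the "app" label on osd pods
    cephDeviceSetLabelKey = "ceph.rook.io/DeviceSet" // label key identifying the device set
)

func main() {
    // Build a selector matching OSD pods that belong to the hypothetical device set "set1".
    selector := labels.SelectorFromSet(labels.Set{
        "app":                 appName,
        cephDeviceSetLabelKey: "set1",
    })
    fmt.Println(selector.String())
    // prints: app=rook-ceph-osd,ceph.rook.io/DeviceSet=set1
}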
const (
    OrchestrationStatusStarting      = "starting"
    OrchestrationStatusComputingDiff = "computingDiff"
    OrchestrationStatusOrchestrating = "orchestrating"
    OrchestrationStatusCompleted     = "completed"
    OrchestrationStatusFailed        = "failed"
)
Variables ¶
var (
    // The labels that can be specified with the K8s labels such as failure-domain.beta.kubernetes.io/zone
    // These are all at the top layers of the CRUSH map.
    KubernetesTopologyLabels = []string{"zone", "region"}
    // The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack
    CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}
    // The list of supported failure domains in the CRUSH map, ordered from lowest to highest
    CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
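A standalone sketch (the slices below simply mirror the documented values) that prints the combined failure-domain ordering, from the lowest CRUSH level up through the Kubernetes topology levels:

package main

import "fmt"

func main() {
    kubernetesTopologyLabels := []string{"zone", "region"}
    crushTopologyLabels := []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}

    // Same composition as CRUSHMapLevelsOrdered: "host" first, then the CRUSH
    // topology labels, then the Kubernetes topology labels.
    crushMapLevelsOrdered := append([]string{"host"},
        append(crushTopologyLabels, kubernetesTopologyLabels...)...)

    fmt.Println(crushMapLevelsOrdered)
    // [host chassis rack row pdu pod room datacenter zone region]
}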
Functions ¶
func IsRemovingNode ¶
func UpdateNodeStatus ¶
func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus) error
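A hedged sketch of recording a failed orchestration for a node with UpdateNodeStatus. It is assumed to live alongside the package code, and it assumes OrchestrationStatus carries Status and Message string fields, which are not shown in this index; the kv store, node name, and reason are supplied by the caller.

// markNodeFailed is a hypothetical helper that persists a "failed" status for a node.
func markNodeFailed(kv *k8sutil.ConfigMapKVStore, node, reason string) error {
    status := OrchestrationStatus{
        Status:  OrchestrationStatusFailed, // one of the constants listed above
        Message: reason,                    // assumed field for a human-readable explanation
    }
    return UpdateNodeStatus(kv, node, status)
}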
Types ¶
type Cluster ¶
type Cluster struct {
    Namespace      string
    Keyring        string
    DesiredStorage rookalpha.StorageScopeSpec // user-defined storage scope spec
    ValidStorage   rookalpha.StorageScopeSpec // valid subset of `Storage`, computed at runtime
    Network        cephv1.NetworkSpec
    // contains filtered or unexported fields
}
Cluster keeps track of the OSDs
func New ¶
func New(
    clusterInfo *cephconfig.ClusterInfo,
    context *clusterd.Context,
    namespace string,
    rookVersion string,
    cephVersion cephv1.CephVersionSpec,
    storageSpec rookalpha.StorageScopeSpec,
    dataDirHostPath string,
    placement rookalpha.Placement,
    annotations rookalpha.Annotations,
    network cephv1.NetworkSpec,
    resources v1.ResourceRequirements,
    prepareResources v1.ResourceRequirements,
    ownerRef metav1.OwnerReference,
    isUpgrade bool,
    skipUpgradeChecks bool,
) *Cluster
New creates an instance of the OSD manager
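A hedged sketch of calling New with the parameters documented above; the values are illustrative placeholders, and clusterInfo, clusterdContext, rookVersion, cephVersionSpec, storageSpec, and ownerRef are assumed to come from the operator's cluster controller.

osdCluster := osd.New(
    clusterInfo,               // *cephconfig.ClusterInfo for the running cluster
    clusterdContext,           // *clusterd.Context with clientset and executor
    "rook-ceph",               // namespace the OSDs run in (assumed)
    rookVersion,               // rook image version string
    cephVersionSpec,           // cephv1.CephVersionSpec selecting the Ceph image
    storageSpec,               // rookalpha.StorageScopeSpec: nodes, devices, directories
    "/var/lib/rook",           // dataDirHostPath (assumed default)
    rookalpha.Placement{},     // scheduling constraints for the OSD pods
    rookalpha.Annotations{},   // extra pod annotations
    cephv1.NetworkSpec{},      // network configuration
    v1.ResourceRequirements{}, // resources for the OSD daemons
    v1.ResourceRequirements{}, // resources for the prepare jobs
    ownerRef,                  // metav1.OwnerReference of the owning CephCluster
    false,                     // isUpgrade
    false,                     // skipUpgradeChecks
)

The returned *Cluster is the OSD manager that keeps track of the OSDs (see the Cluster type above).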
type Monitor ¶
type Monitor struct {
// contains filtered or unexported fields
}
Monitor defines OSD process monitoring
func NewMonitor ¶
func NewMonitor(context *clusterd.Context, clusterName string, removeOSDsIfOUTAndSafeToRemove bool) *Monitor
NewMonitor instantiates OSD monitoring
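A brief sketch using the documented constructor; clusterdContext is a placeholder *clusterd.Context supplied by the operator, and "rook-ceph" is an assumed cluster name. The final argument asks the monitor to remove OSDs that are out and safe to remove.

// Instantiate OSD monitoring for the "rook-ceph" cluster.
osdMonitor := osd.NewMonitor(clusterdContext, "rook-ceph", true /* removeOSDsIfOUTAndSafeToRemove */)
_ = osdMonitor // the Monitor's fields are unexported; it is driven by the operator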
type OSDInfo ¶
type OSDInfo struct {
    ID                  int    `json:"id"`
    DataPath            string `json:"data-path"`
    Config              string `json:"conf"`
    Cluster             string `json:"cluster"`
    KeyringPath         string `json:"keyring-path"`
    UUID                string `json:"uuid"`
    Journal             string `json:"journal"`
    IsFileStore         bool   `json:"is-file-store"`
    IsDirectory         bool   `json:"is-directory"`
    DevicePartUUID      string `json:"device-part-uuid"`
    CephVolumeInitiated bool   `json:"ceph-volume-initiated"`
    // LVPath is the logical Volume path for an OSD created by Ceph-volume with format '/dev/<Volume Group>/<Logical Volume>'
    LVPath        string `json:"lv-path"`
    SkipLVRelease bool   `json:"skip-lv-release"`
}
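Because every field carries an explicit JSON tag, OSDInfo serializes cleanly through encoding/json. The standalone sketch below mirrors a few of the documented fields and tags to show the resulting wire format; the values are illustrative.

package main

import (
    "encoding/json"
    "fmt"
)

// osdInfo mirrors a subset of the documented OSDInfo fields and tags.
type osdInfo struct {
    ID      int    `json:"id"`
    Cluster string `json:"cluster"`
    UUID    string `json:"uuid"`
    LVPath  string `json:"lv-path"`
}

func main() {
    info := osdInfo{
        ID:      2,
        Cluster: "ceph",
        UUID:    "f1b9cdab-0000-4000-8000-000000000000", // illustrative UUID
        LVPath:  "/dev/ceph-vg/osd-lv",                  // '/dev/<Volume Group>/<Logical Volume>'
    }
    b, _ := json.Marshal(info)
    fmt.Println(string(b))
    // {"id":2,"cluster":"ceph","uuid":"f1b9cdab-0000-4000-8000-000000000000","lv-path":"/dev/ceph-vg/osd-lv"}
}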
Source Files ¶
Directories ¶
Path | Synopsis
---|---
config | Package config for OSD config managed by the operator