Documentation ¶
Index ¶
- Constants
- Variables
- func CreateCniAnnotationFromSetting(storageNetwork *longhorn.Setting) string
- func CreateDefaultDisk(dataPath string) (map[string]longhorn.DiskSpec, error)
- func CreateDisksFromAnnotation(annotation string) (map[string]longhorn.DiskSpec, error)
- func EngineBinaryExistOnHostForImage(image string) bool
- func ErrorAlreadyExists(err error) bool
- func ErrorIsNotFound(err error) bool
- func GenerateEngineNameForVolume(vName string) string
- func GenerateReplicaNameForVolume(vName string) string
- func GetAPIServerAddressFromIP(ip string) string
- func GetBackingImageDataSourceLabels(name, nodeID, diskUUID string) map[string]string
- func GetBackingImageDataSourcePodName(bidsName string) string
- func GetBackingImageDirectoryName(backingImageName, backingImageUUID string) string
- func GetBackingImageDirectoryOnHost(diskPath, backingImageName, backingImageUUID string) string
- func GetBackingImageLabels() map[string]string
- func GetBackingImageManagerDirectoryOnHost(diskPath string) string
- func GetBackingImageManagerLabels(nodeID, diskUUID string) map[string]string
- func GetBackingImageManagerName(image, diskUUID string) string
- func GetBackingImagePathForReplicaManagerContainer(diskPath, backingImageName, backingImageUUID string) string
- func GetBackupVolumeLabels(volumeName string) map[string]string
- func GetBaseLabelsForSystemManagedComponent() map[string]string
- func GetCondition(conditions []longhorn.Condition, conditionType string) longhorn.Condition
- func GetCronJobLabels(job *longhorn.RecurringJobSpec) map[string]string
- func GetCronJobNameForRecurringJob(name string) string
- func GetCronJobNameForVolumeAndJob(vName, job string) string
- func GetCustomizedDefaultSettings(defaultSettingCM *v1.ConfigMap) (defaultSettings map[string]string, err error)
- func GetDaemonSetNameFromEngineImageName(engineImageName string) string
- func GetDefaultManagerURL() string
- func GetEIDaemonSetLabelSelector(engineImageName string) map[string]string
- func GetEngineBinaryDirectoryForEngineManagerContainer(image string) string
- func GetEngineBinaryDirectoryForReplicaManagerContainer(image string) string
- func GetEngineBinaryDirectoryOnHostForImage(image string) string
- func GetEngineImageChecksumName(image string) string
- func GetEngineImageComponentLabel() map[string]string
- func GetEngineImageLabels(engineImageName string) map[string]string
- func GetEngineImageNameFromDaemonSetName(dsName string) string
- func GetImageCanonicalName(image string) string
- func GetInstanceManagerComponentLabel() map[string]string
- func GetInstanceManagerImageChecksumName(image string) string
- func GetInstanceManagerLabels(node, instanceManagerImage string, managerType longhorn.InstanceManagerType) map[string]string
- func GetInstanceManagerName(imType longhorn.InstanceManagerType, nodeName, image string) (string, error)
- func GetInstanceManagerPrefix(imType longhorn.InstanceManagerType) string
- func GetLonghornLabelCRDAPIVersionKey() string
- func GetLonghornLabelComponentKey() string
- func GetLonghornLabelKey(name string) string
- func GetNodeTagsFromAnnotation(annotation string) ([]string, error)
- func GetOrphanChecksumNameForOrphanedDirectory(nodeID, diskName, diskPath, diskUUID, dirName string) string
- func GetOrphanLabelsForOrphanedDirectory(nodeID, diskUUID string) map[string]string
- func GetRecurringJobLabelKey(labelType, recurringJobName string) string
- func GetRecurringJobLabelValueMap(labelType, recurringJobName string) map[string]string
- func GetRegionAndZone(labels map[string]string) (string, string)
- func GetReplicaDataPath(diskPath, dataDirectoryName string) string
- func GetReplicaMountedDataPath(dataPath string) string
- func GetShareManagerComponentLabel() map[string]string
- func GetShareManagerImageChecksumName(image string) string
- func GetShareManagerInstanceLabel(name string) map[string]string
- func GetShareManagerLabels(name, image string) map[string]string
- func GetShareManagerNameFromShareManagerPodName(podName string) string
- func GetShareManagerPodNameFromShareManagerName(smName string) string
- func GetSnapshotName(vds longhorn.VolumeDataSource) string
- func GetVolumeLabels(volumeName string) map[string]string
- func GetVolumeName(vds longhorn.VolumeDataSource) string
- func IsDataFromVolume(vds longhorn.VolumeDataSource) bool
- func IsValidVolumeDataSource(vds longhorn.VolumeDataSource) bool
- func LabelsToString(labels map[string]string) string
- func NewVolumeDataSource(volumeDataSourceType longhorn.VolumeDataSourceType, ...) (dataSource longhorn.VolumeDataSource, err error)
- func NewVolumeDataSourceTypeSnapshot(volumeName, snapshotName string) longhorn.VolumeDataSource
- func NewVolumeDataSourceTypeVolume(volumeName string) longhorn.VolumeDataSource
- func SetCondition(originConditions []longhorn.Condition, conditionType string, ...) []longhorn.Condition
- func SetConditionAndRecord(conditions []longhorn.Condition, conditionType string, ...) []longhorn.Condition
- func SetConditionWithoutTimestamp(originConditions []longhorn.Condition, conditionType string, ...) []longhorn.Condition
- func SetSettingDefinition(name SettingName, definition SettingDefinition)
- func UnmarshalNodeSelector(nodeSelectorSetting string) (map[string]string, error)
- func UnmarshalToNodeTags(s string) ([]string, error)
- func UnmarshalTolerations(tolerationSetting string) ([]v1.Toleration, error)
- func ValidateAccessMode(mode longhorn.AccessMode) error
- func ValidateAndUnmarshalToleration(s string) (*v1.Toleration, error)
- func ValidateCPUReservationValues(engineManagerCPUStr, replicaManagerCPUStr string) error
- func ValidateDataLocality(mode longhorn.DataLocality) error
- func ValidateEngineImageChecksumName(name string) bool
- func ValidateReplicaAutoBalance(option longhorn.ReplicaAutoBalance) error
- func ValidateReplicaCount(count int) error
- func ValidateSetting(name, value string) (err error)
- func ValidateStorageNetwork(value string) (err error)
- type CNIAnnotation
- type CniNetwork
- type DiskSpecWithName
- type NodeDownPodDeletionPolicy
- type NodeWithLastHealthyReplicaDrainPolicy
- type NotFoundError
- type SettingCategory
- type SettingDefinition
- type SettingName
- type SettingType
- type SystemManagedPodsImagePullPolicy
Constants ¶
View Source
const ( SnapPrefix = "snap" VolPrefix = "vol" VolumeNameKey = "volumeName" SnapshotNameKey = "snapshotName" )
View Source
const ( LonghornManagerDaemonSetName = "longhorn-manager" LonghornAdmissionWebhookDeploymentName = "longhorn-admission-webhook" LonghornConversionWebhookDeploymentName = "longhorn-conversion-webhook" LonghornUIDeploymentName = "longhorn-ui" DriverDeployerName = "longhorn-driver-deployer" CSIAttacherName = "csi-attacher" CSIProvisionerName = "csi-provisioner" CSIResizerName = "csi-resizer" CSISnapshotterName = "csi-snapshotter" CSIPluginName = "longhorn-csi-plugin" )
View Source
const ( SettingTypeString = SettingType("string") SettingTypeInt = SettingType("int") SettingTypeBool = SettingType("bool") SettingTypeDeprecated = SettingType("deprecated") )
View Source
const ( SettingNameBackupTarget = SettingName("backup-target") SettingNameBackupTargetCredentialSecret = SettingName("backup-target-credential-secret") SettingNameAllowRecurringJobWhileVolumeDetached = SettingName("allow-recurring-job-while-volume-detached") SettingNameCreateDefaultDiskLabeledNodes = SettingName("create-default-disk-labeled-nodes") SettingNameDefaultDataPath = SettingName("default-data-path") SettingNameDefaultEngineImage = SettingName("default-engine-image") SettingNameDefaultInstanceManagerImage = SettingName("default-instance-manager-image") SettingNameDefaultBackingImageManagerImage = SettingName("default-backing-image-manager-image") SettingNameReplicaSoftAntiAffinity = SettingName("replica-soft-anti-affinity") SettingNameReplicaAutoBalance = SettingName("replica-auto-balance") SettingNameStorageOverProvisioningPercentage = SettingName("storage-over-provisioning-percentage") SettingNameStorageMinimalAvailablePercentage = SettingName("storage-minimal-available-percentage") SettingNameUpgradeChecker = SettingName("upgrade-checker") SettingNameCurrentLonghornVersion = SettingName("current-longhorn-version") SettingNameLatestLonghornVersion = SettingName("latest-longhorn-version") SettingNameStableLonghornVersions = SettingName("stable-longhorn-versions") SettingNameDefaultReplicaCount = SettingName("default-replica-count") SettingNameDefaultDataLocality = SettingName("default-data-locality") SettingNameGuaranteedEngineCPU = SettingName("guaranteed-engine-cpu") SettingNameDefaultLonghornStaticStorageClass = SettingName("default-longhorn-static-storage-class") SettingNameBackupstorePollInterval = SettingName("backupstore-poll-interval") SettingNameTaintToleration = SettingName("taint-toleration") SettingNameSystemManagedComponentsNodeSelector = SettingName("system-managed-components-node-selector") SettingNameCRDAPIVersion = SettingName("crd-api-version") SettingNameAutoSalvage = SettingName("auto-salvage") 
SettingNameAutoDeletePodWhenVolumeDetachedUnexpectedly = SettingName("auto-delete-pod-when-volume-detached-unexpectedly") SettingNameRegistrySecret = SettingName("registry-secret") SettingNameDisableSchedulingOnCordonedNode = SettingName("disable-scheduling-on-cordoned-node") SettingNameReplicaZoneSoftAntiAffinity = SettingName("replica-zone-soft-anti-affinity") SettingNameNodeDownPodDeletionPolicy = SettingName("node-down-pod-deletion-policy") SettingNameAllowNodeDrainWithLastHealthyReplica = SettingName("allow-node-drain-with-last-healthy-replica") SettingNameNodeDrainPolicy = SettingName("node-drain-policy") SettingNameMkfsExt4Parameters = SettingName("mkfs-ext4-parameters") SettingNamePriorityClass = SettingName("priority-class") SettingNameDisableRevisionCounter = SettingName("disable-revision-counter") SettingNameDisableReplicaRebuild = SettingName("disable-replica-rebuild") SettingNameReplicaReplenishmentWaitInterval = SettingName("replica-replenishment-wait-interval") SettingNameConcurrentReplicaRebuildPerNodeLimit = SettingName("concurrent-replica-rebuild-per-node-limit") SettingNameSystemManagedPodsImagePullPolicy = SettingName("system-managed-pods-image-pull-policy") SettingNameAllowVolumeCreationWithDegradedAvailability = SettingName("allow-volume-creation-with-degraded-availability") SettingNameAutoCleanupSystemGeneratedSnapshot = SettingName("auto-cleanup-system-generated-snapshot") SettingNameConcurrentAutomaticEngineUpgradePerNodeLimit = SettingName("concurrent-automatic-engine-upgrade-per-node-limit") SettingNameBackingImageCleanupWaitInterval = SettingName("backing-image-cleanup-wait-interval") SettingNameBackingImageRecoveryWaitInterval = SettingName("backing-image-recovery-wait-interval") SettingNameGuaranteedEngineManagerCPU = SettingName("guaranteed-engine-manager-cpu") SettingNameGuaranteedReplicaManagerCPU = SettingName("guaranteed-replica-manager-cpu") SettingNameKubernetesClusterAutoscalerEnabled = 
SettingName("kubernetes-cluster-autoscaler-enabled") SettingNameOrphanAutoDeletion = SettingName("orphan-auto-deletion") SettingNameStorageNetwork = SettingName("storage-network") SettingNameFailedBackupTTL = SettingName("failed-backup-ttl") )
View Source
const ( SettingCategoryGeneral = SettingCategory("general") SettingCategoryBackup = SettingCategory("backup") SettingCategoryOrphan = SettingCategory("orphan") SettingCategoryScheduling = SettingCategory("scheduling") SettingCategoryDangerZone = SettingCategory("danger Zone") )
View Source
const ( NodeDownPodDeletionPolicyDoNothing = NodeDownPodDeletionPolicy("do-nothing") // Kubernetes default behavior NodeDownPodDeletionPolicyDeleteStatefulSetPod = NodeDownPodDeletionPolicy("delete-statefulset-pod") NodeDownPodDeletionPolicyDeleteDeploymentPod = NodeDownPodDeletionPolicy("delete-deployment-pod") NodeDownPodDeletionPolicyDeleteBothStatefulsetAndDeploymentPod = NodeDownPodDeletionPolicy("delete-both-statefulset-and-deployment-pod") )
View Source
const ( NodeDrainPolicyBlockIfContainsLastReplica = NodeWithLastHealthyReplicaDrainPolicy("block-if-contains-last-replica") NodeDrainPolicyAllowIfReplicaIsStopped = NodeWithLastHealthyReplicaDrainPolicy("allow-if-replica-is-stopped") NodeDrainPolicyAlwaysAllow = NodeWithLastHealthyReplicaDrainPolicy("always-allow") )
View Source
const ( SystemManagedPodsImagePullPolicyNever = SystemManagedPodsImagePullPolicy("never") SystemManagedPodsImagePullPolicyIfNotPresent = SystemManagedPodsImagePullPolicy("if-not-present") SystemManagedPodsImagePullPolicyAlways = SystemManagedPodsImagePullPolicy("always") )
View Source
const ( CNIAnnotationNetworks = CNIAnnotation("k8s.v1.cni.cncf.io/networks") CNIAnnotationNetworkStatus = CNIAnnotation("k8s.v1.cni.cncf.io/networks-status") )
View Source
const ( LonghornKindNode = "Node" LonghornKindVolume = "Volume" LonghornKindEngineImage = "EngineImage" LonghornKindInstanceManager = "InstanceManager" LonghornKindBackingImage = "BackingImage" LonghornKindBackingImageManager = "BackingImageManager" LonghornKindRecurringJob = "RecurringJob" LonghornKindBackingImageDataSource = "BackingImageDataSource" CRDAPIVersionV1alpha1 = "longhorn.rancher.io/v1alpha1" CRDAPIVersionV1beta1 = "longhorn.io/v1beta1" CRDAPIVersionV1beta2 = "longhorn.io/v1beta2" CurrentCRDAPIVersion = CRDAPIVersionV1beta2 )
View Source
const ( DefaultAPIPort = 9500 DefaultWebhookServerPort = 9443 WebhookTypeConversion = "conversion" WebhookTypeAdmission = "admission" ValidatingWebhookName = "longhorn-webhook-validator" MutatingWebhookName = "longhorn-webhook-mutator" EngineBinaryDirectoryInContainer = "/engine-binaries/" EngineBinaryDirectoryOnHost = "/var/lib/longhorn/engine-binaries/" ReplicaHostPrefix = "/host" EngineBinaryName = "longhorn" BackingImageManagerDirectory = "/backing-images/" BackingImageFileName = "backing" TLSDirectoryInContainer = "/tls-files/" TLSSecretName = "longhorn-grpc-tls" TLSCAFile = "ca.crt" TLSCertFile = "tls.crt" TLSKeyFile = "tls.key" DefaultBackupTargetName = "default" LonghornNodeKey = "longhornnode" LonghornDiskUUIDKey = "longhorndiskuuid" NodeCreateDefaultDiskLabelKey = "node.longhorn.io/create-default-disk" NodeCreateDefaultDiskLabelValueTrue = "true" NodeCreateDefaultDiskLabelValueConfig = "config" KubeNodeDefaultDiskConfigAnnotationKey = "node.longhorn.io/default-disks-config" KubeNodeDefaultNodeTagConfigAnnotationKey = "node.longhorn.io/default-node-tags" LastAppliedTolerationAnnotationKeySuffix = "last-applied-tolerations" ConfigMapResourceVersionKey = "configmap-resource-version" KubernetesStatusLabel = "KubernetesStatus" KubernetesReplicaSet = "ReplicaSet" KubernetesStatefulSet = "StatefulSet" RecurringJobLabel = "RecurringJob" LonghornLabelKeyPrefix = "longhorn.io" LonghornLabelRecurringJobKeyPrefixFmt = "recurring-%s.longhorn.io" LonghornLabelEngineImage = "engine-image" LonghornLabelInstanceManager = "instance-manager" LonghornLabelNode = "node" LonghornLabelDiskUUID = "disk-uuid" LonghornLabelInstanceManagerType = "instance-manager-type" LonghornLabelInstanceManagerImage = "instance-manager-image" LonghornLabelVolume = "longhornvolume" LonghornLabelBackingImage = "backing-image" LonghornLabelBackingImageManager = "backing-image-manager" LonghornLabelManagedBy = "managed-by" LonghornLabelSnapshotForCloningVolume = "for-cloning-volume" 
LonghornLabelBackingImageDataSource = "backing-image-data-source" LonghornLabelBackupVolume = "backup-volume" LonghornLabelRecurringJob = "job" LonghornLabelRecurringJobGroup = "job-group" LonghornLabelOrphan = "orphan" LonghornLabelOrphanType = "orphan-type" LonghornLabelCRDAPIVersion = "crd-api-version" LonghornLabelVolumeAccessMode = "volume-access-mode" LonghornLabelValueEnabled = "enabled" LonghornLabelExportFromVolume = "export-from-volume" LonghornLabelSnapshotForExportingBackingImage = "for-exporting-backing-image" KubernetesFailureDomainRegionLabelKey = "failure-domain.beta.kubernetes.io/region" KubernetesFailureDomainZoneLabelKey = "failure-domain.beta.kubernetes.io/zone" KubernetesTopologyRegionLabelKey = "topology.kubernetes.io/region" KubernetesTopologyZoneLabelKey = "topology.kubernetes.io/zone" KubernetesClusterAutoscalerSafeToEvictKey = "cluster-autoscaler.kubernetes.io/safe-to-evict" LonghornDriverName = "driver.longhorn.io" DefaultDiskPrefix = "default-disk-" DeprecatedProvisionerName = "rancher.io/longhorn" DepracatedDriverName = "io.rancher.longhorn" DefaultStorageClassConfigMapName = "longhorn-storageclass" DefaultDefaultSettingConfigMapName = "longhorn-default-setting" DefaultStorageClassName = "longhorn" ControlPlaneName = "longhorn-manager" DefaultRecurringJobConcurrency = 10 PVAnnotationLonghornVolumeSchedulingError = "longhorn.io/volume-scheduling-error" CniNetworkNone = "" StorageNetworkInterface = "lhnet1" )
View Source
const ( EnvNodeName = "NODE_NAME" EnvPodNamespace = "POD_NAMESPACE" EnvPodIP = "POD_IP" EnvServiceAccount = "SERVICE_ACCOUNT" BackupStoreTypeS3 = "s3" AWSIAMRoleAnnotation = "iam.amazonaws.com/role" AWSIAMRoleArn = "AWS_IAM_ROLE_ARN" AWSAccessKey = "AWS_ACCESS_KEY_ID" AWSSecretKey = "AWS_SECRET_ACCESS_KEY" AWSEndPoint = "AWS_ENDPOINTS" AWSCert = "AWS_CERT" HTTPSProxy = "HTTPS_PROXY" HTTPProxy = "HTTP_PROXY" NOProxy = "NO_PROXY" VirtualHostedStyle = "VIRTUAL_HOSTED_STYLE" OptionFromBackup = "fromBackup" OptionNumberOfReplicas = "numberOfReplicas" OptionStaleReplicaTimeout = "staleReplicaTimeout" OptionBaseImage = "baseImage" OptionFrontend = "frontend" OptionDiskSelector = "diskSelector" OptionNodeSelector = "nodeSelector" // DefaultStaleReplicaTimeout in minutes. 48h by default DefaultStaleReplicaTimeout = "2880" ImageChecksumNameLength = 8 InstanceManagerSuffixChecksumLength = 32 )
View Source
const (
BackingImageDataSourcePodNamePrefix = "backing-image-ds-"
)
View Source
const (
DefaultSettingYAMLFileName = "default-setting.yaml"
)
View Source
const (
KubernetesMinVersion = "v1.18.0"
)
Variables ¶
View Source
var ( SettingDefinitionBackupTarget = SettingDefinition{ DisplayName: "Backup Target", Description: "The endpoint used to access the backupstore. NFS and S3 are supported.", Category: SettingCategoryBackup, Type: SettingTypeString, Required: false, ReadOnly: false, } SettingDefinitionBackupTargetCredentialSecret = SettingDefinition{ DisplayName: "Backup Target Credential Secret", Description: "The name of the Kubernetes secret associated with the backup target.", Category: SettingCategoryBackup, Type: SettingTypeString, Required: false, ReadOnly: false, } SettingDefinitionAllowRecurringJobWhileVolumeDetached = SettingDefinition{ DisplayName: "Allow Recurring Job While Volume Is Detached", Description: "If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup. \n\n" + "Note that the volume is not ready for workload during the period when the volume was automatically attached. " + "Workload will have to wait until the recurring job finishes.", Category: SettingCategoryBackup, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionBackupstorePollInterval = SettingDefinition{ DisplayName: "Backupstore Poll Interval", Description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling.", Category: SettingCategoryBackup, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "300", } SettingDefinitionFailedBackupTTL = SettingDefinition{ DisplayName: "Failed Backup Time to Live", Description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. 
Set to 0 to disable the auto-deletion.\n" + "Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.\n" + "Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.\n" + "Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion.\n\n", Category: SettingCategoryBackup, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "1440", } SettingDefinitionCreateDefaultDiskLabeledNodes = SettingDefinition{ DisplayName: "Create Default Disk on Labeled Nodes", Description: "Create default Disk automatically only on Nodes with the label " + "\"node.longhorn.io/create-default-disk=true\" if no other disks exist. If disabled, the default disk will " + "be created on all new nodes when each node is first added.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionDefaultDataPath = SettingDefinition{ DisplayName: "Default Data Path", Description: "Default path to use for storing data on a host", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: "/var/lib/longhorn/", } SettingDefinitionDefaultEngineImage = SettingDefinition{ DisplayName: "Default Engine Image", Description: "The default engine image used by the manager. Can be changed on the manager starting command line only", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: true, } SettingDefinitionDefaultInstanceManagerImage = SettingDefinition{ DisplayName: "Default Instance Manager Image", Description: "The default instance manager image used by the manager. 
Can be changed on the manager starting command line only", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: true, } SettingDefinitionDefaultShareManagerImage = SettingDefinition{ DisplayName: "Default Share Manager Image", Description: "The default share manager image used by the manager. Can be changed on the manager starting command line only", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: true, } SettingDefinitionDefaultBackingImageManagerImage = SettingDefinition{ DisplayName: "Default Backing Image Manager Image", Description: "The default backing image manager image used by the manager. Can be changed on the manager starting command line only", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: true, } SettingDefinitionReplicaSoftAntiAffinity = SettingDefinition{ DisplayName: "Replica Node Level Soft Anti-Affinity", Description: "Allow scheduling on nodes with existing healthy replicas of the same volume", Category: SettingCategoryScheduling, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionReplicaAutoBalance = SettingDefinition{ DisplayName: "Replica Auto Balance", Description: "Enable this setting automatically rebalances replicas when discovered an available node.\n\n" + "The available global options are: \n\n" + "- **disabled**. This is the default option. No replica auto-balance will be done.\n" + "- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.\n" + "- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.\n\n" + "Longhorn also support individual volume setting. The setting can be specified on Volume page, this overrules the global setting.\n\n" + "The available volume setting options are: \n\n" + "- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.\n" + "- **disabled**. 
This option instructs Longhorn no replica auto-balance should be done.\n" + "- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.\n" + "- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.\n", Category: SettingCategoryScheduling, Type: SettingTypeString, Required: true, ReadOnly: false, Default: string(longhorn.ReplicaAutoBalanceDisabled), Choices: []string{ string(longhorn.ReplicaAutoBalanceDisabled), string(longhorn.ReplicaAutoBalanceLeastEffort), string(longhorn.ReplicaAutoBalanceBestEffort), }, } SettingDefinitionStorageOverProvisioningPercentage = SettingDefinition{ DisplayName: "Storage Over Provisioning Percentage", Description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity", Category: SettingCategoryScheduling, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "200", } SettingDefinitionStorageMinimalAvailablePercentage = SettingDefinition{ DisplayName: "Storage Minimal Available Percentage", Description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up.", Category: SettingCategoryScheduling, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "25", } SettingDefinitionUpgradeChecker = SettingDefinition{ DisplayName: "Enable Upgrade Checker", Description: "Upgrade Checker will check for new Longhorn version periodically. 
When there is a new version available, a notification will appear in the UI", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionCurrentLonghornVersion = SettingDefinition{ DisplayName: "Current Longhorn Version", Description: "The current Longhorn version.", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: false, ReadOnly: true, Default: meta.Version, } SettingDefinitionLatestLonghornVersion = SettingDefinition{ DisplayName: "Latest Longhorn Version", Description: "The latest version of Longhorn available. Updated by Upgrade Checker automatically", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: false, ReadOnly: true, } SettingDefinitionStableLonghornVersions = SettingDefinition{ DisplayName: "Stable Longhorn Versions", Description: "The latest stable version of every minor release line. Updated by Upgrade Checker automatically", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: false, ReadOnly: true, } SettingDefinitionDefaultReplicaCount = SettingDefinition{ DisplayName: "Default Replica Count", Description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass", Category: SettingCategoryGeneral, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "3", } SettingDefinitionDefaultDataLocality = SettingDefinition{ DisplayName: "Default Data Locality", Description: "We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.\n\n" + "This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass\n\n" + "The available modes are: \n\n" + "- **disabled**. This is the default option. 
There may or may not be a replica on the same node as the attached volume (workload)\n" + "- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.\n", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: string(longhorn.DataLocalityDisabled), Choices: []string{ string(longhorn.DataLocalityDisabled), string(longhorn.DataLocalityBestEffort), }, } SettingDefinitionGuaranteedEngineCPU = SettingDefinition{ DisplayName: "Guaranteed Engine CPU (Deprecated)", Description: "This setting is replaced by 2 new settings \"Guaranteed Engine Manager CPU\" and \"Guaranteed Replica Manager CPU\" since Longhorn version v1.1.1. \n" + "This setting was used to control the CPU requests of all Longhorn Instance Manager pods. \n", Category: SettingCategoryDangerZone, Type: SettingTypeDeprecated, Required: false, ReadOnly: true, Default: "", } SettingDefinitionDefaultLonghornStaticStorageClass = SettingDefinition{ DisplayName: "Default Longhorn Static StorageClass Name", Description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. 
The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object.", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: "longhorn-static", } SettingDefinitionTaintToleration = SettingDefinition{ DisplayName: "Kubernetes Taint Toleration", Description: "If you want to dedicate nodes to just store Longhorn replicas and reject other general workloads, you can set tolerations for **all** Longhorn components and add taints to the nodes dedicated for storage. " + "Longhorn system contains user deployed components (e.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g, instance manager, engine image, CSI driver, etc.) " + "This setting only sets taint tolerations for system managed components. " + "Depending on how you deployed Longhorn, you need to set taint tolerations for user deployed components in Helm chart or deployment YAML file. " + "All Longhorn volumes should be detached before modifying toleration settings. " + "We recommend setting tolerations during Longhorn deployment because the Longhorn system cannot be operated during the update. " + "Multiple tolerations can be set here, and these tolerations are separated by semicolon. For example: \n\n" + "* `key1=value1:NoSchedule; key2:NoExecute` \n\n" + "* `:` this toleration tolerates everything because an empty key with operator `Exists` matches all keys, values and effects \n\n" + "* `key1=value1:` this toleration has empty effect. It matches all effects with key `key1` \n\n" + "Because `kubernetes.io` is used as the key of all Kubernetes default tolerations, it should not be used in the toleration settings.\n\n " + "WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES! 
", Category: SettingCategoryDangerZone, Type: SettingTypeString, Required: false, ReadOnly: false, } SettingDefinitionSystemManagedComponentsNodeSelector = SettingDefinition{ DisplayName: "System Managed Components Node Selector", Description: "If you want to restrict Longhorn components to only run on particular set of nodes, you can set node selector for **all** Longhorn components. " + "Longhorn system contains user deployed components (e.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g, instance manager, engine image, CSI driver, etc.) " + "You must follow the below order when set the node selector:\n\n" + "1. Set node selector for user deployed components in Helm chart or deployment YAML file depending on how you deployed Longhorn.\n\n" + "2. Set node selector for system managed components in here.\n\n" + "All Longhorn volumes should be detached before modifying node selector settings. " + "We recommend setting node selector during Longhorn deployment because the Longhorn system cannot be operated during the update. " + "Multiple label key-value pairs are separated by semicolon. For example: \n\n" + "* `label-key1=label-value1; label-key2=label-value2` \n\n" + "WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES! \n\n" + "Please see the documentation at https://longhorn.io for more detailed instructions about changing node selector", Category: SettingCategoryDangerZone, Type: SettingTypeString, Required: false, ReadOnly: false, } SettingDefinitionCRDAPIVersion = SettingDefinition{ DisplayName: "Custom Resource API Version", Description: "The current customer resource's API version, e.g. longhorn.io/v1beta2. Set by manager automatically", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: true, } SettingDefinitionAutoSalvage = SettingDefinition{ DisplayName: "Automatic salvage", Description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. 
due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionAutoDeletePodWhenVolumeDetachedUnexpectedly = SettingDefinition{ DisplayName: "Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly", Description: "If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). " + "By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount. \n\n" + "If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume. \n\n" + "**Note:** This setting doesn't apply to the workload pods that don't have a controller. 
Longhorn never deletes them.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionRegistrySecret = SettingDefinition{ DisplayName: "Registry secret", Description: "The Kubernetes Secret name", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: false, ReadOnly: false, Default: "", } SettingDefinitionDisableSchedulingOnCordonedNode = SettingDefinition{ DisplayName: "Disable Scheduling On Cordoned Node", Description: `Disable Longhorn manager to schedule replica on Kubernetes cordoned node`, Category: SettingCategoryScheduling, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionReplicaZoneSoftAntiAffinity = SettingDefinition{ DisplayName: "Replica Zone Level Soft Anti-Affinity", Description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone.", Category: SettingCategoryScheduling, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionNodeDownPodDeletionPolicy = SettingDefinition{ DisplayName: "Pod Deletion Policy When Node is Down", Description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.\n" + "- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. 
Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.\n" + "- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.\n" + "- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.\n" + "- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.\n", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: string(NodeDownPodDeletionPolicyDoNothing), Choices: []string{ string(NodeDownPodDeletionPolicyDoNothing), string(NodeDownPodDeletionPolicyDeleteStatefulSetPod), string(NodeDownPodDeletionPolicyDeleteDeploymentPod), string(NodeDownPodDeletionPolicyDeleteBothStatefulsetAndDeploymentPod), }, } SettingDefinitionAllowNodeDrainWithLastHealthyReplica = SettingDefinition{ DisplayName: "Allow Node Drain with the Last Healthy Replica", Description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume.\n\n" + "If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionNodeDrainPolicy = SettingDefinition{ DisplayName: "Node Drain Policy", Description: "Define the policy to use when a node with the last healthy replica of a volume is drained. 
\n" + "- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.\n" + "- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.\n" + "- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining.\n", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: string(NodeDrainPolicyBlockIfContainsLastReplica), Choices: []string{ string(NodeDrainPolicyBlockIfContainsLastReplica), string(NodeDrainPolicyAllowIfReplicaIsStopped), string(NodeDrainPolicyAlwaysAllow), }, } SettingDefinitionMkfsExt4Parameters = SettingDefinition{ DisplayName: "Custom mkfs.ext4 parameters", Description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: false, ReadOnly: false, } SettingDefinitionPriorityClass = SettingDefinition{ DisplayName: "Priority Class", Description: "The name of the Priority Class to set on the Longhorn components. This can help prevent Longhorn components from being evicted under Node Pressure. \n" + "Longhorn system contains user deployed components (e.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g, instance manager, engine image, CSI driver, etc.) " + "Note that this setting only sets Priority Class for system managed components. 
" + "Depending on how you deployed Longhorn, you need to set Priority Class for user deployed components in Helm chart or deployment YAML file. \n" + "WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES.", Category: SettingCategoryDangerZone, Required: false, ReadOnly: false, } SettingDefinitionDisableRevisionCounter = SettingDefinition{ DisplayName: "Disable Revision Counter", Description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a revision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest revision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionDisableReplicaRebuild = SettingDefinition{ DisplayName: "Disable Replica Rebuild", Description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume.", Category: SettingCategoryDangerZone, Type: SettingTypeDeprecated, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionReplicaReplenishmentWaitInterval = SettingDefinition{ DisplayName: "Replica Replenishment Wait Interval", Description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.\n" + "Warning: This option works only when there is a failed replica in the volume. 
And this option may block the rebuilding for a while in the case.", Category: SettingCategoryGeneral, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "600", } SettingDefinitionConcurrentReplicaRebuildPerNodeLimit = SettingDefinition{ DisplayName: "Concurrent Replica Rebuild Per Node Limit", Description: "This setting controls how many replicas on a node can be rebuilt simultaneously. \n\n" + "Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding. \n\n" + "WARNING: \n\n" + " - The old setting \"Disable Replica Rebuild\" is replaced by this setting. \n\n" + " - Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped. \n\n" + " - When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore.", Category: SettingCategoryDangerZone, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "5", } SettingDefinitionSystemManagedPodsImagePullPolicy = SettingDefinition{ DisplayName: "System Managed Pod Image Pull Policy", Description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. 
" + "The new Image Pull Policy will only apply after the system managed pods restart.", Category: SettingCategoryGeneral, Type: SettingTypeString, Required: true, ReadOnly: false, Default: string(SystemManagedPodsImagePullPolicyIfNotPresent), Choices: []string{ string(SystemManagedPodsImagePullPolicyIfNotPresent), string(SystemManagedPodsImagePullPolicyNever), string(SystemManagedPodsImagePullPolicyAlways), }, } SettingDefinitionAllowVolumeCreationWithDegradedAvailability = SettingDefinition{ DisplayName: "Allow Volume Creation with Degraded Availability", Description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation.", Category: SettingCategoryScheduling, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionAutoCleanupSystemGeneratedSnapshot = SettingDefinition{ DisplayName: "Automatically Cleanup System Generated Snapshot", Description: "This setting enables Longhorn to automatically cleanup the system generated snapshot before and after replica rebuilding.", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "true", } SettingDefinitionConcurrentAutomaticEngineUpgradePerNodeLimit = SettingDefinition{ DisplayName: "Concurrent Automatic Engine Upgrade Per Node Limit", Description: "This setting controls how Longhorn automatically upgrades volumes' engines after upgrading Longhorn manager. " + "The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. " + "If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version.", Category: SettingCategoryGeneral, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "0", } SettingDefinitionBackingImageCleanupWaitInterval = SettingDefinition{ DisplayName: "Backing Image Cleanup Wait Interval", Description: "In minutes. 
The interval determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it.", Category: SettingCategoryGeneral, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "60", } SettingDefinitionBackingImageRecoveryWaitInterval = SettingDefinition{ DisplayName: "Backing Image Recovery Wait Interval", Description: "In seconds. The interval determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown. \n\n" + "WARNING: \n\n" + " - This recovery only works for the backing image of which the creation type is \"download\". \n\n" + " - File state \"unknown\" means the related manager pods on the pod is not running or the node itself is down/disconnected.", Category: SettingCategoryGeneral, Type: SettingTypeInt, Required: true, ReadOnly: false, Default: "300", } SettingDefinitionGuaranteedEngineManagerCPU = SettingDefinition{ DisplayName: "Guaranteed Engine Manager CPU", Description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload. \n\n" + "In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: \n\n" + "`Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100` \n\n" + "The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. 
\n\n" + "If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. \n\n" + "WARNING: \n\n" + " - Value 0 means unsetting CPU requests for engine manager pods. \n\n" + " - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40. \n\n" + " - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. \n\n" + " - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set. \n\n" + " - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. \n\n", Category: SettingCategoryDangerZone, Type: SettingTypeString, Required: true, ReadOnly: false, Default: "12", } SettingDefinitionGuaranteedReplicaManagerCPU = SettingDefinition{ DisplayName: "Guaranteed Replica Manager CPU", Description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload. 
\n\n" + "In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: \n\n" + "`Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100` \n\n" + "The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. \n\n" + "If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. \n\n" + "WARNING: \n\n" + " - Value 0 means unsetting CPU requests for replica manager pods. \n\n" + " - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40. \n\n" + " - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. \n\n" + " - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set. \n\n" + " - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. 
\n\n", Category: SettingCategoryDangerZone, Type: SettingTypeString, Required: true, ReadOnly: false, Default: "12", } SettingDefinitionKubernetesClusterAutoscalerEnabled = SettingDefinition{ DisplayName: "Kubernetes Cluster Autoscaler Enabled (Experimental)", Description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler. \n\n" + "Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that met all conditions: \n\n" + " - No volume attached to the node \n\n" + " - Is not the last node containing the replica of any volume. \n\n" + " - Is not running backing image components pod. \n\n" + " - Is not running share manager components pod. \n\n", Category: SettingCategoryGeneral, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionOrphanAutoDeletion = SettingDefinition{ DisplayName: "Orphan Auto-Deletion", Description: "This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically. \n\n" + "Orphan resources on down or unknown nodes will not be cleaned up automatically. \n\n", Category: SettingCategoryOrphan, Type: SettingTypeBool, Required: true, ReadOnly: false, Default: "false", } SettingDefinitionStorageNetwork = SettingDefinition{ DisplayName: "Storage Network", Description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network. \n\n" + "To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in **<namespace>/<name>** format. \n\n" + "WARNING: \n\n" + " - The cluster must have pre-existing Multus installed, and NetworkAttachmentDefinition IPs are reachable between nodes. \n\n" + " - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes. 
\n\n" + " - When applying the setting, Longhorn will restart all instance-manager, and backing-image-manager pods. \n\n", Category: SettingCategoryDangerZone, Type: SettingTypeString, Required: false, ReadOnly: false, Default: CniNetworkNone, } )
View Source
var (
LonghornSystemKey = "longhorn"
)
View Source
var ( SettingNameList = []SettingName{ SettingNameBackupTarget, SettingNameBackupTargetCredentialSecret, SettingNameAllowRecurringJobWhileVolumeDetached, SettingNameCreateDefaultDiskLabeledNodes, SettingNameDefaultDataPath, SettingNameDefaultEngineImage, SettingNameDefaultInstanceManagerImage, SettingNameDefaultShareManagerImage, SettingNameDefaultBackingImageManagerImage, SettingNameReplicaSoftAntiAffinity, SettingNameReplicaAutoBalance, SettingNameStorageOverProvisioningPercentage, SettingNameStorageMinimalAvailablePercentage, SettingNameUpgradeChecker, SettingNameCurrentLonghornVersion, SettingNameLatestLonghornVersion, SettingNameStableLonghornVersions, SettingNameDefaultReplicaCount, SettingNameDefaultDataLocality, SettingNameGuaranteedEngineCPU, SettingNameDefaultLonghornStaticStorageClass, SettingNameBackupstorePollInterval, SettingNameTaintToleration, SettingNameSystemManagedComponentsNodeSelector, SettingNameCRDAPIVersion, SettingNameAutoSalvage, SettingNameAutoDeletePodWhenVolumeDetachedUnexpectedly, SettingNameRegistrySecret, SettingNameDisableSchedulingOnCordonedNode, SettingNameReplicaZoneSoftAntiAffinity, SettingNameNodeDownPodDeletionPolicy, SettingNameAllowNodeDrainWithLastHealthyReplica, SettingNameNodeDrainPolicy, SettingNameMkfsExt4Parameters, SettingNamePriorityClass, SettingNameDisableRevisionCounter, SettingNameDisableReplicaRebuild, SettingNameReplicaReplenishmentWaitInterval, SettingNameConcurrentReplicaRebuildPerNodeLimit, SettingNameSystemManagedPodsImagePullPolicy, SettingNameAllowVolumeCreationWithDegradedAvailability, SettingNameAutoCleanupSystemGeneratedSnapshot, SettingNameConcurrentAutomaticEngineUpgradePerNodeLimit, SettingNameBackingImageCleanupWaitInterval, SettingNameBackingImageRecoveryWaitInterval, SettingNameGuaranteedEngineManagerCPU, SettingNameGuaranteedReplicaManagerCPU, SettingNameKubernetesClusterAutoscalerEnabled, SettingNameOrphanAutoDeletion, SettingNameStorageNetwork, SettingNameFailedBackupTTL, } )
Functions ¶
func CreateCniAnnotationFromSetting ¶ added in v1.3.0
func CreateDefaultDisk ¶ added in v0.8.1
func CreateDisksFromAnnotation ¶ added in v0.8.1
func ErrorAlreadyExists ¶ added in v0.6.0
func ErrorIsNotFound ¶ added in v0.6.0
func GetBackingImageDataSourceLabels ¶ added in v1.2.0
func GetBackingImageDataSourcePodName ¶ added in v1.2.0
func GetBackingImageDirectoryName ¶ added in v1.1.1
func GetBackingImageDirectoryOnHost ¶ added in v1.1.1
func GetBackingImageLabels ¶ added in v1.1.1
func GetBackingImageManagerDirectoryOnHost ¶ added in v1.1.1
func GetBackingImageManagerLabels ¶ added in v1.1.1
func GetBackingImageManagerName ¶ added in v1.1.1
func GetBackingImagePathForReplicaManagerContainer ¶ added in v1.1.1
func GetBackupVolumeLabels ¶ added in v1.2.0
func GetBaseLabelsForSystemManagedComponent ¶ added in v1.1.1
func GetCondition ¶ added in v0.8.1
GetCondition returns a copy of conditions[conditionType], and automatically fills in the unknown condition
func GetCronJobLabels ¶ added in v1.1.2
func GetCronJobLabels(job *longhorn.RecurringJobSpec) map[string]string
func GetCronJobNameForRecurringJob ¶ added in v1.2.0
func GetCustomizedDefaultSettings ¶ added in v0.6.0
func GetDaemonSetNameFromEngineImageName ¶ added in v0.6.0
func GetDefaultManagerURL ¶ added in v1.0.1
func GetDefaultManagerURL() string
func GetEIDaemonSetLabelSelector ¶ added in v1.1.1
GetEIDaemonSetLabelSelector returns labels for the engine image daemonset's Spec.Selector.MatchLabels
func GetEngineBinaryDirectoryForEngineManagerContainer ¶ added in v0.8.0
func GetEngineBinaryDirectoryForReplicaManagerContainer ¶ added in v0.8.0
func GetEngineImageComponentLabel ¶ added in v1.1.1
func GetEngineImageLabels ¶ added in v0.6.0
func GetEngineImageNameFromDaemonSetName ¶ added in v0.6.0
func GetImageCanonicalName ¶
func GetInstanceManagerComponentLabel ¶ added in v0.6.0
func GetInstanceManagerImageChecksumName ¶ added in v1.0.1
func GetInstanceManagerLabels ¶ added in v0.6.0
func GetInstanceManagerLabels(node, instanceManagerImage string, managerType longhorn.InstanceManagerType) map[string]string
func GetInstanceManagerName ¶ added in v0.8.0
func GetInstanceManagerName(imType longhorn.InstanceManagerType, nodeName, image string) (string, error)
func GetInstanceManagerPrefix ¶ added in v1.1.0
func GetInstanceManagerPrefix(imType longhorn.InstanceManagerType) string
func GetLonghornLabelCRDAPIVersionKey ¶ added in v1.3.0
func GetLonghornLabelCRDAPIVersionKey() string
func GetLonghornLabelComponentKey ¶ added in v0.6.0
func GetLonghornLabelComponentKey() string
func GetLonghornLabelKey ¶ added in v0.6.0
func GetNodeTagsFromAnnotation ¶ added in v0.8.1
func GetOrphanChecksumNameForOrphanedDirectory ¶ added in v1.3.0
func GetOrphanLabelsForOrphanedDirectory ¶ added in v1.3.0
func GetRecurringJobLabelKey ¶ added in v1.2.0
func GetRecurringJobLabelValueMap ¶ added in v1.2.0
func GetRegionAndZone ¶ added in v0.8.0
func GetReplicaDataPath ¶ added in v1.1.0
func GetReplicaMountedDataPath ¶ added in v0.6.0
func GetShareManagerComponentLabel ¶ added in v1.1.0
func GetShareManagerImageChecksumName ¶ added in v1.1.0
func GetShareManagerInstanceLabel ¶ added in v1.1.0
func GetShareManagerLabels ¶ added in v1.1.0
func GetShareManagerNameFromShareManagerPodName ¶ added in v1.1.1
func GetShareManagerPodNameFromShareManagerName ¶ added in v1.1.1
func GetSnapshotName ¶ added in v1.2.3
func GetSnapshotName(vds longhorn.VolumeDataSource) string
func GetVolumeLabels ¶ added in v0.7.0
func GetVolumeName ¶ added in v1.2.3
func GetVolumeName(vds longhorn.VolumeDataSource) string
func IsDataFromVolume ¶ added in v1.2.3
func IsDataFromVolume(vds longhorn.VolumeDataSource) bool
func IsValidVolumeDataSource ¶ added in v1.2.0
func IsValidVolumeDataSource(vds longhorn.VolumeDataSource) bool
func LabelsToString ¶ added in v0.7.0
func NewVolumeDataSource ¶ added in v1.2.0
func NewVolumeDataSource(volumeDataSourceType longhorn.VolumeDataSourceType, parameters map[string]string) (dataSource longhorn.VolumeDataSource, err error)
func NewVolumeDataSourceTypeSnapshot ¶ added in v1.2.0
func NewVolumeDataSourceTypeSnapshot(volumeName, snapshotName string) longhorn.VolumeDataSource
func NewVolumeDataSourceTypeVolume ¶ added in v1.2.0
func NewVolumeDataSourceTypeVolume(volumeName string) longhorn.VolumeDataSource
func SetCondition ¶ added in v0.8.1
func SetConditionAndRecord ¶ added in v0.8.1
func SetConditionWithoutTimestamp ¶ added in v1.3.0
func SetSettingDefinition ¶ added in v1.2.5
func SetSettingDefinition(name SettingName, definition SettingDefinition)
func UnmarshalNodeSelector ¶ added in v1.1.1
func UnmarshalToNodeTags ¶ added in v0.8.1
UnmarshalToNodeTags input format should be: `["worker1","enabled"]`
func UnmarshalTolerations ¶ added in v0.6.0
func UnmarshalTolerations(tolerationSetting string) ([]v1.Toleration, error)
func ValidateAccessMode ¶ added in v1.1.0
func ValidateAccessMode(mode longhorn.AccessMode) error
func ValidateAndUnmarshalToleration ¶ added in v0.6.0
func ValidateAndUnmarshalToleration(s string) (*v1.Toleration, error)
func ValidateCPUReservationValues ¶ added in v1.1.1
func ValidateDataLocality ¶ added in v1.1.0
func ValidateDataLocality(mode longhorn.DataLocality) error
func ValidateEngineImageChecksumName ¶ added in v0.8.0
func ValidateReplicaAutoBalance ¶ added in v1.2.0
func ValidateReplicaAutoBalance(option longhorn.ReplicaAutoBalance) error
func ValidateReplicaCount ¶ added in v0.6.0
func ValidateSetting ¶ added in v1.3.0
func ValidateStorageNetwork ¶ added in v1.3.0
Types ¶
type CNIAnnotation ¶ added in v1.3.0
type CNIAnnotation string
type CniNetwork ¶ added in v1.3.0
type DiskSpecWithName ¶ added in v0.8.1
func UnmarshalToDisks ¶ added in v0.8.1
func UnmarshalToDisks(s string) (ret []DiskSpecWithName, err error)
UnmarshalToDisks input format should be: `[{"path":"/mnt/disk1","allowScheduling":false},
{"path":"/mnt/disk2","allowScheduling":false,"storageReserved":1024,"tags":["ssd","fast"]}]`
type NodeDownPodDeletionPolicy ¶ added in v1.1.0
type NodeDownPodDeletionPolicy string
type NodeWithLastHealthyReplicaDrainPolicy ¶ added in v1.3.3
type NodeWithLastHealthyReplicaDrainPolicy string
type NotFoundError ¶
type NotFoundError struct {
Name string
}
func (*NotFoundError) Error ¶
func (e *NotFoundError) Error() string
type SettingCategory ¶
type SettingCategory string
type SettingDefinition ¶
type SettingDefinition struct { DisplayName string `json:"displayName"` Description string `json:"description"` Category SettingCategory `json:"category"` Type SettingType `json:"type"` Required bool `json:"required"` ReadOnly bool `json:"readOnly"` Default string `json:"default"` Choices []string `json:"options,omitempty"` // +optional }
func GetSettingDefinition ¶ added in v1.2.5
func GetSettingDefinition(name SettingName) (SettingDefinition, bool)
type SettingName ¶
type SettingName string
type SettingType ¶
type SettingType string
type SystemManagedPodsImagePullPolicy ¶ added in v1.1.0
type SystemManagedPodsImagePullPolicy string
Click to show internal directories.
Click to hide internal directories.