Documentation ¶
Index ¶
- Constants
- Variables
- func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set)
- func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func CreateSelectorFromLabels(aL map[string]string) labels.Selector
- func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod
- func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string
- func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func GetEquivalencePod(pod *v1.Pod) interface{}
- func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource
- func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, ...) algorithm.FitPredicate
- func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate
- func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate
- func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer
- func NewVolumeNodePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, ...) algorithm.FitPredicate
- func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate
- func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, ...) (bool, []algorithm.PredicateFailureReason, error)
- func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer)
- type CachedNodeInfo
- type CachedPersistentVolumeClaimInfo
- type CachedPersistentVolumeInfo
- type EquivalencePod
- type FailureReason
- type InsufficientResourceError
- type MaxPDVolumeCountChecker
- type NodeInfo
- type NodeLabelChecker
- type PersistentVolumeClaimInfo
- type PersistentVolumeInfo
- type PodAffinityChecker
- type PredicateFailureError
- type PredicateMetadataFactory
- type PredicateMetadataProducer
- type ServiceAffinity
- type VolumeFilter
- type VolumeNodeChecker
- type VolumeZoneChecker
Examples ¶
- FindLabelsInSet
Constants ¶
const (
	MatchInterPodAffinity = "MatchInterPodAffinity"
)
Variables ¶
var (
	// NOTE: If you add a new predicate failure error for a predicate that can never
	// be made to pass by removing pods, or you change an existing predicate so that
	// it can never be made to pass by removing pods, you need to add the predicate
	// failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
	ErrDiskConflict                          = newPredicateFailureError("NoDiskConflict")
	ErrVolumeZoneConflict                    = newPredicateFailureError("NoVolumeZoneConflict")
	ErrNodeSelectorNotMatch                  = newPredicateFailureError("MatchNodeSelector")
	ErrPodAffinityNotMatch                   = newPredicateFailureError("MatchInterPodAffinity")
	ErrPodAffinityRulesNotMatch              = newPredicateFailureError("PodAffinityRulesNotMatch")
	ErrPodAntiAffinityRulesNotMatch          = newPredicateFailureError("PodAntiAffinityRulesNotMatch")
	ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch")
	ErrTaintsTolerationsNotMatch             = newPredicateFailureError("PodToleratesNodeTaints")
	ErrPodNotMatchHostName                   = newPredicateFailureError("HostName")
	ErrPodNotFitsHostPorts                   = newPredicateFailureError("PodFitsHostPorts")
	ErrNodeLabelPresenceViolated             = newPredicateFailureError("CheckNodeLabelPresence")
	ErrServiceAffinityViolated               = newPredicateFailureError("CheckServiceAffinity")
	ErrMaxVolumeCountExceeded                = newPredicateFailureError("MaxVolumeCount")
	ErrNodeUnderMemoryPressure               = newPredicateFailureError("NodeUnderMemoryPressure")
	ErrNodeUnderDiskPressure                 = newPredicateFailureError("NodeUnderDiskPressure")
	ErrNodeOutOfDisk                         = newPredicateFailureError("NodeOutOfDisk")
	ErrNodeNotReady                          = newPredicateFailureError("NodeNotReady")
	ErrNodeUnschedulable                     = newPredicateFailureError("NodeUnschedulable")
	ErrNodeUnknownCondition                  = newPredicateFailureError("NodeUnknownCondition")
	ErrVolumeNodeConflict                    = newPredicateFailureError("NoVolumeNodeConflict")

	// ErrFakePredicate is used for test only. The fake predicates returning false also return error
	// as ErrFakePredicate.
	ErrFakePredicate = newPredicateFailureError("FakePredicateError")
)
Functions ¶
func AddUnsetLabelsToMap ¶
func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set)
AddUnsetLabelsToMap backfills missing values in the map aL with the values found in labelSet, for each key named in labelsToAdd.
func CheckNodeConditionPredicate ¶
func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out-of-disk, network-unavailable, or not-ready conditions. Only node conditions are accounted for in this predicate.
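All fit predicates in this package share the same calling convention. The following is a minimal sketch of invoking this predicate directly; the import paths assume the in-tree layout of this package's era and may differ in your vendoring.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func main() {
	// A node reporting Ready=True; this predicate inspects only node conditions.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionTrue},
			},
		},
	}
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo-pod"}}

	// Metadata is not required by this predicate, so nil is passed here.
	fits, reasons, err := predicates.CheckNodeConditionPredicate(pod, nil, nodeInfo)
	fmt.Println(fits, reasons, err)
}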
func CheckNodeDiskPressurePredicate ¶
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node reporting disk pressure condition.
func CheckNodeMemoryPressurePredicate ¶
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node reporting memory pressure condition.
func CreateSelectorFromLabels ¶
func CreateSelectorFromLabels(aL map[string]string) labels.Selector
CreateSelectorFromLabels is used to define a selector that corresponds to the keys in a map.
func EssentialPredicates ¶
func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
EssentialPredicates are the predicates that all pods, including critical pods, need.
func FilterPodsByNamespace ¶
func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod
FilterPodsByNamespace filters pods outside the given namespace from the list.
func FindLabelsInSet ¶
func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string
FindLabelsInSet gets as many key/value pairs as possible out of a label set.
Example ¶
ExampleUtils is a unit test in the style of https://blog.golang.org/examples.
labelSubset := labels.Set{}
labelSubset["label1"] = "value1"
labelSubset["label2"] = "value2"
// Let's make believe that these pods are on the cluster.
// Utility functions will inspect their labels, filter them, and so on.
nsPods := []*v1.Pod{
	{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			Namespace: "ns1",
			Labels: map[string]string{
				"label1": "wontSeeThis",
				"label2": "wontSeeThis",
				"label3": "will_see_this",
			},
		},
	}, // first pod, which will be used via the utilities
	{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod2",
			Namespace: "ns1",
		},
	},
	{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod3ThatWeWontSee",
		},
	},
}
fmt.Println(FindLabelsInSet([]string{"label1", "label2", "label3"}, nsPods[0].ObjectMeta.Labels)["label3"])
AddUnsetLabelsToMap(labelSubset, []string{"label1", "label2", "label3"}, nsPods[0].ObjectMeta.Labels)
fmt.Println(labelSubset)
for _, pod := range FilterPodsByNamespace(nsPods, "ns1") {
	fmt.Print(pod.Name, ",")
}

Output:

will_see_this
label1=value1,label2=value2,label3=will_see_this
pod1,pod2,
func GeneralPredicates ¶
func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates that only non-critical pods need; EssentialPredicates are the predicates that all pods, including critical pods, need.
func GetEquivalencePod ¶
func GetEquivalencePod(pod *v1.Pod) interface{}
GetEquivalencePod returns an EquivalencePod, which contains a group of pod attributes that can be reused.
func GetResourceRequest ¶
func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource
GetResourceRequest returns a *schedulercache.Resource that covers the largest width in each resource dimension. Because init-containers run sequentially, we collect the max in each dimension iteratively. In contrast, we sum the resource vectors for regular containers since they run simultaneously.
Example:
Pod:
	InitContainers
		IC1:
			CPU: 2
			Memory: 1G
		IC2:
			CPU: 2
			Memory: 3G
	Containers
		C1:
			CPU: 2
			Memory: 1G
		C2:
			CPU: 1
			Memory: 1G
Result: CPU: 3, Memory: 3G
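To make the arithmetic concrete, here is a minimal, self-contained sketch of the max-over-init-containers / sum-over-regular-containers rule applied to the numbers above; plain int64 values stand in for the real resource quantities.

package main

import "fmt"

func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Init containers run one at a time: take the max in each dimension.
	initCPU := maxInt64(2, 2) // IC1, IC2 (cores)
	initMem := maxInt64(1, 3) // GiB

	// Regular containers run concurrently: sum each dimension.
	ctrCPU := int64(2 + 1) // C1 + C2
	ctrMem := int64(1 + 1)

	// The effective request is the per-dimension max of the two aggregates.
	fmt.Println(maxInt64(initCPU, ctrCPU), maxInt64(initMem, ctrMem)) // 3 3
}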
func NewMaxPDVolumeCountPredicate ¶
func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate
NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the number of volumes it requests that match a filter, together with those already present on the node. The maximum number is configurable to accommodate different systems.
The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume types, counts the number of unique volumes, and rejects the new pod if it would place the total count over the maximum.
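As a hedged sketch of wiring this up, written as if inside this package: fakePVInfo and fakePVCInfo are illustrative stand-ins for the Cached* implementations below, and the maximum of 16 is an arbitrary number, not a real cloud-provider default.

// fakePVInfo/fakePVCInfo satisfy PersistentVolumeInfo and PersistentVolumeClaimInfo.
type fakePVInfo struct{}

func (fakePVInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
	return nil, fmt.Errorf("no PV %q", pvID)
}

type fakePVCInfo struct{}

func (fakePVCInfo) GetPersistentVolumeClaimInfo(namespace, name string) (*v1.PersistentVolumeClaim, error) {
	return nil, fmt.Errorf("no PVC %s/%s", namespace, name)
}

func exampleMaxEBSVolumes(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) {
	// Cap EBS volumes at an illustrative maximum of 16.
	pred := NewMaxPDVolumeCountPredicate(EBSVolumeFilter, 16, fakePVInfo{}, fakePVCInfo{})
	fits, reasons, err := pred(pod, nil, nodeInfo)
	fmt.Println(fits, reasons, err)
}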
func NewNodeLabelPredicate ¶
func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate
func NewPodAffinityPredicate ¶
func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate
func NewPredicateMetadataFactory ¶
func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer
func NewVolumeNodePredicate ¶
func NewVolumeNodePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, client clientset.Interface) algorithm.FitPredicate
NewVolumeNodePredicate evaluates if a pod can fit due to the volumes it requests, given that some volumes have node topology constraints, particularly when using Local PVs. The requirement is that any pod that uses a PVC that is bound to a PV with topology constraints must be scheduled to a node that satisfies the PV's topology labels.
func NewVolumeZonePredicate ¶
func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate
NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given that some volumes may have zone scheduling constraints. The requirement is that any volume zone-labels must match the equivalent zone-labels on the node. It is OK for the node to have more zone-label constraints (for example, a hypothetical replicated volume might allow region-wide access).
Currently this is only supported with PersistentVolumeClaims, and looks to the labels only on the bound PersistentVolume.
Working with volumes declared inline in the pod specification (i.e. not using a PersistentVolume) is likely to be harder, as it would require determining the zone of a volume during scheduling, and that is likely to require calling out to the cloud provider. It seems that we are moving away from inline volume declarations anyway.
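A condensed sketch of the zone-label matching rule described above, written against the v1 core types and using the era's well-known zone label; the real predicate also handles region labels and comma-delimited multi-zone values, which this omits.

// zoneConflict reports whether a bound PV's zone label disagrees with the node's.
func zoneConflict(pv *v1.PersistentVolume, node *v1.Node) bool {
	const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
	pvZone, constrained := pv.Labels[zoneLabel]
	if !constrained {
		return false // the PV carries no zone constraint
	}
	return node.Labels[zoneLabel] != pvZone
}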
func NoDiskConflict ¶
func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume can't be scheduled there. This is specific to GCE PD, AWS EBS, Ceph RBD, and iSCSI for now:

- GCE PD allows multiple mounts as long as they're all read-only
- AWS EBS forbids any two pods mounting the same volume ID
- Ceph RBD forbids two pods that share the same monitor and match the same pool and image
- iSCSI forbids two pods that share the same IQN, LUN, and Target

TODO: migrate this into some per-volume specific code?
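For instance, the GCE PD rule above reduces to a comparison like this hypothetical helper; the real predicate walks every volume pair on the node.

// gcePDConflict: two mounts of the same PD may coexist only if both are read-only.
func gcePDConflict(a, b *v1.GCEPersistentDiskVolumeSource) bool {
	if a.PDName != b.PDName {
		return false
	}
	return !(a.ReadOnly && b.ReadOnly)
}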
func PodFitsHost ¶
func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodFitsHost checks if a pod's spec node name matches the current node.
func PodFitsHostPorts ¶
func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodFitsHostPorts checks if a node has free ports for the requested pod ports.
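The essence of the check can be sketched as follows; hostPortsFit is a hypothetical helper, and the in-tree version works from ports cached in schedulercache.NodeInfo rather than re-walking every pod.

// hostPortsFit reports whether none of the requested host ports is already taken.
func hostPortsFit(requested []int32, podsOnNode []*v1.Pod) bool {
	used := make(map[int32]bool)
	for _, p := range podsOnNode {
		for _, c := range p.Spec.Containers {
			for _, port := range c.Ports {
				if port.HostPort > 0 {
					used[port.HostPort] = true
				}
			}
		}
	}
	for _, hp := range requested {
		if used[hp] {
			return false
		}
	}
	return true
}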
func PodFitsResources ¶
func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodFitsResources checks if a node has sufficient resources, such as CPU, memory, GPU, and opaque integer resources, to run a pod. The first return value indicates whether the node has sufficient resources; the second lists the predicate failure reasons if it does not.
func PodMatchNodeSelector ¶
func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodMatchNodeSelector checks if a pod's node selector matches the node's labels.
func PodToleratesNodeNoExecuteTaints ¶
func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodToleratesNodeNoExecuteTaints checks if a pod's tolerations can tolerate the node's NoExecute taints.
func PodToleratesNodeTaints ¶
func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
PodToleratesNodeTaints checks if a pod's tolerations can tolerate the node's taints.
func RegisterPredicateMetadataProducer ¶
func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer)
Types ¶
type CachedNodeInfo ¶
type CachedNodeInfo struct {
corelisters.NodeLister
}
func (*CachedNodeInfo) GetNodeInfo ¶
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error)
GetNodeInfo returns cached data for the node 'id'.
type CachedPersistentVolumeClaimInfo ¶
type CachedPersistentVolumeClaimInfo struct {
corelisters.PersistentVolumeClaimLister
}
CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo.
func (*CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo ¶
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
GetPersistentVolumeClaimInfo fetches the claim with the specified name in the specified namespace.
type CachedPersistentVolumeInfo ¶
type CachedPersistentVolumeInfo struct {
corelisters.PersistentVolumeLister
}
CachedPersistentVolumeInfo implements PersistentVolumeInfo.
func (*CachedPersistentVolumeInfo) GetPersistentVolumeInfo ¶
func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
type EquivalencePod ¶
type EquivalencePod struct {
ControllerRef metav1.OwnerReference
}
EquivalencePod is a group of pod attributes which can be reused as equivalence to schedule other pods.
type FailureReason ¶
type FailureReason struct {
// contains filtered or unexported fields
}
func NewFailureReason ¶
func NewFailureReason(msg string) *FailureReason
func (*FailureReason) GetReason ¶
func (e *FailureReason) GetReason() string
type InsufficientResourceError ¶
type InsufficientResourceError struct {
	// ResourceName is the name of the resource that is insufficient
	ResourceName v1.ResourceName
	// contains filtered or unexported fields
}
InsufficientResourceError is an error type that indicates what kind of resource limit is hit and caused the unfitting failure.
func NewInsufficientResourceError ¶
func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError
func (*InsufficientResourceError) Error ¶
func (e *InsufficientResourceError) Error() string
func (*InsufficientResourceError) GetInsufficientAmount ¶
func (e *InsufficientResourceError) GetInsufficientAmount() int64
func (*InsufficientResourceError) GetReason ¶
func (e *InsufficientResourceError) GetReason() string
type MaxPDVolumeCountChecker ¶
type MaxPDVolumeCountChecker struct {
// contains filtered or unexported fields
}
type NodeLabelChecker ¶
type NodeLabelChecker struct {
// contains filtered or unexported fields
}
func (*NodeLabelChecker) CheckNodeLabelPresence ¶
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
CheckNodeLabelPresence checks whether all of the specified labels exist on a node or not, regardless of their value. If "presence" is false, it returns false if any of the requested labels matches any of the node's labels, otherwise it returns true. If "presence" is true, it returns false if any of the requested labels does not match any of the node's labels, otherwise it returns true.

Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels. In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected.

Alternately, eliminating nodes that have a certain label, regardless of its value, is also useful. A node may have a label with "retiring" as the key and the date as the value, and it may be desirable to avoid scheduling new pods on this node.
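Both modes collapse to a single comparison per label, as in this hedged sketch; labelPresenceOK is a hypothetical name, and nodeLabels is the node's labels.Set.

func labelPresenceOK(requested []string, nodeLabels labels.Set, presence bool) bool {
	for _, label := range requested {
		// Each requested label's presence must agree with the required state.
		if nodeLabels.Has(label) != presence {
			return false
		}
	}
	return true
}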
type PersistentVolumeClaimInfo ¶
type PersistentVolumeClaimInfo interface {
GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}
type PersistentVolumeInfo ¶
type PersistentVolumeInfo interface {
GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}
type PodAffinityChecker ¶
type PodAffinityChecker struct {
// contains filtered or unexported fields
}
func (*PodAffinityChecker) InterPodAffinityMatches ¶
func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)
InterPodAffinityMatches checks if a pod can be scheduled on the specified node with its pod affinity/anti-affinity configuration. The first return value indicates whether the pod can be scheduled on the specified node; the second lists the predicate failure reasons if it cannot.
type PredicateFailureError ¶
type PredicateFailureError struct {
PredicateName string
}
func (*PredicateFailureError) Error ¶
func (e *PredicateFailureError) Error() string
func (*PredicateFailureError) GetReason ¶
func (e *PredicateFailureError) GetReason() string
type PredicateMetadataFactory ¶
type PredicateMetadataFactory struct {
// contains filtered or unexported fields
}
func (*PredicateMetadataFactory) GetMetadata ¶
func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata
GetMetadata returns the predicateMetadata that will be used by various predicates.
type PredicateMetadataProducer ¶
type PredicateMetadataProducer func(pm *predicateMetadata)
PredicateMetadataProducer is a helper function type that decorates a predicateMetadata with additional precomputed information (see RegisterPredicateMetadataProducer).
func NewServiceAffinityPredicate ¶
func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer)
type ServiceAffinity ¶
type ServiceAffinity struct {
// contains filtered or unexported fields
}
type VolumeFilter ¶
type VolumeFilter struct {
	// Filter normal volumes
	FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
}
VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps.
var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AzureDisk != nil {
			return vol.AzureDisk.DiskName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AzureDisk != nil {
			return pv.Spec.AzureDisk.DiskName, true
		}
		return "", false
	},
}
AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes.
var EBSVolumeFilter VolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AWSElasticBlockStore != nil {
			return vol.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AWSElasticBlockStore != nil {
			return pv.Spec.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},
}
EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes.
var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.GCEPersistentDisk != nil {
			return vol.GCEPersistentDisk.PDName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.GCEPersistentDisk != nil {
			return pv.Spec.GCEPersistentDisk.PDName, true
		}
		return "", false
	},
}
GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes.
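Writing a custom VolumeFilter follows the same shape. The following is a purely illustrative example for NFS volumes (NFS is not subject to per-node attach limits, so one would not register such a filter in practice); the server+path pair serves as the unique volume ID.

var nfsVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.NFS != nil {
			// Server plus export path uniquely identifies the volume.
			return vol.NFS.Server + "/" + vol.NFS.Path, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.NFS != nil {
			return pv.Spec.NFS.Server + "/" + pv.Spec.NFS.Path, true
		}
		return "", false
	},
}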
type VolumeNodeChecker ¶
type VolumeNodeChecker struct {
// contains filtered or unexported fields
}
type VolumeZoneChecker ¶
type VolumeZoneChecker struct {
// contains filtered or unexported fields
}