Documentation ¶
Index ¶
- Variables
- func IgnoredForTopology(p *v1.Pod) bool
- func IncompatibleReqAcrossInstanceTypes(requirements scheduling.Requirements, ...) (string, int)
- func InstanceTypeList(instanceTypeOptions []*cloudprovider.InstanceType) string
- func NominatePodEvent(pod *v1.Pod, node *v1.Node, nodeClaim *v1beta1.NodeClaim) events.Event
- func PodFailedToScheduleEvent(pod *v1.Pod, err error) events.Event
- func RequirementIncompatibleWithMinValues(cumulativeMinRequirementsFromInstanceTypes map[string]sets.Set[string], ...) string
- func TopologyListOptions(namespace string, labelSelector *metav1.LabelSelector) *client.ListOptions
- type ExistingNode
- type NodeClaim
- type NodeClaimTemplate
- type Preferences
- type Queue
- type Results
- type Scheduler
- type Topology
- func (t *Topology) AddRequirements(podRequirements, nodeRequirements scheduling.Requirements, p *v1.Pod, ...) (scheduling.Requirements, error)
- func (t *Topology) Record(p *v1.Pod, requirements scheduling.Requirements, ...)
- func (t *Topology) Register(topologyKey string, domain string)
- func (t *Topology) Update(ctx context.Context, p *v1.Pod) error
- type TopologyGroup
- func (t *TopologyGroup) AddOwner(key types.UID)
- func (t *TopologyGroup) Counts(pod *v1.Pod, requirements scheduling.Requirements, ...) bool
- func (t *TopologyGroup) Get(pod *v1.Pod, podDomains, nodeDomains *scheduling.Requirement) *scheduling.Requirement
- func (t *TopologyGroup) Hash() uint64
- func (t *TopologyGroup) IsOwnedBy(key types.UID) bool
- func (t *TopologyGroup) Record(domains ...string)
- func (t *TopologyGroup) Register(domains ...string)
- func (t *TopologyGroup) RemoveOwner(key types.UID)
- type TopologyNodeFilter
- type TopologyType
- type VolumeTopology
Constants ¶
This section is empty.
Variables ¶
var (
    SimulationDurationSeconds = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Namespace: metrics.Namespace,
            Subsystem: "provisioner",
            Name:      "scheduling_simulation_duration_seconds",
            Help:      "Duration of scheduling simulations used for deprovisioning and provisioning in seconds.",
            Buckets:   metrics.DurationBuckets(),
        },
        []string{
            controllerLabel,
        },
    )
    QueueDepth = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Namespace: metrics.Namespace,
            Subsystem: "provisioner",
            Name:      "scheduling_queue_depth",
            Help:      "The number of pods currently waiting to be scheduled.",
        },
        []string{
            controllerLabel,
            schedulingIDLabel,
        },
    )
)
var MaxInstanceTypes = 60
MaxInstanceTypes restricts the number of instance types sent for launch. It is intentionally a var rather than a const so that tests can override it.
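For example, a test can lower the cap and restore it afterward. This is only a sketch; the test name is illustrative and the import path is assumed to be the package's canonical one:

package scheduling_test

import (
    "testing"

    "sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling"
)

func TestWithSmallInstanceTypeCap(t *testing.T) {
    // Override the package-level var for this test and restore it afterward.
    orig := scheduling.MaxInstanceTypes
    scheduling.MaxInstanceTypes = 5
    t.Cleanup(func() { scheduling.MaxInstanceTypes = orig })

    // ... exercise code paths that truncate instance types ...
}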
var PodNominationRateLimiter = flowcontrol.NewTokenBucketRateLimiter(5, 10)
PodNominationRateLimiter is a pointer so that it rate-limits across events.
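A minimal sketch of how a single shared token-bucket limiter gates event emission; maybeNominate is a hypothetical wrapper, not part of this package:

package main

import (
    "fmt"

    "k8s.io/client-go/util/flowcontrol"
)

// One shared limiter (5 QPS, burst of 10) is reused across all calls,
// so the rate limit applies globally rather than per event.
var limiter = flowcontrol.NewTokenBucketRateLimiter(5, 10)

// maybeNominate emits a nomination message only when a token is available.
func maybeNominate(pod string) {
    if limiter.TryAccept() { // non-blocking; returns false when the bucket is empty
        fmt.Printf("nominating pod %s\n", pod)
    }
}

func main() {
    for i := 0; i < 20; i++ {
        maybeNominate(fmt.Sprintf("pod-%d", i)) // only roughly the first 10 succeed immediately
    }
}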
Functions ¶
func IgnoredForTopology ¶
func IgnoredForTopology(p *v1.Pod) bool
func IncompatibleReqAcrossInstanceTypes ¶ added in v0.35.0
func IncompatibleReqAcrossInstanceTypes(requirements scheduling.Requirements, instanceTypes cloudprovider.InstanceTypes) (string, int)
func InstanceTypeList ¶
func InstanceTypeList(instanceTypeOptions []*cloudprovider.InstanceType) string
func NominatePodEvent ¶
func NominatePodEvent(pod *v1.Pod, node *v1.Node, nodeClaim *v1beta1.NodeClaim) events.Event
func PodFailedToScheduleEvent ¶
func PodFailedToScheduleEvent(pod *v1.Pod, err error) events.Event
func RequirementIncompatibleWithMinValues ¶ added in v0.35.0
func RequirementIncompatibleWithMinValues(cumulativeMinRequirementsFromInstanceTypes map[string]sets.Set[string], requirements scheduling.Requirements) string
func TopologyListOptions ¶
func TopologyListOptions(namespace string, labelSelector *metav1.LabelSelector) *client.ListOptions
Types ¶
type ExistingNode ¶
type ExistingNode struct {
    *state.StateNode

    Pods []*v1.Pod
    // contains filtered or unexported fields
}
func NewExistingNode ¶
func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode
type NodeClaim ¶
type NodeClaim struct {
    NodeClaimTemplate

    Pods []*v1.Pod
    // contains filtered or unexported fields
}
NodeClaim is a set of constraints, compatible pods, and possible instance types that could fulfill these constraints. This will be turned into one or more actual node instances within the cluster after bin packing.
func NewNodeClaim ¶
func NewNodeClaim(nodeClaimTemplate *NodeClaimTemplate, topology *Topology, daemonResources v1.ResourceList, instanceTypes []*cloudprovider.InstanceType) *NodeClaim
func (*NodeClaim) FinalizeScheduling ¶
func (n *NodeClaim) FinalizeScheduling()
FinalizeScheduling is called once all scheduling has completed and allows the node to perform any cleanup necessary before its requirements are used for instance launching.
type NodeClaimTemplate ¶
type NodeClaimTemplate struct {
    v1beta1.NodeClaimTemplate

    NodePoolName        string
    InstanceTypeOptions cloudprovider.InstanceTypes
    Requirements        scheduling.Requirements
}
NodeClaimTemplate encapsulates the fields required to create a node and mirrors the fields in NodePool. These structs are maintained separately in order for fields like Requirements to be able to be stored more efficiently.
func NewNodeClaimTemplate ¶
func NewNodeClaimTemplate(nodePool *v1beta1.NodePool) *NodeClaimTemplate
func (*NodeClaimTemplate) ToNodeClaim ¶
func (i *NodeClaimTemplate) ToNodeClaim(nodePool *v1beta1.NodePool) *v1beta1.NodeClaim
type Preferences ¶
type Preferences struct {
    // ToleratePreferNoSchedule controls if preference relaxation adds a toleration for PreferNoSchedule taints. This only
    // helps if there is a corresponding taint, so we don't always add it.
    ToleratePreferNoSchedule bool
}
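As a sketch of what such relaxation can look like, a hypothetical helper (not this package's implementation) appends a blanket toleration for PreferNoSchedule taints:

package example

import (
    v1 "k8s.io/api/core/v1"
)

// toleratePreferNoSchedule adds a toleration matching any PreferNoSchedule
// taint, so the pod can relax onto nodes it would otherwise merely avoid.
func toleratePreferNoSchedule(pod *v1.Pod) {
    pod.Spec.Tolerations = append(pod.Spec.Tolerations, v1.Toleration{
        Operator: v1.TolerationOpExists, // empty Key + Exists matches all taints with this effect
        Effect:   v1.TaintEffectPreferNoSchedule,
    })
}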
type Queue ¶
type Queue struct {
// contains filtered or unexported fields
}
Queue is a queue of pods to be scheduled. It's used to attempt to schedule pods as long as we are making progress in scheduling. This is sometimes required to maintain zonal topology spreads with constrained pods, and can satisfy pod affinities that occur in a batch of pods if there are enough constraints provided.
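The progress-driven retry idea can be sketched generically; tryScheduleOne is a hypothetical callback standing in for a single scheduling attempt:

package example

// scheduleAll keeps requeueing failed pods and stops only once a full pass
// over the queue makes no progress, mirroring the "as long as we are making
// progress" behavior described above.
func scheduleAll(pods []string, tryScheduleOne func(pod string) bool) (unschedulable []string) {
    queue := pods
    for len(queue) > 0 {
        var failed []string
        for _, p := range queue {
            if !tryScheduleOne(p) {
                failed = append(failed, p)
            }
        }
        if len(failed) == len(queue) {
            return failed // no pod scheduled this pass; give up
        }
        queue = failed
    }
    return nil
}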
func NewQueue ¶
NewQueue constructs a new queue given the input pods, sorting them to optimize for bin-packing into nodes.
type Results ¶
type Results struct {
    NewNodeClaims []*NodeClaim
    ExistingNodes []*ExistingNode
    PodErrors     map[*v1.Pod]error
}
Results contains the results of the scheduling operation
func (Results) AllNonPendingPodsScheduled ¶
AllNonPendingPodsScheduled returns true if all pods were scheduled. We don't care if a pod was pending before consolidation and will still be pending after; it may be a pod that we can't schedule at all, and we don't want it to block consolidation.
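In spirit (with hypothetical helper names, not the package's code), the check reduces to ignoring errors for pods that were already pending before the run:

package example

import (
    v1 "k8s.io/api/core/v1"
)

// allNonPendingScheduled reports whether every scheduling failure involved a
// pod that was already pending beforehand; wasPendingBefore is a hypothetical
// lookup against the pre-run cluster state.
func allNonPendingScheduled(podErrors map[*v1.Pod]error, wasPendingBefore func(*v1.Pod) bool) bool {
    for pod := range podErrors {
        if !wasPendingBefore(pod) {
            return false
        }
    }
    return true
}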
func (Results) NonPendingPodSchedulingErrors ¶
NonPendingPodSchedulingErrors creates a string, suitable for presentation, that describes why the non-pending pods failed to schedule.
func (Results) Record ¶ added in v0.35.0
Record sends events and log messages for the results produced by a scheduling run. It also nominates nodes in the cluster state based on that run, signaling to other components that rely on the cluster state that a recorded scheduling run depends on these nodes.
func (Results) TruncateInstanceTypes ¶ added in v0.35.0
TruncateInstanceTypes filters each result down to the maximum number of instance types that should be considered. Truncation can matter when minValues is specified for a requirement key, so this method re-evaluates the NodeClaims returned by the scheduler after truncation, removes the ones that became invalid, and shifts their pods to PodErrors so that the scheduler can reconsider them in the next iteration. This covers the corner case where even 100 instance types in the NodeClaim fail to meet a particular minimum requirement.
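A sketch of that flow against the exported types, with the truncation and minValues checks passed in as hypothetical helpers:

package scheduling

import "errors"

var errMinValues = errors.New("truncated instance types no longer satisfy minValues")

// truncateAndRevalidate caps each NodeClaim's instance types, drops the claims
// whose truncated set can no longer satisfy minValues, and shifts their pods
// into PodErrors so the scheduler reconsiders them next iteration.
func truncateAndRevalidate(
    results Results,
    truncate func(*NodeClaim), // hypothetical: keep at most MaxInstanceTypes options
    satisfiesMinValues func(*NodeClaim) bool, // hypothetical: re-check minValues after truncation
) Results {
    var valid []*NodeClaim
    for _, nc := range results.NewNodeClaims {
        truncate(nc)
        if satisfiesMinValues(nc) {
            valid = append(valid, nc)
            continue
        }
        for _, pod := range nc.Pods {
            results.PodErrors[pod] = errMinValues
        }
    }
    results.NewNodeClaims = valid
    return results
}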
type Scheduler ¶
type Scheduler struct {
// contains filtered or unexported fields
}
func NewScheduler ¶
type Topology ¶
type Topology struct {
// contains filtered or unexported fields
}
func NewTopology ¶
func (*Topology) AddRequirements ¶
func (t *Topology) AddRequirements(podRequirements, nodeRequirements scheduling.Requirements, p *v1.Pod, compatabilityOptions ...functional.Option[scheduling.CompatibilityOptions]) (scheduling.Requirements, error)
AddRequirements tightens the input requirements by adding additional requirements that are being enforced by topology spreads, affinities, anti-affinities, or inverse anti-affinities. The nodeHostname is the hostname that we are currently considering placing the pod on. It returns these newly tightened requirements, or an error in the case of a set of requirements that cannot be satisfied.
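The tightening itself is essentially set intersection. A self-contained illustration with plain string sets (not the package's Requirements API):

package main

import (
    "errors"
    "fmt"
)

// tighten intersects the domains a pod allows with the domains a topology
// constraint enforces; an empty intersection means the pod cannot schedule.
func tighten(podAllows, topologyEnforces map[string]bool) (map[string]bool, error) {
    out := map[string]bool{}
    for domain := range podAllows {
        if topologyEnforces[domain] {
            out[domain] = true
        }
    }
    if len(out) == 0 {
        return nil, errors.New("no compatible topology domain")
    }
    return out, nil
}

func main() {
    pod := map[string]bool{"zone-a": true, "zone-b": true}
    topo := map[string]bool{"zone-b": true, "zone-c": true}
    tightened, _ := tighten(pod, topo)
    fmt.Println(tightened) // map[zone-b:true]
}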
func (*Topology) Record ¶
func (t *Topology) Record(p *v1.Pod, requirements scheduling.Requirements, compatabilityOptions ...functional.Option[scheduling.CompatibilityOptions])
Record records the topology changes given that pod p is scheduled on a node with the given requirements.
func (*Topology) Register ¶
Register is used to register a domain as available across topologies for the given topology key.
func (*Topology) Update ¶
Update unregisters the pod as the owner of all affinities, then creates any new topologies based on the pod spec and registers the pod as the owner of all associated affinities, new or old. This allows Update() to be called after relaxation of a preference to properly break the topology <-> owner relationship so that the preferred topology will no longer influence scheduling.
type TopologyGroup ¶
type TopologyGroup struct {
    // Hashed Fields
    Key  string
    Type TopologyType
    // contains filtered or unexported fields
}
TopologyGroup is used to track pod counts that match a selector by the topology domain (e.g. SELECT COUNT(*) FROM pods GROUP BY(topology_key)).
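The analogy in miniature, with the topology domain of each matching pod as the grouping key:

package main

import "fmt"

// countByDomain groups matching pods by their topology-key value and counts
// each group, i.e. SELECT COUNT(*) FROM pods GROUP BY(topology_key).
func countByDomain(podDomains []string) map[string]int {
    counts := map[string]int{}
    for _, domain := range podDomains {
        counts[domain]++
    }
    return counts
}

func main() {
    fmt.Println(countByDomain([]string{"zone-a", "zone-a", "zone-b"}))
    // map[zone-a:2 zone-b:1]
}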
func NewTopologyGroup ¶
func NewTopologyGroup(topologyType TopologyType, topologyKey string, pod *v1.Pod, namespaces sets.Set[string], labelSelector *metav1.LabelSelector, maxSkew int32, minDomains *int32, domains sets.Set[string]) *TopologyGroup
func (*TopologyGroup) AddOwner ¶
func (t *TopologyGroup) AddOwner(key types.UID)
func (*TopologyGroup) Counts ¶
func (t *TopologyGroup) Counts(pod *v1.Pod, requirements scheduling.Requirements, compatabilityOptions ...functional.Option[scheduling.CompatibilityOptions]) bool
Counts returns true if the pod would count for the topology, given that it schedules to a node with the provided requirements.
func (*TopologyGroup) Get ¶
func (t *TopologyGroup) Get(pod *v1.Pod, podDomains, nodeDomains *scheduling.Requirement) *scheduling.Requirement
func (*TopologyGroup) Hash ¶
func (t *TopologyGroup) Hash() uint64
Hash is used so we can track single topologies that affect multiple groups of pods. If a deployment has 100x pods with self anti-affinity, we track that as a single topology with 100 owners instead of 100x topologies.
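The deduplication idea, sketched with FNV over illustrative fields (the package's actual hash inputs may differ):

package main

import (
    "fmt"
    "hash/fnv"
)

// hashTopology collapses identical topologies to one key so owners accumulate
// on a single entry instead of duplicating state per pod.
func hashTopology(key, topologyType string) uint64 {
    h := fnv.New64a()
    h.Write([]byte(key))
    h.Write([]byte{0}) // separator so ("ab","c") hashes differently from ("a","bc")
    h.Write([]byte(topologyType))
    return h.Sum64()
}

func main() {
    owners := map[uint64]int{}
    for i := 0; i < 100; i++ { // 100 pods sharing one self anti-affinity
        owners[hashTopology("kubernetes.io/hostname", "pod anti-affinity")]++
    }
    fmt.Println(len(owners)) // 1 topology, tracked once with 100 owners
}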
func (*TopologyGroup) Record ¶
func (t *TopologyGroup) Record(domains ...string)
func (*TopologyGroup) Register ¶
func (t *TopologyGroup) Register(domains ...string)
Register ensures that the topology is aware of the given domain names.
func (*TopologyGroup) RemoveOwner ¶
func (t *TopologyGroup) RemoveOwner(key types.UID)
type TopologyNodeFilter ¶
type TopologyNodeFilter []scheduling.Requirements
TopologyNodeFilter is used to determine if a given actual node or scheduling node matches the pod's node selectors and required node affinity terms. This is used with topology spread constraints to determine if the node should be included for topology counting purposes. This is only used with topology spread constraints as affinities/anti-affinities always count across all nodes. A nil or zero-value TopologyNodeFilter behaves well and the filter returns true for all nodes.
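A simplified mirror of that contract; treating the requirement sets as ORed terms follows how Kubernetes combines node selector terms, and is an assumption here rather than a statement about this package's internals:

package example

// nodeFilter stands in for TopologyNodeFilter: each element is one
// requirement set the node could satisfy.
type nodeFilter []func(nodeLabels map[string]string) bool

// matches admits every node for a nil/zero-value filter, and otherwise
// accepts the node if any one requirement set matches.
func (f nodeFilter) matches(nodeLabels map[string]string) bool {
    if len(f) == 0 {
        return true // zero value behaves well: all nodes participate
    }
    for _, term := range f {
        if term(nodeLabels) {
            return true
        }
    }
    return false
}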
func MakeTopologyNodeFilter ¶
func MakeTopologyNodeFilter(p *v1.Pod) TopologyNodeFilter
func (TopologyNodeFilter) Matches ¶
func (t TopologyNodeFilter) Matches(node *v1.Node) bool
Matches returns true if the TopologyNodeFilter doesn't prohibit the node from participating in the topology.
func (TopologyNodeFilter) MatchesRequirements ¶
func (t TopologyNodeFilter) MatchesRequirements(requirements scheduling.Requirements, compatabilityOptions ...functional.Option[scheduling.CompatibilityOptions]) bool
MatchesRequirements returns true if the TopologyNodeFilter doesn't prohibit a node with the requirements from participating in the topology. This method allows checking the requirements from a scheduling.NodeClaim to see if the node we will soon create participates in this topology.
type TopologyType ¶
type TopologyType byte
const (
    TopologyTypeSpread TopologyType = iota
    TopologyTypePodAffinity
    TopologyTypePodAntiAffinity
)
func (TopologyType) String ¶
func (t TopologyType) String() string
type VolumeTopology ¶
type VolumeTopology struct {
// contains filtered or unexported fields
}
func NewVolumeTopology ¶
func NewVolumeTopology(kubeClient client.Client) *VolumeTopology
func (*VolumeTopology) ValidatePersistentVolumeClaims ¶
ValidatePersistentVolumeClaims returns an error if the pod doesn't appear to be valid with respect to PVCs (e.g. the PVC is not found or references an unknown storage class).
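A sketch of such a check with controller-runtime (storage-class validation omitted; validatePVCs is a hypothetical stand-in, not this package's method):

package example

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// validatePVCs returns an error if any PVC referenced by the pod's volumes
// cannot be fetched from the cluster.
func validatePVCs(ctx context.Context, kubeClient client.Client, pod *v1.Pod) error {
    for _, vol := range pod.Spec.Volumes {
        if vol.PersistentVolumeClaim == nil {
            continue
        }
        pvc := &v1.PersistentVolumeClaim{}
        nn := types.NamespacedName{Namespace: pod.Namespace, Name: vol.PersistentVolumeClaim.ClaimName}
        if err := kubeClient.Get(ctx, nn, pvc); err != nil {
            return fmt.Errorf("getting pvc %q for pod %q, %w", nn.Name, pod.Name, err)
        }
    }
    return nil
}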