Documentation ¶
Index ¶
- Constants
- Variables
- func AddToBeRemovedTaint(node *apiv1.Node, client kubernetes.Interface, taintEffect apiv1.TaintEffect) (*apiv1.Node, error)
- func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo
- func DeleteNode(node *v1.Node, client kubernetes.Interface) error
- func DeleteNodes(nodes []*v1.Node, client kubernetes.Interface) error
- func DeleteToBeRemovedTaint(node *apiv1.Node, client kubernetes.Interface) (*apiv1.Node, error)
- func GetLeaderElector(ctx context.Context, config LeaderElectConfig, coreClient v1.CoreV1Interface, ...) (*leaderelection.LeaderElector, context.Context, <-chan struct{}, error)
- func GetResourceLock(ns string, name string, coreClient v1.CoreV1Interface, ...) (resourcelock.Interface, error)
- func GetToBeRemovedTaint(node *apiv1.Node) (apiv1.Taint, bool)
- func GetToBeRemovedTime(node *apiv1.Node) (*time.Time, error)
- func NewCacheNodeWatcher(client kubernetes.Interface, stop <-chan struct{}) (v1lister.NodeLister, cache.InformerSynced)
- func NewCachePodWatcher(client kubernetes.Interface, stop <-chan struct{}) (v1lister.PodLister, cache.InformerSynced)
- func NewInClusterClient() (*kubernetes.Clientset, error)
- func NewOutOfClusterClient(kubeconfig string) (*kubernetes.Clientset, error)
- func NodeEmpty(node *v1.Node, nodeInfoMap map[string]*NodeInfo) bool
- func NodePodsRemaining(node *v1.Node, nodeInfoMap map[string]*NodeInfo) (int, bool)
- func PodIsDaemonSet(pod *v1.Pod) bool
- func PodIsStatic(pod *v1.Pod) bool
- func WaitForSync(tries int, stopChan <-chan struct{}, informers ...cache.InformerSynced) bool
- type FilteredNodesLister
- type FilteredPodsLister
- type LeaderElectConfig
- type NodeAvailableCapacity
- type NodeFilterFunc
- type NodeInfo
- type NodeLister
- type PodFilterFunc
- type PodLister
- type PodRequestedUsage
Constants ¶
const (
// ToBeRemovedByAutoscalerKey specifies the key the autoscaler uses to taint nodes as MARKED
ToBeRemovedByAutoscalerKey = "atlassian.com/escalator"
)
Variables ¶
var TaintEffectTypes = map[apiv1.TaintEffect]bool{
    apiv1.TaintEffectNoExecute:        true,
    apiv1.TaintEffectNoSchedule:       true,
    apiv1.TaintEffectPreferNoSchedule: true,
}
TaintEffectTypes is a map of TaintEffect to boolean true, used for validating supported taint types.
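Below is a minimal sketch of validating a taint effect against TaintEffectTypes before tainting a node; the surrounding function, error text, and escalator import path are illustrative assumptions:

import (
    "fmt"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
)

// validateTaintEffect rejects taint effects the autoscaler does not support.
func validateTaintEffect(effect apiv1.TaintEffect) error {
    if !k8s.TaintEffectTypes[effect] {
        return fmt.Errorf("unsupported taint effect: %q", effect)
    }
    return nil
}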
Functions ¶
func AddToBeRemovedTaint ¶
func AddToBeRemovedTaint(node *apiv1.Node, client kubernetes.Interface, taintEffect apiv1.TaintEffect) (*apiv1.Node, error)
AddToBeRemovedTaint takes a k8s node and adds the ToBeRemovedByAutoscaler taint to it, returning the most recent successful update of the node.
func CreateNodeNameToInfoMap ¶
func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo
CreateNodeNameToInfoMap creates a map of NodeInfo, mapping node names to nodes and pods to nodes. From the K8s cluster-autoscaler; based off the old scheduler cache.NodeInfo.
func DeleteNode ¶
func DeleteNode(node *v1.Node, client kubernetes.Interface) error
DeleteNode deletes a single node from Kubernetes
func DeleteNodes ¶
func DeleteNodes(nodes []*v1.Node, client kubernetes.Interface) error
DeleteNodes deletes multiple nodes from Kubernetes
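A sketch pairing DeleteNodes with NodeEmpty so that only drained nodes are deleted; the NodeInfo map would come from CreateNodeNameToInfoMap, and the helper name is illustrative:

import (
    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

// deleteEmptyNodes deletes only those candidate nodes with no non-daemonset pods left.
func deleteEmptyNodes(client kubernetes.Interface, candidates []*apiv1.Node, infos map[string]*k8s.NodeInfo) error {
    var empty []*apiv1.Node
    for _, n := range candidates {
        if k8s.NodeEmpty(n, infos) {
            empty = append(empty, n)
        }
    }
    return k8s.DeleteNodes(empty, client)
}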
func DeleteToBeRemovedTaint ¶
func DeleteToBeRemovedTaint(node *apiv1.Node, client kubernetes.Interface) (*apiv1.Node, error)
DeleteToBeRemovedTaint removes the ToBeRemovedByAutoscaler taint from the node if it exists, returning the latest successful update of the node.
func GetLeaderElector ¶ added in v1.1.0
func GetLeaderElector(ctx context.Context, config LeaderElectConfig, coreClient v1.CoreV1Interface, coordClient coordinationv1.CoordinationV1Interface, recorder record.EventRecorder, resourceLockID string) (*leaderelection.LeaderElector, context.Context, <-chan struct{}, error)
GetLeaderElector returns a leader elector
func GetResourceLock ¶ added in v1.1.0
func GetResourceLock(ns string, name string, coreClient v1.CoreV1Interface, coordClient coordinationv1.CoordinationV1Interface, recorder record.EventRecorder, resourceLockID string) (resourcelock.Interface, error)
GetResourceLock returns a resource lock for leader election
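A sketch of wiring up leader election with these helpers; the durations, namespace, lock name, and the semantics of the returned context and stop channel are assumptions, not confirmed behaviour:

import (
    "context"
    "time"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/record"
)

// runWhenLeader starts a leader elector and blocks until the stop channel fires.
func runWhenLeader(ctx context.Context, client kubernetes.Interface, recorder record.EventRecorder, id string) error {
    conf := k8s.LeaderElectConfig{
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 10 * time.Second,
        RetryPeriod:   2 * time.Second,
        Namespace:     "kube-system", // illustrative
        Name:          "escalator",   // illustrative lock name
    }
    elector, leaderCtx, stopCh, err := k8s.GetLeaderElector(ctx, conf, client.CoreV1(), client.CoordinationV1(), recorder, id)
    if err != nil {
        return err
    }
    go elector.Run(leaderCtx) // assumption: the returned context drives the elector
    <-stopCh                  // assumption: closed on a leadership transition
    return nil
}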
func GetToBeRemovedTaint ¶
func GetToBeRemovedTaint(node *apiv1.Node) (apiv1.Taint, bool)
GetToBeRemovedTaint returns the taint associated with the ToBeRemovedByAutoscalerKey, and whether the node is tainted with it.
func GetToBeRemovedTime ¶
func GetToBeRemovedTime(node *apiv1.Node) (*time.Time, error)
GetToBeRemovedTime returns the time the node was tainted; the result will be nil if the taint does not exist.
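A sketch of the full taint lifecycle using the functions above; the helper name and log output are illustrative:

import (
    "fmt"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

// cycleTaint taints a node for removal, inspects the taint, then removes it.
func cycleTaint(client kubernetes.Interface, node *apiv1.Node) error {
    tainted, err := k8s.AddToBeRemovedTaint(node, client, apiv1.TaintEffectNoSchedule)
    if err != nil {
        return err
    }
    if _, ok := k8s.GetToBeRemovedTaint(tainted); ok {
        taintedAt, err := k8s.GetToBeRemovedTime(tainted)
        if err != nil {
            return err
        }
        fmt.Printf("%s tainted at %v\n", tainted.Name, taintedAt)
    }
    // Untaint the node, e.g. if the scale down is cancelled.
    _, err = k8s.DeleteToBeRemovedTaint(tainted, client)
    return err
}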
func NewCacheNodeWatcher ¶
func NewCacheNodeWatcher(client kubernetes.Interface, stop <-chan struct{}) (v1lister.NodeLister, cache.InformerSynced)
NewCacheNodeWatcher creates a new IndexerInformer for watching nodes from cache
func NewCachePodWatcher ¶
func NewCachePodWatcher(client kubernetes.Interface, stop <-chan struct{}) (v1lister.PodLister, cache.InformerSynced)
NewCachePodWatcher creates a new IndexerInformer for watching pods from cache
func NewInClusterClient ¶
func NewInClusterClient() (*kubernetes.Clientset, error)
NewInClusterClient returns a new kubernetes clientset from inside the cluster
func NewOutOfClusterClient ¶
func NewOutOfClusterClient(kubeconfig string) (*kubernetes.Clientset, error)
NewOutOfClusterClient returns a new kubernetes clientset using a kubeconfig file, for running outside the cluster.
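A sketch of choosing between the two constructors based on whether a kubeconfig path was supplied; the helper name is illustrative:

import (
    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    "k8s.io/client-go/kubernetes"
)

// newClient falls back to the in-cluster config when no kubeconfig is given.
func newClient(kubeconfig string) (*kubernetes.Clientset, error) {
    if kubeconfig == "" {
        return k8s.NewInClusterClient()
    }
    return k8s.NewOutOfClusterClient(kubeconfig)
}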
func NodePodsRemaining ¶
func NodePodsRemaining(node *v1.Node, nodeInfoMap map[string]*NodeInfo) (int, bool)
NodePodsRemaining returns the number of pods on the node, excluding daemonset pods.
func PodIsDaemonSet ¶
func PodIsDaemonSet(pod *v1.Pod) bool
PodIsDaemonSet reports whether the pod belongs to a DaemonSet.
func PodIsStatic ¶
func PodIsStatic(pod *v1.Pod) bool
PodIsStatic reports whether the pod is a static pod.
func WaitForSync ¶
func WaitForSync(tries int, stopChan <-chan struct{}, informers ...cache.InformerSynced) bool
WaitForSync waits for the caches of all the registered informers to sync. It will try <tries> times and return the result.
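A sketch combining the cache watchers with WaitForSync at startup; the retry count and helper name are illustrative:

import (
    "fmt"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    "k8s.io/client-go/kubernetes"
    v1lister "k8s.io/client-go/listers/core/v1"
)

// startWatchers builds pod and node watchers and waits for their caches to sync.
func startWatchers(client kubernetes.Interface, stop <-chan struct{}) (v1lister.PodLister, v1lister.NodeLister, error) {
    podLister, podsSynced := k8s.NewCachePodWatcher(client, stop)
    nodeLister, nodesSynced := k8s.NewCacheNodeWatcher(client, stop)
    if !k8s.WaitForSync(3, stop, podsSynced, nodesSynced) {
        return nil, nil, fmt.Errorf("informer caches failed to sync")
    }
    return podLister, nodeLister, nil
}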
Types ¶
type FilteredNodesLister ¶
type FilteredNodesLister struct {
// contains filtered or unexported fields
}
FilteredNodesLister lists nodes filtered by labels
type FilteredPodsLister ¶
type FilteredPodsLister struct {
// contains filtered or unexported fields
}
FilteredPodsLister lists pods from a podLister, filtered by namespace.
type LeaderElectConfig ¶ added in v1.1.0
type LeaderElectConfig struct {
    LeaseDuration time.Duration
    RenewDeadline time.Duration
    RetryPeriod   time.Duration
    Namespace     string
    Name          string
}
LeaderElectConfig stores the configuration for a leader election lock
type NodeAvailableCapacity ¶ added in v1.13.2
type NodeAvailableCapacity struct {
    Total                  scheduler.Resource
    LargestAvailableMemory scheduler.Resource
    LargestAvailableCPU    scheduler.Resource
}
func CalculateNodesCapacity ¶ added in v1.13.2
CalculateNodesCapacity calculates the total Allocatable node capacity for all nodes, as well as the capacity of the two nodes with the largest available CPU and memory.
type NodeFilterFunc ¶
NodeFilterFunc provides a definition for a predicate based on matching a node; return true to keep the node.
type NodeInfo ¶ added in v1.10.0
type NodeInfo struct {
// contains filtered or unexported fields
}
NodeInfo provides an abstraction on top of node-to-pods mappings. It replaces scheduler cache.NodeInfo, which was removed from the public API in an older version of Kubernetes, and maintains the same interface. NodeInfo is thread safe.
func NewNodeInfo ¶ added in v1.10.0
func NewNodeInfo() *NodeInfo
NewNodeInfo creates a new empty NodeInfo struct
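Since NodeInfo's fields are unexported, a typical pattern is to build the map with CreateNodeNameToInfoMap and query it through NodePodsRemaining; a sketch, with the helper name illustrative:

import (
    "fmt"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
)

// reportRemaining prints how many non-daemonset pods remain on each node.
func reportRemaining(pods []*apiv1.Pod, nodes []*apiv1.Node) {
    infos := k8s.CreateNodeNameToInfoMap(pods, nodes)
    for _, node := range nodes {
        if remaining, ok := k8s.NodePodsRemaining(node, infos); ok {
            fmt.Printf("%s: %d pods remaining\n", node.Name, remaining)
        }
    }
}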
type NodeLister ¶
NodeLister provides an interface for anything that can list a node
func NewFilteredNodesLister ¶
func NewFilteredNodesLister(nodeLister v1lister.NodeLister, filterFunc NodeFilterFunc) NodeLister
NewFilteredNodesLister creates a new lister and informerSynced for all nodes, filtered by nodegroup (nodeLabels).
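Assuming NodeFilterFunc has the shape func(*v1.Node) bool implied by its description, a sketch of a nodegroup-scoped lister; the label key and value are illustrative:

import (
    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
    v1lister "k8s.io/client-go/listers/core/v1"
)

// newNodeGroupLister keeps only nodes carrying the nodegroup label.
func newNodeGroupLister(base v1lister.NodeLister) k8s.NodeLister {
    return k8s.NewFilteredNodesLister(base, func(node *apiv1.Node) bool {
        return node.Labels["customer"] == "shared" // illustrative label
    })
}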
type PodFilterFunc ¶
PodFilterFunc provides a definition for a predicate based on matching a pod; return true to keep the pod.
type PodLister ¶
PodLister provides an interface for anything that can list a pod
func NewFilteredPodsLister ¶
func NewFilteredPodsLister(podLister v1lister.PodLister, filterFunc PodFilterFunc) PodLister
NewFilteredPodsLister creates a new lister and informerSynced for a FilteredPodsLister
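Assuming PodFilterFunc has the shape func(*v1.Pod) bool implied by its description, a sketch of a namespace-scoped pod lister; the helper name is illustrative:

import (
    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
    v1lister "k8s.io/client-go/listers/core/v1"
)

// newNamespacePodLister keeps only pods in the given namespace.
func newNamespacePodLister(base v1lister.PodLister, ns string) k8s.PodLister {
    return k8s.NewFilteredPodsLister(base, func(pod *apiv1.Pod) bool {
        return pod.Namespace == ns
    })
}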
type PodRequestedUsage ¶ added in v1.13.2
type PodRequestedUsage struct {
    Total                scheduler.Resource
    LargestPendingMemory scheduler.Resource
    LargestPendingCPU    scheduler.Resource
}
func CalculatePodsRequestedUsage ¶ added in v1.13.2
func CalculatePodsRequestedUsage(pods []*v1.Pod) (PodRequestedUsage, error)
CalculatePodsRequestedUsage returns the requested resource usage of the given pods: the Total across all pods, as well as the largest pending pod by CPU and by memory.
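A sketch of reading the result; the field names come from the struct above, and the helper name and output format are illustrative:

import (
    "fmt"

    "github.com/atlassian/escalator/pkg/k8s" // assumed import path
    apiv1 "k8s.io/api/core/v1"
)

// inspectRequests sums pod requests and reports the largest pending pods.
func inspectRequests(pods []*apiv1.Pod) error {
    usage, err := k8s.CalculatePodsRequestedUsage(pods)
    if err != nil {
        return err
    }
    fmt.Printf("total requested: %+v\n", usage.Total)
    fmt.Printf("largest pending by CPU: %+v\n", usage.LargestPendingCPU)
    fmt.Printf("largest pending by memory: %+v\n", usage.LargestPendingMemory)
    return nil
}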