Documentation ¶
Index ¶
- Constants
- func CleanUpAndRecordFailedScaleDownEvent(ctx *context.AutoscalingContext, node *apiv1.Node, nodeGroupId string, ...)
- func IsNodeBeingDeleted(node *apiv1.Node, timestamp time.Time) bool
- func ParseShutdownGracePeriodsAndPriorities(priorityGracePeriodStr string) []kubelet_config.ShutdownGracePeriodByPodPriority
- func RegisterAndRecordSuccessfulScaleDownEvent(ctx *context.AutoscalingContext, ...)
- func SingleRuleDrainConfig(shutdownGracePeriodSeconds int) []kubelet_config.ShutdownGracePeriodByPodPriority
- func UpdateSoftDeletionTaints(context *context.AutoscalingContext, uneededNodes, neededNodes []*apiv1.Node) (errors []error)
- func WaitForDelayDeletion(node *apiv1.Node, nodeLister kubernetes.NodeLister, timeout time.Duration) errors.AutoscalerError
- type Actuator
- func (a *Actuator) CheckStatus() scaledown.ActuationStatus
- func (a *Actuator) ClearResultsNotNewerThan(t time.Time)
- func (a *Actuator) DeletionResults() (map[string]status.NodeDeleteResult, time.Time)
- func (a *Actuator) StartDeletion(empty, drain []*apiv1.Node) (status.ScaleDownResult, []*status.ScaleDownNode, errors.AutoscalerError)
- type Evictor
- type GroupDeletionScheduler
- func (ds *GroupDeletionScheduler) AbortNodeDeletion(node *apiv1.Node, nodeGroupId string, drain bool, errMsg string, ...)
- func (ds *GroupDeletionScheduler) ResetAndReportMetrics()
- func (ds *GroupDeletionScheduler) ScheduleDeletion(nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup, batchSize int, ...)
- type NodeDeletionBatcher
- type UpdateLatencyTracker
Constants ¶
const (
	// MaxKubernetesEmptyNodeDeletionTime is the maximum time needed by Kubernetes to delete an empty node.
	MaxKubernetesEmptyNodeDeletionTime = 3 * time.Minute
	// MaxCloudProviderNodeDeletionTime is the maximum time needed by the cloud provider to delete a node.
	MaxCloudProviderNodeDeletionTime = 5 * time.Minute
)
const (
	// DefaultEvictionRetryTime is the time after which CA retries a failed pod eviction.
	DefaultEvictionRetryTime = 10 * time.Second
	// DefaultPodEvictionHeadroom is the extra time we wait to catch situations when the pod is ignoring SIGTERM and
	// is killed with SIGKILL after GracePeriodSeconds elapses.
	DefaultPodEvictionHeadroom = 30 * time.Second
)
const (
	// DelayDeletionAnnotationPrefix is the prefix of annotations marking a node as needing to wait
	// for other K8s components before the node is deleted.
	DelayDeletionAnnotationPrefix = "delay-deletion.cluster-autoscaler.kubernetes.io/"
)
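The delay-deletion annotation is set by other components; the sketch below only shows how such an annotation could be detected on a node. The helper name and the import path are illustrative assumptions, not part of this package.

package example

import (
	"strings"

	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
)

// hasDelayDeletionAnnotation reports whether the node still carries any
// annotation with the delay-deletion prefix, i.e. whether some component has
// asked the autoscaler to hold off deleting it.
func hasDelayDeletionAnnotation(node *apiv1.Node) bool {
	for key := range node.Annotations {
		if strings.HasPrefix(key, actuation.DelayDeletionAnnotationPrefix) {
			return true
		}
	}
	return false
}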
Variables ¶
This section is empty.
Functions ¶
func CleanUpAndRecordFailedScaleDownEvent ¶
func CleanUpAndRecordFailedScaleDownEvent(ctx *context.AutoscalingContext, node *apiv1.Node, nodeGroupId string, drain bool, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, errMsg string, status status.NodeDeleteResult)
CleanUpAndRecordFailedScaleDownEvent records a failed scale-down event and logs an error.
func IsNodeBeingDeleted ¶
func IsNodeBeingDeleted(node *apiv1.Node, timestamp time.Time) bool
IsNodeBeingDeleted returns true iff a given node is being deleted.
func ParseShutdownGracePeriodsAndPriorities ¶
func ParseShutdownGracePeriodsAndPriorities(priorityGracePeriodStr string) []kubelet_config.ShutdownGracePeriodByPodPriority
ParseShutdownGracePeriodsAndPriorities parses priorityGracePeriodStr and returns a slice of ShutdownGracePeriodByPodPriority if parsing succeeds. Otherwise it returns an empty list.
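A minimal sketch of calling the parser. The comma-separated "priority:gracePeriodSeconds" input format shown here is an assumption about the flag value this function parses, not something stated above.

package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
)

func main() {
	// Assumed format: comma-separated "priority:gracePeriodSeconds" pairs.
	rules := actuation.ParseShutdownGracePeriodsAndPriorities("1000:120,0:60")
	if len(rules) == 0 {
		fmt.Println("could not parse the drain priority config")
		return
	}
	for _, r := range rules {
		fmt.Printf("priority %d -> %ds shutdown grace period\n", r.Priority, r.ShutdownGracePeriodSeconds)
	}
}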
func RegisterAndRecordSuccessfulScaleDownEvent ¶
func RegisterAndRecordSuccessfulScaleDownEvent(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup, drain bool, nodeDeletionTracker *deletiontracker.NodeDeletionTracker)
RegisterAndRecordSuccessfulScaleDownEvent registers the scale-down and records a successful scale-down event.
func SingleRuleDrainConfig ¶
func SingleRuleDrainConfig(shutdownGracePeriodSeconds int) []kubelet_config.ShutdownGracePeriodByPodPriority
SingleRuleDrainConfig returns a slice of ShutdownGracePeriodByPodPriority containing a single ShutdownGracePeriodByPodPriority rule.
func UpdateSoftDeletionTaints ¶
func UpdateSoftDeletionTaints(context *context.AutoscalingContext, uneededNodes, neededNodes []*apiv1.Node) (errors []error)
UpdateSoftDeletionTaints manages soft taints of unneeded nodes.
func WaitForDelayDeletion ¶
func WaitForDelayDeletion(node *apiv1.Node, nodeLister kubernetes.NodeLister, timeout time.Duration) errors.AutoscalerError
WaitForDelayDeletion waits until the provided node has no annotations beginning with DelayDeletionAnnotationPrefix, or until the provided timeout is reached - whichever comes first.
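A hedged sketch of calling WaitForDelayDeletion before a node is handed to the cloud provider for deletion. The kube_util import path, the klog logging, and the helper name are assumptions; only the WaitForDelayDeletion call itself comes from this package.

package example

import (
	"time"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
)

// canDeleteNow blocks until all delay-deletion annotations are gone from the
// node or the timeout elapses, and reports whether deletion may proceed.
func canDeleteNow(node *apiv1.Node, nodeLister kube_util.NodeLister) bool {
	if err := actuation.WaitForDelayDeletion(node, nodeLister, 2*time.Minute); err != nil {
		klog.Errorf("giving up waiting for delay-deletion annotations on %s: %v", node.Name, err)
		return false
	}
	return true
}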
Types ¶
type Actuator ¶
type Actuator struct {
// contains filtered or unexported fields
}
Actuator is responsible for draining and deleting nodes.
func NewActuator ¶
func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator
NewActuator returns a new instance of Actuator.
func (*Actuator) CheckStatus ¶
func (a *Actuator) CheckStatus() scaledown.ActuationStatus
CheckStatus returns an immutable snapshot of ongoing deletions.
func (*Actuator) ClearResultsNotNewerThan ¶
func (a *Actuator) ClearResultsNotNewerThan(t time.Time)
ClearResultsNotNewerThan removes information about deletions finished before or exactly at the provided timestamp.
func (*Actuator) DeletionResults ¶
func (a *Actuator) DeletionResults() (map[string]status.NodeDeleteResult, time.Time)
DeletionResults returns the deletion results collected since the last ClearResultsNotNewerThan call, as a map, along with the timestamp of the last result.
func (*Actuator) StartDeletion ¶
func (a *Actuator) StartDeletion(empty, drain []*apiv1.Node) (status.ScaleDownResult, []*status.ScaleDownNode, errors.AutoscalerError)
StartDeletion triggers a new deletion process.
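A hedged sketch of the call sequence around an existing *Actuator; construction via NewActuator is omitted because it needs a fully wired AutoscalingContext. The assumption that the DeletionResults map is keyed by node name is mine, not stated above, and the import paths are inferred from the package layout.

package example

import (
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
)

// runScaleDown triggers deletion of the given empty and to-be-drained nodes
// and then inspects the results collected so far.
func runScaleDown(a *actuation.Actuator, empty, drain []*apiv1.Node) {
	result, scaledDownNodes, err := a.StartDeletion(empty, drain)
	if err != nil {
		klog.Errorf("scale-down failed to start: %v", err)
		return
	}
	klog.Infof("scale-down result %v, %d nodes being removed", result, len(scaledDownNodes))

	// Deletions happen asynchronously; results can be inspected later.
	results, asOf := a.DeletionResults()
	for node, res := range results {
		klog.V(4).Infof("deletion result for %s: %v", node, res)
	}
	// Forget the results that have just been processed.
	a.ClearResultsNotNewerThan(asOf)
}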
type Evictor ¶
type Evictor struct {
	EvictionRetryTime   time.Duration
	PodEvictionHeadroom time.Duration
	// contains filtered or unexported fields
}
Evictor keeps the configuration used for pod eviction.
func NewEvictor ¶
func NewEvictor(evictionRegister evictionRegister, shutdownGracePeriodByPodPriority []kubelet_config.ShutdownGracePeriodByPodPriority, fullDsEviction bool) Evictor
NewEvictor returns an instance of Evictor.
func (Evictor) DrainNode ¶
func (e Evictor) DrainNode(ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) (map[string]status.PodEvictionResult, error)
DrainNode groups the pods on the node into priority groups and evicts them in ascending order of priority. If the priority evictor is not enabled, eviction of DaemonSet pods is best effort.
func (Evictor) EvictDaemonSetPods ¶
func (e Evictor) EvictDaemonSetPods(ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) (map[string]status.PodEvictionResult, error)
EvictDaemonSetPods creates eviction objects for all DaemonSet pods on the node. Eviction of DaemonSet pods is best effort. It does not wait for the evictions to finish.
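A hedged sketch of draining a single node with an already constructed Evictor (for instance one built with NewEvictor and SingleRuleDrainConfig). The framework import path, the helper name, and the surrounding variables are assumptions; only the DrainNode call and its result type come from this package.

package example

import (
	"k8s.io/klog/v2"

	acontext "k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// drainOneNode evicts the pods on the node in priority order and logs the
// per-pod results. ctx and nodeInfo come from the surrounding scale-down code.
func drainOneNode(evictor actuation.Evictor, ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) {
	results, err := evictor.DrainNode(ctx, nodeInfo)
	if err != nil {
		klog.Errorf("draining %s failed: %v", nodeInfo.Node().Name, err)
	}
	for pod, res := range results {
		klog.V(4).Infof("eviction result for pod %s: %v", pod, res)
	}
}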
type GroupDeletionScheduler ¶
GroupDeletionScheduler is a wrapper over NodeDeletionBatcher responsible for grouping nodes for deletion and rolling back deletion of all nodes from a group in case deletion fails for any of the other nodes.
func NewGroupDeletionScheduler ¶
func NewGroupDeletionScheduler(ctx *context.AutoscalingContext, ndt *deletiontracker.NodeDeletionTracker, b batcher, evictor Evictor) *GroupDeletionScheduler
NewGroupDeletionScheduler creates an instance of GroupDeletionScheduler.
func (*GroupDeletionScheduler) AbortNodeDeletion ¶
func (ds *GroupDeletionScheduler) AbortNodeDeletion(node *apiv1.Node, nodeGroupId string, drain bool, errMsg string, result status.NodeDeleteResult)
AbortNodeDeletion frees up a node that couldn't be deleted successfully. If the node was part of a group, the same is applied to the other nodes queued for deletion from that group.
func (*GroupDeletionScheduler) ResetAndReportMetrics ¶
func (ds *GroupDeletionScheduler) ResetAndReportMetrics()
ResetAndReportMetrics should be invoked for GroupDeletionScheduler before each scale-down phase.
func (*GroupDeletionScheduler) ScheduleDeletion ¶
func (ds *GroupDeletionScheduler) ScheduleDeletion(nodeInfo *framework.NodeInfo, nodeGroup cloudprovider.NodeGroup, batchSize int, drain bool)
ScheduleDeletion schedules deletion of the node. Nodes that should be deleted in groups are queued until the whole group is scheduled for deletion; other nodes are passed to the NodeDeletionBatcher immediately.
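An illustrative sketch of scheduling a group of nodes for deletion: every node is scheduled with the group size as the batch size, so the batcher only receives the nodes once the whole group is queued. Treating batchSize as the group size is my reading of the description above, not a documented contract, and the helper name and import paths are assumptions.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

// scheduleGroup queues every node of one node group for deletion; drain says
// whether the nodes still need to be drained first.
func scheduleGroup(ds *actuation.GroupDeletionScheduler, nodeGroup cloudprovider.NodeGroup, nodeInfos []*framework.NodeInfo, drain bool) {
	for _, nodeInfo := range nodeInfos {
		ds.ScheduleDeletion(nodeInfo, nodeGroup, len(nodeInfos), drain)
	}
}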
type NodeDeletionBatcher ¶
NodeDeletionBatcher batches scale-down candidates for one node group and removes them.
func NewNodeDeletionBatcher ¶
func NewNodeDeletionBatcher(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, deleteInterval time.Duration) *NodeDeletionBatcher
NewNodeDeletionBatcher returns a new NodeDeletionBatcher.
func (*NodeDeletionBatcher) AddNodes ¶
func (d *NodeDeletionBatcher) AddNodes(nodes []*apiv1.Node, nodeGroup cloudprovider.NodeGroup, drain bool)
AddNodes adds a list of nodes to the deletion candidates and schedules their deletion. The deletion is performed asynchronously.
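A hedged sketch of wiring a NodeDeletionBatcher and handing it a batch of nodes from one node group. The 5-second delete interval is arbitrary, the import paths are inferred from the package layout, and treating drain=false as "the nodes are already empty" is an assumption.

package example

import (
	"time"

	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
	"k8s.io/autoscaler/cluster-autoscaler/observers/nodegroupchange"
)

// deleteEmptyNodes asks the batcher to remove nodes that are assumed to be
// already empty, so drain is set to false.
func deleteEmptyNodes(ctx *context.AutoscalingContext, notifier nodegroupchange.NodeGroupChangeObserver, tracker *deletiontracker.NodeDeletionTracker, nodes []*apiv1.Node, group cloudprovider.NodeGroup) {
	batcher := actuation.NewNodeDeletionBatcher(ctx, notifier, tracker, 5*time.Second)
	batcher.AddNodes(nodes, group, false)
}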
type UpdateLatencyTracker ¶
type UpdateLatencyTracker struct {
	// Sends node tainting start timestamps to the tracker
	StartTimeChan chan nodeTaintStartTime
	// Passing a bool will wait for all the started nodes to get tainted and calculate
	// latency based on the latencies observed. (If all the nodes did not get tainted within
	// waitForTaintingTimeoutDuration after passing a bool, the latency calculation will be
	// aborted and the ResultChan will be closed without returning a value.) Closing the
	// AwaitOrStopChan without passing any bool will abort the latency calculation.
	AwaitOrStopChan chan bool
	// Communicates back the measured latency
	ResultChan chan time.Duration
	// contains filtered or unexported fields
}
UpdateLatencyTracker can be used to calculate the round-trip time between CA and the api-server when adding ToBeDeletedTaint to nodes.
func NewUpdateLatencyTracker ¶
func NewUpdateLatencyTracker(nodeLister kubernetes.NodeLister) *UpdateLatencyTracker
NewUpdateLatencyTracker returns a new UpdateLatencyTracker object.
func NewUpdateLatencyTrackerForTesting ¶
func NewUpdateLatencyTrackerForTesting(nodeLister kubernetes.NodeLister, now func() time.Time) *UpdateLatencyTracker
NewUpdateLatencyTrackerForTesting returns an UpdateLatencyTracker object with a reduced sleepDurationWhenPolling and a mock clock, for use in tests.
func (*UpdateLatencyTracker) Start ¶
func (u *UpdateLatencyTracker) Start()
Start starts listening for node tainting start timestamps and records the time at which the taint first appears on each node. It listens on AwaitOrStopChan for stop/await signals.
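A hedged sketch of driving the tracker from outside: Start runs in a goroutine, the tainting code (inside this package) feeds StartTimeChan, and the caller asks for the measured latency through AwaitOrStopChan. Import paths and the helper name are assumptions.

package example

import (
	"k8s.io/klog/v2"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation"
	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
)

// measureTaintLatency reports how long it took for ToBeDeletedTaint to become
// visible on the started nodes.
func measureTaintLatency(nodeLister kube_util.NodeLister) {
	tracker := actuation.NewUpdateLatencyTracker(nodeLister)
	go tracker.Start()

	// ... taints are applied and start timestamps are sent to tracker.StartTimeChan ...

	// Ask for the result; ResultChan is closed without a value if the
	// calculation had to be aborted.
	tracker.AwaitOrStopChan <- true
	if latency, ok := <-tracker.ResultChan; ok {
		klog.Infof("observed taint round-trip latency: %v", latency)
	}
}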