Documentation ¶
Overview ¶
Package node contains code for syncing cloud instances with the node registry.
Index ¶
Constants
Variables
type NodeController
    func NewNodeController(podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, daemonSetInformer extensionsinformers.DaemonSetInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, podEvictionTimeout time.Duration, evictionLimiterQPS float32, secondaryEvictionLimiterQPS float32, largeClusterThreshold int32, unhealthyZoneThreshold float32, nodeMonitorGracePeriod time.Duration, nodeStartupGracePeriod time.Duration, nodeMonitorPeriod time.Duration, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, allocateNodeCIDRs bool, allocatorType ipam.CIDRAllocatorType, runTaintManager bool, useTaintBasedEvictions bool) (*NodeController, error)
    func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState)
    func (nc *NodeController) HealthyQPSFunc(nodeNum int) float32
    func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32
    func (nc *NodeController) Run(stopCh <-chan struct{})
Constants ¶
const (
    NodeControllerSubsystem = "node_collector"
    ZoneHealthStatisticKey  = "zone_health"
    ZoneSizeKey             = "zone_size"
    ZoneNoUnhealthyNodesKey = "unhealthy_nodes_in_zone"
    EvictionsNumberKey      = "evictions_number"
)
Variables ¶
var (
    ZoneHealth = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneHealthStatisticKey,
            Help:      "Gauge measuring percentage of healthy nodes per zone.",
        },
        []string{"zone"},
    )
    ZoneSize = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneSizeKey,
            Help:      "Gauge measuring number of registered Nodes per zones.",
        },
        []string{"zone"},
    )
    UnhealthyNodes = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneNoUnhealthyNodesKey,
            Help:      "Gauge measuring number of not Ready Nodes per zones.",
        },
        []string{"zone"},
    )
    EvictionsNumber = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      EvictionsNumberKey,
            Help:      "Number of Node evictions that happened since current instance of NodeController started.",
        },
        []string{"zone"},
    )
)
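Because Subsystem is "node_collector", these surface as node_collector_zone_health, node_collector_zone_size, node_collector_unhealthy_nodes_in_zone, and node_collector_evictions_number. The collectors only report once registered; the snippet below is a minimal sketch using the Prometheus client's default registry, not necessarily how this package wires them up:

    import "github.com/prometheus/client_golang/prometheus"

    func init() {
        // Register the node controller collectors with the default registry.
        // Illustrative wiring only; the package may handle registration itself.
        prometheus.MustRegister(ZoneHealth, ZoneSize, UnhealthyNodes, EvictionsNumber)
    }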
var (
    UnreachableTaintTemplate = &v1.Taint{
        Key:    algorithm.TaintNodeUnreachable,
        Effect: v1.TaintEffectNoExecute,
    }
    NotReadyTaintTemplate = &v1.Taint{
        Key:    algorithm.TaintNodeNotReady,
        Effect: v1.TaintEffectNoExecute,
    }
)
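When taint-based evictions are enabled, these templates describe the NoExecute taints applied to unreachable and not Ready Nodes. A hand-rolled check like the one below can tell whether a Node already carries one of them; hasTaint is a hypothetical helper for illustration, not this package's API:

    import v1 "k8s.io/api/core/v1" // or the vendored core v1 types matching this tree

    // hasTaint reports whether node already carries a taint matching the
    // template's key and effect. Hypothetical helper, for illustration only.
    func hasTaint(node *v1.Node, template *v1.Taint) bool {
        for _, t := range node.Spec.Taints {
            if t.Key == template.Key && t.Effect == template.Effect {
                return true
            }
        }
        return false
    }

For example, hasTaint(node, UnreachableTaintTemplate) would report whether the unreachable taint is already present on a Node.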
Types ¶
type NodeController ¶
type NodeController struct {
// contains filtered or unexported fields
}
func NewNodeController ¶
func NewNodeController(
    podInformer coreinformers.PodInformer,
    nodeInformer coreinformers.NodeInformer,
    daemonSetInformer extensionsinformers.DaemonSetInformer,
    cloud cloudprovider.Interface,
    kubeClient clientset.Interface,
    podEvictionTimeout time.Duration,
    evictionLimiterQPS float32,
    secondaryEvictionLimiterQPS float32,
    largeClusterThreshold int32,
    unhealthyZoneThreshold float32,
    nodeMonitorGracePeriod time.Duration,
    nodeStartupGracePeriod time.Duration,
    nodeMonitorPeriod time.Duration,
    clusterCIDR *net.IPNet,
    serviceCIDR *net.IPNet,
    nodeCIDRMaskSize int,
    allocateNodeCIDRs bool,
    allocatorType ipam.CIDRAllocatorType,
    runTaintManager bool,
    useTaintBasedEvictions bool) (*NodeController, error)
NewNodeController returns a new node controller that syncs instances from the cloud provider. It returns an error if it is unable to initialize the CIDR bitmap with the podCIDRs it has already allocated to Nodes; since podCIDR changes are currently not allowed, callers should treat that error as fatal.
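For orientation, constructing a controller might look roughly like the sketch below. The helper name, the informer-factory wiring, and every parameter value are assumptions chosen for illustration (the values echo common kube-controller-manager defaults), and RangeAllocatorType is assumed to be the ipam subpackage's range-allocator constant; none of this is prescribed by the package:

    import (
        "time"

        "k8s.io/client-go/informers"
        clientset "k8s.io/client-go/kubernetes"
        node "k8s.io/kubernetes/pkg/controller/node"
        "k8s.io/kubernetes/pkg/controller/node/ipam"
    )

    // newController is a hypothetical helper showing one way to wire up a
    // NodeController from shared informers. All values are illustrative.
    func newController(kubeClient clientset.Interface) (*node.NodeController, informers.SharedInformerFactory, error) {
        factory := informers.NewSharedInformerFactory(kubeClient, 0)
        nc, err := node.NewNodeController(
            factory.Core().V1().Pods(),
            factory.Core().V1().Nodes(),
            factory.Extensions().V1beta1().DaemonSets(),
            nil,            // cloud: no cloud provider in this sketch
            kubeClient,
            5*time.Minute,  // podEvictionTimeout
            0.1,            // evictionLimiterQPS
            0.01,           // secondaryEvictionLimiterQPS
            50,             // largeClusterThreshold
            0.55,           // unhealthyZoneThreshold
            40*time.Second, // nodeMonitorGracePeriod
            1*time.Minute,  // nodeStartupGracePeriod
            5*time.Second,  // nodeMonitorPeriod
            nil,            // clusterCIDR: unused when allocateNodeCIDRs is false
            nil,            // serviceCIDR
            0,              // nodeCIDRMaskSize
            false,          // allocateNodeCIDRs
            ipam.RangeAllocatorType,
            false,          // runTaintManager
            false,          // useTaintBasedEvictions
        )
        return nc, factory, err
    }

Returning the factory alongside the controller lets the caller start the informers before running the controller, as shown under Run below.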
func (*NodeController) ComputeZoneState ¶
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState)
ComputeZoneState determines the state of a zone from the NodeReady conditions of all Nodes in that zone, returning the number of not Ready Nodes together with the computed zoneState. The zone is considered:
- fullyDisrupted if there are no Ready Nodes,
- partiallyDisrupted if at least nc.unhealthyZoneThreshold percent of Nodes are not Ready,
- normal otherwise.
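The classification rule can be restated as a standalone sketch; the string states stand in for the package's unexported zoneState values, and the exact guard conditions in the real method may differ:

    // classifyZone restates the documented rule. Illustrative only: the real
    // method consumes []*v1.NodeCondition and returns an unexported zoneState.
    func classifyZone(readyNodes, notReadyNodes int, unhealthyZoneThreshold float32) string {
        total := readyNodes + notReadyNodes
        switch {
        case readyNodes == 0 && notReadyNodes > 0:
            return "fullyDisrupted"
        case total > 0 && float32(notReadyNodes)/float32(total) >= unhealthyZoneThreshold:
            return "partiallyDisrupted"
        default:
            return "normal"
        }
    }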
func (*NodeController) HealthyQPSFunc ¶
func (nc *NodeController) HealthyQPSFunc(nodeNum int) float32
HealthyQPSFunc returns the default cluster eviction rate; nodeNum is taken only for signature consistency with ReducedQPSFunc (see the sketch after ReducedQPSFunc below).
func (*NodeController) ReducedQPSFunc ¶
func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32
ReducedQPSFunc returns the reduced eviction rate: if the cluster is large, evictions are slowed; if it is small, they stop altogether.
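Together the two rate functions implement the eviction-rate policy. The sketch below restates it with the constructor parameters as plain arguments; the real methods read these from the controller's unexported fields, so this is an illustration rather than the actual method bodies:

    // Sketch of the eviction-rate policy. evictionLimiterQPS,
    // secondaryEvictionLimiterQPS, and largeClusterThreshold correspond to
    // the NewNodeController parameters of the same names.
    func healthyQPS(nodeNum int, evictionLimiterQPS float32) float32 {
        _ = nodeNum // accepted only for signature consistency with reducedQPS
        return evictionLimiterQPS
    }

    func reducedQPS(nodeNum int, secondaryEvictionLimiterQPS float32, largeClusterThreshold int32) float32 {
        if int32(nodeNum) > largeClusterThreshold {
            return secondaryEvictionLimiterQPS // large cluster: evict, but slowly
        }
        return 0 // small cluster: stop evictions altogether
    }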
func (*NodeController) Run ¶
func (nc *NodeController) Run(stopCh <-chan struct{})
Run starts an asynchronous loop that monitors the status of cluster nodes.
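A caller typically starts the shared informers first and then launches the controller in its own goroutine, closing the stop channel to shut everything down. Assuming nc and factory as returned by the hypothetical newController helper sketched earlier:

    stopCh := make(chan struct{})

    factory.Start(stopCh) // begin populating the shared informer caches
    go nc.Run(stopCh)     // the monitoring loop runs asynchronously

    // ... run until the process should exit, then signal shutdown:
    close(stopCh)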