Documentation ¶
Overview ¶
Package node contains code for syncing cloud instances with the node registry.
Index ¶
- Constants
- Variables
- func Register()
- type ActionFunc
- type CIDRAllocator
- type FakeLegacyHandler
- type FakeNodeHandler
- func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface
- func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error)
- func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error
- func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error
- func (m *FakeNodeHandler) Get(name string) (*api.Node, error)
- func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error)
- func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Node, error)
- func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*api.Node, error)
- func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error)
- func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error)
- func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error)
- type FakeRecorder
- type NodeController
- type RateLimitedTimedQueue
- type TimedQueue
- type TimedValue
- type UniqueQueue
Constants ¶
const (
    NodeControllerSubsystem = "node_collector"
    ZoneHealthStatisticKey  = "zone_health"
    ZoneSizeKey             = "zone_size"
    ZoneNoUnhealthyNodesKey = "unhealty_nodes_in_zone"
    EvictionsIn10MinutesKey = "10_minute_evictions"
    EvictionsIn1HourKey     = "1_hour_evictions"
)
const (
    // Number of Nodes that need to be in the cluster for it to be treated as "large"
    LargeClusterThreshold = 20
)
Variables ¶
var (
    ZoneHealth = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneHealthStatisticKey,
            Help:      "Gauge measuring percentage of healty nodes per zone.",
        },
        []string{"zone"},
    )
    ZoneSize = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneSizeKey,
            Help:      "Gauge measuring number of registered Nodes per zones.",
        },
        []string{"zone"},
    )
    UnhealthyNodes = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      ZoneNoUnhealthyNodesKey,
            Help:      "Gauge measuring number of not Ready Nodes per zones.",
        },
        []string{"zone"},
    )
    Evictions10Minutes = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      EvictionsIn10MinutesKey,
            Help:      "Gauge measuring number of Node evictions that happened in previous 10 minutes per zone.",
        },
        []string{"zone"},
    )
    Evictions1Hour = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: NodeControllerSubsystem,
            Name:      EvictionsIn1HourKey,
            Help:      "Gauge measuring number of Node evictions that happened in previous hour per zone.",
        },
        []string{"zone"},
    )
)
var (
    ErrCloudInstance = errors.New("cloud provider doesn't support instances.")
)
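The collectors above are standard Prometheus GaugeVec values; the Index also lists a package-level Register(), which appears to be the package's own registration entry point. Purely as an illustrative sketch (not the package's code path), registering one gauge by hand and recording a per-zone sample could look like the following, assuming this package is imported as node and github.com/prometheus/client_golang/prometheus is available:

    prometheus.MustRegister(node.ZoneHealth)                    // one-time registration of the collector
    node.ZoneHealth.WithLabelValues("us-central1-a").Set(97.5)  // percentage of healthy nodes in that zone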
Functions ¶
func Register ¶
func Register()
Types ¶
type ActionFunc ¶
type ActionFunc func(TimedValue) (bool, time.Duration)
ActionFunc takes a timed value and returns false if the item must be retried, with an optional time.Duration if some minimum wait interval should be used.
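For illustration, a minimal ActionFunc might look like the sketch below. It assumes this package is imported as node; tryEvict is a hypothetical helper standing in for the caller's real work and is not part of the package.

    var evictNode node.ActionFunc = func(v node.TimedValue) (bool, time.Duration) {
        if tryEvict(v.Value) { // hypothetical helper doing the actual work
            return true, 0 // processed successfully; do not requeue
        }
        return false, time.Minute // must be retried; wait at least a minute first
    }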
type CIDRAllocator ¶ added in v1.3.0
type CIDRAllocator interface {
    AllocateOrOccupyCIDR(node *api.Node) error
    ReleaseCIDR(node *api.Node) error
}
CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
func NewCIDRRangeAllocator ¶ added in v1.3.0
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *api.NodeList) (CIDRAllocator, error)
NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for nodes. The caller must ensure subNetMaskSize is not less than the cluster CIDR mask size, and must always pass in a list of existing nodes so the new allocator can initialize its CIDR map. NodeList is only nil in testing.
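A minimal construction sketch, with several assumptions spelled out: kubeClient is an already-configured clientset.Interface, nodeList is the current list of Nodes (nil only in tests, per the note above), the CIDRs are placeholders, and the /24 node mask size is an example value rather than a recommendation.

    _, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16") // placeholder cluster CIDR
    _, serviceCIDR, _ := net.ParseCIDR("10.0.0.0/16")   // placeholder service CIDR
    alloc, err := node.NewCIDRRangeAllocator(kubeClient, clusterCIDR, serviceCIDR, 24, nodeList)
    if err != nil {
        // handle error
    }
    // Assign a pod CIDR to a node (n is an assumed *api.Node).
    err = alloc.AllocateOrOccupyCIDR(n)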
type FakeLegacyHandler ¶ added in v1.3.2
type FakeLegacyHandler struct {
    unversionedcore.CoreInterface
    // contains filtered or unexported fields
}
func (*FakeLegacyHandler) Nodes ¶ added in v1.3.2
func (m *FakeLegacyHandler) Nodes() unversionedcore.NodeInterface
type FakeNodeHandler ¶ added in v1.3.2
type FakeNodeHandler struct {
    *fake.Clientset

    // Input: Hooks determine if request is valid or not
    CreateHook func(*FakeNodeHandler, *api.Node) bool
    Existing   []*api.Node

    // Output
    CreatedNodes        []*api.Node
    DeletedNodes        []*api.Node
    UpdatedNodes        []*api.Node
    UpdatedNodeStatuses []*api.Node
    RequestCount        int
    // contains filtered or unexported fields
}
FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It allows test cases to have fine-grained control over mock behaviors. We also need PodsInterface and PodInterface to test list & delete pods, which are implemented in the embedded client.Fake field.
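A rough test-only sketch, assuming this package is imported as node and the legacy API package as api: the fake is pre-seeded with one existing Node via the Existing field, and the Output fields documented above record the traffic it receives.

    fakeHandler := &node.FakeNodeHandler{
        Existing: []*api.Node{
            {ObjectMeta: api.ObjectMeta{Name: "node0"}},
        },
    }
    nodes, err := fakeHandler.List(api.ListOptions{})
    // nodes should now include the seeded "node0"; fields such as
    // CreatedNodes and RequestCount can be inspected after the calls under test.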
func (*FakeNodeHandler) Core ¶ added in v1.3.2
func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface
func (*FakeNodeHandler) Create ¶ added in v1.3.2
func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error)
func (*FakeNodeHandler) Delete ¶ added in v1.3.2
func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error
func (*FakeNodeHandler) DeleteCollection ¶ added in v1.3.2
func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error
func (*FakeNodeHandler) Get ¶ added in v1.3.2
func (m *FakeNodeHandler) Get(name string) (*api.Node, error)
func (*FakeNodeHandler) List ¶ added in v1.3.2
func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error)
func (*FakeNodeHandler) Patch ¶ added in v1.3.2
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Node, error)
func (*FakeNodeHandler) PatchStatus ¶ added in v1.3.2
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*api.Node, error)
func (*FakeNodeHandler) Update ¶ added in v1.3.2
func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error)
func (*FakeNodeHandler) UpdateStatus ¶ added in v1.3.2
func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error)
func (*FakeNodeHandler) Watch ¶ added in v1.3.2
func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error)
type FakeRecorder ¶ added in v1.4.0
type FakeRecorder struct {
    // contains filtered or unexported fields
}
FakeRecorder is used as a fake during testing.
func NewFakeRecorder ¶ added in v1.4.0
func NewFakeRecorder() *FakeRecorder
func (*FakeRecorder) Event ¶ added in v1.4.0
func (f *FakeRecorder) Event(obj runtime.Object, eventtype, reason, message string)
func (*FakeRecorder) Eventf ¶ added in v1.4.0
func (f *FakeRecorder) Eventf(obj runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
func (*FakeRecorder) PastEventf ¶ added in v1.4.0
func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{})
type NodeController ¶
type NodeController struct {
    // contains filtered or unexported fields
}
func NewNodeController ¶
func NewNodeController(
    podInformer framework.SharedIndexInformer,
    cloud cloudprovider.Interface,
    kubeClient clientset.Interface,
    podEvictionTimeout time.Duration,
    evictionLimiterQPS float32,
    secondaryEvictionLimiterQPS float32,
    largeClusterThreshold int32,
    unhealthyZoneThreshold float32,
    nodeMonitorGracePeriod time.Duration,
    nodeStartupGracePeriod time.Duration,
    nodeMonitorPeriod time.Duration,
    clusterCIDR *net.IPNet,
    serviceCIDR *net.IPNet,
    nodeCIDRMaskSize int,
    allocateNodeCIDRs bool) (*NodeController, error)
NewNodeController returns a new node controller to sync instances from cloudprovider. This method returns an error if it is unable to initialize the CIDR bitmap with podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes currently, this should be handled as a fatal error.
func NewNodeControllerFromClient ¶ added in v1.4.0
func NewNodeControllerFromClient(
    cloud cloudprovider.Interface,
    kubeClient clientset.Interface,
    podEvictionTimeout time.Duration,
    evictionLimiterQPS float32,
    secondaryEvictionLimiterQPS float32,
    largeClusterThreshold int32,
    unhealthyZoneThreshold float32,
    nodeMonitorGracePeriod time.Duration,
    nodeStartupGracePeriod time.Duration,
    nodeMonitorPeriod time.Duration,
    clusterCIDR *net.IPNet,
    serviceCIDR *net.IPNet,
    nodeCIDRMaskSize int,
    allocateNodeCIDRs bool) (*NodeController, error)
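For illustration only, a wiring sketch for this client-only constructor. kubeClient, clusterCIDR, and serviceCIDR are assumed to already exist, the cloud provider is omitted (nil), and the timing and QPS values are placeholders rather than recommended settings.

    nc, err := node.NewNodeControllerFromClient(
        nil,        // cloudprovider.Interface; nil means no cloud integration
        kubeClient, // assumed: a configured clientset.Interface
        5*time.Minute,  // podEvictionTimeout
        0.1,            // evictionLimiterQPS
        0.01,           // secondaryEvictionLimiterQPS
        node.LargeClusterThreshold,
        0.55,           // unhealthyZoneThreshold
        40*time.Second, // nodeMonitorGracePeriod
        60*time.Second, // nodeStartupGracePeriod
        5*time.Second,  // nodeMonitorPeriod
        clusterCIDR,
        serviceCIDR,
        24,   // nodeCIDRMaskSize
        true, // allocateNodeCIDRs
    )
    if err != nil {
        // see the note on NewNodeController above: this usually means the
        // CIDR map could not be initialized
    }
    nc.Run()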
func (*NodeController) ComputeZoneState ¶ added in v1.4.0
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*api.NodeCondition) (int, zoneState)
ComputeZoneState is expected to get a slice of NodeReady conditions for all Nodes in a given zone. The zone is considered:
- fullyDisrupted if there are no Ready Nodes,
- partiallyDisrupted if at least nc.unhealthyZoneThreshold percent of Nodes are not Ready,
- normal otherwise.
For example, with an unhealthyZoneThreshold of 0.55, a zone where 6 of 10 Nodes are not Ready is partiallyDisrupted.
func (*NodeController) HealthyQPSFunc ¶ added in v1.4.0
func (nc *NodeController) HealthyQPSFunc(nodeNum int) float32
HealthyQPSFunc returns the default value for the cluster eviction rate - we take nodeNum for consistency with ReducedQPSFunc.
func (*NodeController) ReducedQPSFunc ¶ added in v1.4.0
func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32
If the cluster is large, make evictions slower; if it is small, stop evictions altogether.
func (*NodeController) Run ¶
func (nc *NodeController) Run()
Run starts an asynchronous loop that monitors the status of cluster nodes.
type RateLimitedTimedQueue ¶
type RateLimitedTimedQueue struct {
    // contains filtered or unexported fields
}
RateLimitedTimedQueue is a unique item priority queue ordered by the expected next time of execution. It is also rate limited.
func NewRateLimitedTimedQueue ¶
func NewRateLimitedTimedQueue(limiter flowcontrol.RateLimiter) *RateLimitedTimedQueue
NewRateLimitedTimedQueue creates a new queue which will use the given RateLimiter to oversee execution.
func (*RateLimitedTimedQueue) Add ¶
func (q *RateLimitedTimedQueue) Add(value string, uid interface{}) bool
Add adds the value to the queue to be processed. It won't add the same value (comparison by value) a second time if it was already added and not removed.
func (*RateLimitedTimedQueue) Clear ¶ added in v1.3.0
func (q *RateLimitedTimedQueue) Clear()
Clear removes all items from the queue.
func (*RateLimitedTimedQueue) Remove ¶
func (q *RateLimitedTimedQueue) Remove(value string) bool
Remove removes a Node from the Evictor. The Node won't be processed until it is added again.
func (*RateLimitedTimedQueue) SwapLimiter ¶ added in v1.4.0
func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32)
SwapLimiter safely swaps the current limiter for this queue with the passed one if their capacities or QPS values differ.
func (*RateLimitedTimedQueue) Try ¶
func (q *RateLimitedTimedQueue) Try(fn ActionFunc)
Try processes the queue. It ends prematurely if the RateLimiter forbids an action and leak is true; otherwise the item is requeued to be processed. Each value is processed once if fn returns true, otherwise it is added back to the queue. The returned remaining duration is used to identify the minimum time before the next item in the queue is executed.
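Putting the queue pieces together, a self-contained sketch of Add and Try. The import paths and the token-bucket limiter from pkg/util/flowcontrol are assumptions about how the package is vendored; processNode is a hypothetical callback.

    package main

    import (
        "fmt"
        "time"

        "k8s.io/kubernetes/pkg/controller/node"
        "k8s.io/kubernetes/pkg/util/flowcontrol"
    )

    // processNode stands in for real work, such as evicting pods from a node.
    func processNode(v node.TimedValue) bool {
        fmt.Println("processing", v.Value)
        return true
    }

    func main() {
        q := node.NewRateLimitedTimedQueue(flowcontrol.NewTokenBucketRateLimiter(0.1, 10))

        // Add is deduplicated by value: the second call returns false and is a no-op.
        q.Add("node0", "uid-0")
        q.Add("node0", "uid-0")

        // Try walks the queue; returning true drops the item, returning false
        // requeues it with the given minimum wait.
        q.Try(func(v node.TimedValue) (bool, time.Duration) {
            if processNode(v) {
                return true, 0
            }
            return false, time.Minute
        })
    }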
type TimedQueue ¶
type TimedQueue []*TimedValue
TimedQueue is a priority heap where the lowest ProcessAt is at the front of the queue.
func (TimedQueue) Len ¶
func (h TimedQueue) Len() int
func (TimedQueue) Less ¶
func (h TimedQueue) Less(i, j int) bool
func (*TimedQueue) Pop ¶
func (h *TimedQueue) Pop() interface{}
func (*TimedQueue) Push ¶
func (h *TimedQueue) Push(x interface{})
func (TimedQueue) Swap ¶
func (h TimedQueue) Swap(i, j int)
type TimedValue ¶
type TimedValue struct {
    Value string
    // UID could be anything that helps identify the value
    UID       interface{}
    AddedAt   time.Time
    ProcessAt time.Time
}
TimedValue is a value that should be processed at a designated time.
type UniqueQueue ¶
type UniqueQueue struct {
    // contains filtered or unexported fields
}
UniqueQueue is a FIFO queue which additionally guarantees that any element can be added only once until it is removed.
func (*UniqueQueue) Add ¶
func (q *UniqueQueue) Add(value TimedValue) bool
Add adds a new value to the queue if it wasn't added before, or was explicitly removed by the Remove call. Returns true if the new value was added.
func (*UniqueQueue) Clear ¶ added in v1.3.0
func (q *UniqueQueue) Clear()
Clear removes all items from the queue and duplication preventing set.
func (*UniqueQueue) Get ¶
func (q *UniqueQueue) Get() (TimedValue, bool)
Get returns the oldest added value that wasn't returned yet.
func (*UniqueQueue) Head ¶
func (q *UniqueQueue) Head() (TimedValue, bool)
Head returns the oldest added value that wasn't returned yet without removing it.
func (*UniqueQueue) Remove ¶
func (q *UniqueQueue) Remove(value string) bool
Remove removes the value from the queue, so a subsequent Get() call won't return it, and allows later re-addition of the given value. If the value is not present, it does nothing and returns false.
func (*UniqueQueue) Replace ¶
func (q *UniqueQueue) Replace(value TimedValue) bool
Replace replaces an existing value in the queue if it already exists, otherwise it does nothing. Returns true if the item was found.