watchers

package
v1.14.2
Published: Mar 14, 2023 License: Apache-2.0 Imports: 44 Imported by: 0

Documentation

Index

Constants

const PodNodeNameIndex = "pod-node"

Variables

var (

	// CiliumEndpointStore contains all CiliumEndpoints present in k8s.
	// Warning: The CiliumEndpoints stored in the cache are not intended to be
	// used for Update operations in k8s as some of their fields are not
	// populated.
	CiliumEndpointStore cache.Indexer

	// CiliumEndpointsSynced is closed once the CiliumEndpointStore is synced
	// with k8s.
	CiliumEndpointsSynced = make(chan struct{})
)
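
A minimal read-only sketch of consuming this store (the import paths follow the Cilium source tree layout and are assumptions; the watcher itself is started with CiliumEndpointsInit, documented further down):

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
	cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
)

func listCiliumEndpoints() {
	// Block until the CiliumEndpoint cache has synced with k8s.
	<-watchers.CiliumEndpointsSynced

	// Iterate the cached endpoints. They are only partially populated, so
	// treat them as read-only and never feed them back into k8s Update calls.
	for _, obj := range watchers.CiliumEndpointStore.List() {
		if cep, ok := obj.(*cilium_api_v2.CiliumEndpoint); ok {
			fmt.Printf("cilium endpoint %s/%s\n", cep.Namespace, cep.Name)
		}
	}
}
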
var (
	// PodStore has a minimal copy of all pods running in the cluster.
	// Warning: The pods stored in the cache are not intended to be used for Update
	// operations in k8s as some of their fields are not populated.
	PodStore cache.Store

	// PodStoreSynced is closed once the PodStore is synced with k8s.
	PodStoreSynced = make(chan struct{})

	// UnmanagedPodStore has a minimal copy of the unmanaged pods running
	// in the cluster.
	// Warning: The pods stored in the cache are not intended to be used for Update
	// operations in k8s as some of their fields are not populated.
	UnmanagedPodStore cache.Store

	// UnmanagedPodStoreSynced is closed once the UnmanagedPodStore is synced
	// with k8s.
	UnmanagedPodStoreSynced = make(chan struct{})
)
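
A similar sketch for the pod store, assuming PodsInit (documented below) is the watcher that populates it; listing keys avoids depending on the concrete pod type held in the cache:

package example

import (
	"context"
	"fmt"
	"sync"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
)

func listPodKeys(ctx context.Context, clientset k8sClient.Clientset) {
	var wg sync.WaitGroup
	watchers.PodsInit(ctx, &wg, clientset)

	// Wait for the initial sync, then print the cached pod keys.
	<-watchers.PodStoreSynced
	for _, key := range watchers.PodStore.ListKeys() {
		fmt.Println("pod:", key) // keys are "namespace/name"
	}
}
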
var (
	K8sSvcCache = k8s.NewServiceCache(nil)
)

Functions

func CiliumEndpointsInit

func CiliumEndpointsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

CiliumEndpointsInit starts a CiliumEndpoint watcher that populates CiliumEndpointStore.
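
A short sketch of wiring it up (constructing the clientset is out of scope here; import paths are assumptions):

package example

import (
	"context"
	"sync"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
)

// startCEPWatcher starts the CiliumEndpoint watcher and blocks until the
// backing store has synced with the apiserver.
func startCEPWatcher(ctx context.Context, clientset k8sClient.Clientset) {
	var wg sync.WaitGroup
	watchers.CiliumEndpointsInit(ctx, &wg, clientset)
	<-watchers.CiliumEndpointsSynced
}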

func CiliumEndpointsSliceInit

func CiliumEndpointsSliceInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset,
	cbController *ces.CiliumEndpointSliceController)

CiliumEndpointsSliceInit starts a CiliumEndpoint watcher and caches the given CiliumEndpointSliceController locally.

func HandleNodeTolerationAndTaints

func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{})

HandleNodeTolerationAndTaints watches Kubernetes nodes and removes Cilium node taints once Cilium is up on them.
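
A sketch of running the handler until shutdown (the stop channel and wait group are owned by the caller; import paths are assumptions):

package example

import (
	"sync"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
)

// runTaintHandler starts the node taint/toleration handler and returns a
// function that stops it and waits for its goroutines to finish.
func runTaintHandler(clientset k8sClient.Clientset) (stop func()) {
	var wg sync.WaitGroup
	stopCh := make(chan struct{})
	watchers.HandleNodeTolerationAndTaints(&wg, clientset, stopCh)
	return func() {
		close(stopCh)
		wg.Wait()
	}
}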

func HasCE

func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)

HasCE returns the CiliumEndpoint with the given namespace and name from the CiliumEndpoint store, along with a boolean indicating whether it exists.

func HasCEWithIdentity

func HasCEWithIdentity(identity string) bool

HasCEWithIdentity returns true if the CiliumEndpoint store contains an endpoint with the given identity.
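
For illustration, both lookups side by side; the identity argument is assumed to be the numeric security identity rendered as a string:

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
)

func lookupEndpoint(ns, name, identity string) {
	cep, exists, err := watchers.HasCE(ns, name)
	if err != nil {
		fmt.Println("store lookup failed:", err)
		return
	}
	if exists {
		fmt.Printf("found endpoint %s/%s\n", cep.Namespace, cep.Name)
	}

	if watchers.HasCEWithIdentity(identity) {
		fmt.Println("an endpoint with identity", identity, "exists")
	}
}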

func HasCiliumIsUpCondition

func HasCiliumIsUpCondition(n *slim_corev1.Node) bool

HasCiliumIsUpCondition returns true if the given k8s node has the cilium node condition set.

func NodeQueueShutDown

func NodeQueueShutDown()

NodeQueueShutDown is a wrapper to expose ShutDown for the global nodeQueue. It is meant to be used in unit tests, such as the identity-gc one in operator/identity/, in order to avoid goleak complaining about leaked goroutines.
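
A sketch of the intended test usage, assuming go.uber.org/goleak for leak detection:

package example

import (
	"testing"

	"go.uber.org/goleak"

	"github.com/cilium/cilium/operator/watchers"
)

func TestWithNodeQueue(t *testing.T) {
	// Deferred calls run in reverse order: the global nodeQueue is shut down
	// first so its worker goroutine exits before goleak checks for leaks.
	defer goleak.VerifyNone(t)
	defer watchers.NodeQueueShutDown()

	// ... test body exercising code that enqueues onto the node queue ...
}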

func PodsInit

func PodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

func RunCiliumNodeGC

func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodeStore cache.Store, interval time.Duration)

RunCiliumNodeGC runs garbage collection for CiliumNode resources at the given interval.
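
A sketch of starting the GC loop; it assumes ciliumNodeStore is the store backing a CiliumNode informer that is kept up to date elsewhere:

package example

import (
	"context"
	"sync"
	"time"

	"k8s.io/client-go/tools/cache"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
)

func startNodeGC(ctx context.Context, clientset k8sClient.Clientset, ciliumNodeStore cache.Store) {
	var wg sync.WaitGroup
	// Garbage-collect stale CiliumNode objects every five minutes.
	watchers.RunCiliumNodeGC(ctx, &wg, clientset, ciliumNodeStore, 5*time.Minute)
}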

func StartBGPBetaLBIPAllocator

func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, services resource.Resource[*slim_corev1.Service])

StartBGPBetaLBIPAllocator starts the service watcher if it hasn't been started already and looks for services of type LoadBalancer. Once it finds such a service, it tries to allocate an external IP (LoadBalancerIP) for it.
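
A sketch of starting the allocator from an already-constructed service resource (building resource.Resource values is out of scope here; import paths are assumptions):

package example

import (
	"context"

	"github.com/cilium/cilium/operator/watchers"
	"github.com/cilium/cilium/pkg/k8s/client"
	"github.com/cilium/cilium/pkg/k8s/resource"
	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)

func startLBIPAllocator(ctx context.Context, clientset client.Clientset, services resource.Resource[*slim_corev1.Service]) {
	// Watches LoadBalancer services and allocates LoadBalancerIPs for them.
	watchers.StartBGPBetaLBIPAllocator(ctx, clientset, services)
}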

func StartCECController

func StartCECController(ctx context.Context, clientset k8sClient.Clientset, services resource.Resource[*slim_corev1.Service], ports []string, defaultAlgorithm string)

StartCECController starts the service watcher if it hasn't been started already and looks for services carrying the Envoy-enabled load-balancer annotation. Once such a service is found, it tries to create a CEC (CiliumEnvoyConfig) associated with the service.
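
A sketch along the same lines; the ports and defaultAlgorithm values are illustrative placeholders, not values prescribed by this package:

package example

import (
	"context"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
	"github.com/cilium/cilium/pkg/k8s/resource"
	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)

func startCEC(ctx context.Context, clientset k8sClient.Clientset, services resource.Resource[*slim_corev1.Service]) {
	ports := []string{"80", "443"}    // illustrative ports
	defaultAlgorithm := "round_robin" // illustrative load-balancing algorithm
	// Create CiliumEnvoyConfig objects for annotated services.
	watchers.StartCECController(ctx, clientset, services, ports, defaultAlgorithm)
}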

func StartSynchronizingServices

func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, services resource.Resource[*slim_corev1.Service], shared bool, cfg ServiceSyncConfiguration)

StartSynchronizingServices starts a controller for synchronizing services from k8s to the kvstore. 'shared' specifies whether only shared services are synchronized; if false, all services are synchronized. For clustermesh only shared services need to be synchronized, while for VM support all services need to be synced.

func UnmanagedPodsInit

func UnmanagedPodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

Types

type ServiceGetter

type ServiceGetter struct {
	// contains filtered or unexported fields
}

ServiceGetter is a wrapper around two k8s caches: `shortCutK8sCache` is used until `k8sSvcCacheSynced` is closed, after which `k8sCache` is used.

func NewServiceGetter

func NewServiceGetter(sc *k8s.ServiceCache) *ServiceGetter

NewServiceGetter returns a new ServiceGetter holding two k8s caches.

func (*ServiceGetter) GetServiceIP

func (s *ServiceGetter) GetServiceIP(svcID k8s.ServiceID) *loadbalancer.L3n4Addr

GetServiceIP returns the result of GetServiceIP from `s.shortCutK8sCache` until `k8sSvcCacheSynced` is closed, after which `s.k8sCache` is used. This is useful because `s.shortCutK8sCache` can be pre-populated with the entries that are needed until `s.k8sCache` is synchronized with Kubernetes.
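
A sketch of looking up a service frontend through the getter, backed by the package-level K8sSvcCache; the ServiceID fields used here are assumptions based on pkg/k8s:

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
	"github.com/cilium/cilium/pkg/k8s"
)

func lookupServiceIP(namespace, name string) {
	getter := watchers.NewServiceGetter(&watchers.K8sSvcCache)
	addr := getter.GetServiceIP(k8s.ServiceID{Namespace: namespace, Name: name})
	if addr == nil {
		fmt.Printf("no frontend known for %s/%s\n", namespace, name)
		return
	}
	fmt.Printf("%s/%s -> %s\n", namespace, name, addr.String())
}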

type ServiceSyncConfiguration

type ServiceSyncConfiguration interface {
	// LocalClusterName must return the local cluster name
	LocalClusterName() string

	// LocalClusterID must return the local cluster id
	LocalClusterID() uint32

	utils.ServiceConfiguration
}

ServiceSyncConfiguration is the required configuration for StartSynchronizingServices.
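
A sketch of satisfying the interface and passing it to StartSynchronizingServices. The embedded utils.ServiceConfiguration is assumed to require K8sServiceProxyNameValue, so that method (and the config values) are assumptions here:

package example

import (
	"context"
	"sync"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
	"github.com/cilium/cilium/pkg/k8s/resource"
	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)

// syncConfig is a hypothetical ServiceSyncConfiguration implementation.
type syncConfig struct {
	clusterName string
	clusterID   uint32
}

func (c syncConfig) LocalClusterName() string { return c.clusterName }
func (c syncConfig) LocalClusterID() uint32   { return c.clusterID }

// K8sServiceProxyNameValue is assumed to satisfy the embedded
// utils.ServiceConfiguration; an empty value means no proxy-name filtering.
func (c syncConfig) K8sServiceProxyNameValue() string { return "" }

func syncSharedServices(ctx context.Context, clientset k8sClient.Clientset, services resource.Resource[*slim_corev1.Service]) {
	var wg sync.WaitGroup
	cfg := syncConfig{clusterName: "cluster-1", clusterID: 1}
	// shared=true: only services marked as shared are synchronized to the kvstore.
	watchers.StartSynchronizingServices(ctx, &wg, clientset, services, true, cfg)
}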
