package watchers
v1.15.5
Published: May 10, 2024 License: Apache-2.0 Imports: 42 Imported by: 3

Documentation


Constants

const PodNodeNameIndex = "pod-node"

Variables

var (

	// CiliumEndpointStore contains all CiliumEndpoints present in k8s.
	// Warning: The CiliumEndpoints stored in the cache are not intended to be
	// used for Update operations in k8s as some of their fields are not
	// populated.
	CiliumEndpointStore cache.Indexer

	// CiliumEndpointsSynced is closed once the CiliumEndpointStore is synced
	// with k8s.
	CiliumEndpointsSynced = make(chan struct{})
)
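
Example (illustrative): listing the CiliumEndpoints scheduled on a node through the PodNodeNameIndex index. This is a minimal sketch, not part of the package; it assumes CiliumEndpointsInit has been started, that PodNodeNameIndex keys endpoints by node name (as its name suggests), and that the store holds *cilium_api_v2.CiliumEndpoint objects, consistent with HasCE's signature below.

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
	cilium_api_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
)

// ciliumEndpointsOnNode returns all CiliumEndpoints scheduled on nodeName.
// It blocks until the store has completed its initial sync with k8s.
func ciliumEndpointsOnNode(nodeName string) ([]*cilium_api_v2.CiliumEndpoint, error) {
	<-watchers.CiliumEndpointsSynced

	objs, err := watchers.CiliumEndpointStore.ByIndex(watchers.PodNodeNameIndex, nodeName)
	if err != nil {
		return nil, err
	}
	ceps := make([]*cilium_api_v2.CiliumEndpoint, 0, len(objs))
	for _, obj := range objs {
		cep, ok := obj.(*cilium_api_v2.CiliumEndpoint)
		if !ok {
			return nil, fmt.Errorf("unexpected type %T in CiliumEndpointStore", obj)
		}
		ceps = append(ceps, cep)
	}
	return ceps, nil
}
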
var (
	// PodStore has a minimal copy of all pods running in the cluster.
	// Warning: The pods stored in the cache are not intended to be used for Update
	// operations in k8s as some of their fields are not populated.
	PodStore cache.Store

	// PodStoreSynced is closed once the PodStore is synced with k8s.
	PodStoreSynced = make(chan struct{})

	// UnmanagedPodStore has a minimal copy of the unmanaged pods running
	// in the cluster.
	// Warning: The pods stored in the cache are not intended to be used for Update
	// operations in k8s as some of their fields are not populated.
	UnmanagedPodStore cache.Store

	// UnmanagedPodStoreSynced is closed once the UnmanagedPodStore is synced
	// with k8s.
	UnmanagedPodStoreSynced = make(chan struct{})
)
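
Example (illustrative): a read-only lookup against PodStore. A minimal sketch assuming PodsInit has been started, that the store holds slim *slim_corev1.Pod objects, and that it is keyed by client-go's usual "namespace/name" convention.

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)

// lookupPod fetches a pod from PodStore by namespace and name, blocking
// until the initial sync with k8s is done. The returned object is a
// partial copy and must not be used for k8s Update operations.
func lookupPod(namespace, name string) (*slim_corev1.Pod, bool, error) {
	<-watchers.PodStoreSynced

	obj, exists, err := watchers.PodStore.GetByKey(namespace + "/" + name)
	if err != nil || !exists {
		return nil, exists, err
	}
	pod, ok := obj.(*slim_corev1.Pod)
	if !ok {
		return nil, false, fmt.Errorf("unexpected type %T in PodStore", obj)
	}
	return pod, true, nil
}
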
var (
	// K8sSvcCache is the global cache of Kubernetes services.
	K8sSvcCache = k8s.NewServiceCache(nil)
)

Functions

func CiliumEndpointsInit

func CiliumEndpointsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

CiliumEndpointsInit starts a CiliumEndpoint watcher.

func HandleNodeTolerationAndTaints

func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{})

HandleNodeTolerationAndTaints watches nodes and removes the Cilium node taint once the Cilium agent is ready on them.

func HasCE

func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)

HasCE looks up the CiliumEndpoint with the given namespace and name in the CiliumEndpoint store, returning the endpoint and whether it is present.
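
Example (illustrative): the namespace and pod name below are hypothetical placeholders.

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
)

func printEndpointIfPresent() error {
	// Look up a CiliumEndpoint; exists reports whether it is in the store.
	cep, exists, err := watchers.HasCE("kube-system", "coredns-0")
	if err != nil {
		return err
	}
	if exists {
		fmt.Println("found CiliumEndpoint:", cep.Name)
	}
	return nil
}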

func HasCiliumIsUpCondition

func HasCiliumIsUpCondition(n *slim_corev1.Node) bool

HasCiliumIsUpCondition returns true if the given k8s node has the Cilium node condition set.

func NodeQueueShutDown

func NodeQueueShutDown()

NodeQueueShutDown is a wrapper to expose ShutDown for the global nodeQueue. It is meant to be used in unit tests, such as the identity-gc test in operator/identity/, to avoid goleak complaining about leaked goroutines.
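
Example (illustrative): a test skeleton combining NodeQueueShutDown with goleak; the test body is a placeholder.

package example

import (
	"testing"

	"go.uber.org/goleak"

	"github.com/cilium/cilium/operator/watchers"
)

func TestIdentityGC(t *testing.T) {
	// Deferred calls run in reverse order: the node queue is shut down
	// first, then goleak verifies that no goroutines are left running.
	defer goleak.VerifyNone(t)
	defer watchers.NodeQueueShutDown()

	// ... exercise code that uses the node queue ...
}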

func PodsInit

func PodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

func RunCiliumNodeGC

func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodeStore cache.Store, interval time.Duration)

RunCiliumNodeGC runs the garbage collector for CiliumNode resources.
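
Example (illustrative): starting the garbage collector with a 5-minute interval. A sketch assuming an initialized Clientset; in the operator the store would come from the CiliumNode informer, so the empty store here is only a stand-in.

package example

import (
	"context"
	"sync"
	"time"

	"k8s.io/client-go/tools/cache"

	"github.com/cilium/cilium/operator/watchers"
	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
)

func startNodeGC(ctx context.Context, clientset k8sClient.Clientset) {
	var wg sync.WaitGroup
	// Stand-in store; normally shared with the CiliumNode watcher.
	ciliumNodeStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)

	watchers.RunCiliumNodeGC(ctx, &wg, clientset, ciliumNodeStore, 5*time.Minute)
	wg.Wait() // wait for the GC goroutines to exit after ctx is cancelled
}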

func StartBGPBetaLBIPAllocator

func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, services resource.Resource[*slim_corev1.Service])

StartBGPBetaLBIPAllocator starts the service watcher if it hasn't already been started and looks for services of type LoadBalancer. Once it finds such a service, it will try to allocate an external IP (LoadBalancerIP) for it.

func StartIPPoolAllocator

func StartIPPoolAllocator(
	ctx context.Context,
	clientset client.Clientset,
	allocator PooledAllocatorProvider,
	ipPools resource.Resource[*cilium_v2alpha1.CiliumPodIPPool],
)

func StartSynchronizingServices

func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, cfg ServiceSyncParameters)

StartSynchronizingServices starts a controller for synchronizing services from k8s to the kvstore. 'SharedOnly' specifies whether only shared services are synchronized; if false, all services are synchronized. For clustermesh we only need to synchronize shared services, while for VM support we need to sync all services.
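
Example (illustrative): starting the synchronization with SharedOnly set, as a clustermesh-style deployment would. The ServiceSyncParameters value is assumed to be assembled by the caller's existing wiring.

package example

import (
	"context"
	"sync"

	"github.com/cilium/cilium/operator/watchers"
)

func startServiceSync(ctx context.Context, params watchers.ServiceSyncParameters) {
	var wg sync.WaitGroup

	// For clustermesh, only shared services need to be synchronized;
	// set SharedOnly to false to synchronize all services (VM support).
	params.SharedOnly = true

	watchers.StartSynchronizingServices(ctx, &wg, params)
	wg.Wait()
}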

func TransformToUnmanagedPod

func TransformToUnmanagedPod(obj interface{}) (interface{}, error)
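
TransformToUnmanagedPod matches client-go's cache.TransformFunc signature. Example (illustrative): installing it as an informer transform so objects are reduced before being stored; whether the operator wires it exactly this way is an assumption.

package example

import (
	"k8s.io/client-go/tools/cache"

	"github.com/cilium/cilium/operator/watchers"
)

// withUnmanagedPodTransform must be called before the informer is started.
func withUnmanagedPodTransform(informer cache.SharedIndexInformer) error {
	return informer.SetTransform(watchers.TransformToUnmanagedPod)
}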

func UnmanagedPodsInit

func UnmanagedPodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)

Types

type PooledAllocatorProvider

type PooledAllocatorProvider interface {
	UpsertPool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error
	DeletePool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error
}

PooledAllocatorProvider defines the functions of an IPAM provider front-end that additionally allows IP pools to be defined at runtime. It is implemented by, for example, pkg/ipam/allocator/multipool.
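
Example (illustrative): a toy provider that only logs pool lifecycle events; a real provider such as pkg/ipam/allocator/multipool would create and tear down per-pool allocators here.

package example

import (
	"context"
	"log"

	"github.com/cilium/cilium/operator/watchers"
	cilium_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
)

// loggingAllocator implements PooledAllocatorProvider but only logs calls.
type loggingAllocator struct{}

var _ watchers.PooledAllocatorProvider = (*loggingAllocator)(nil)

func (a *loggingAllocator) UpsertPool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error {
	log.Printf("upsert pool %s", pool.Name)
	return nil
}

func (a *loggingAllocator) DeletePool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error {
	log.Printf("delete pool %s", pool.Name)
	return nil
}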

type ServiceGetter

type ServiceGetter struct {
	// contains filtered or unexported fields
}

ServiceGetter is a wrapper around two k8s caches: `shortCutK8sCache` is used until `k8sSvcCacheSynced` is closed, after which `k8sCache` is used.

func NewServiceGetter

func NewServiceGetter(sc *k8s.ServiceCache) *ServiceGetter

NewServiceGetter returns a new ServiceGetter holding two k8s caches.

func (*ServiceGetter) GetServiceIP

func (s *ServiceGetter) GetServiceIP(svcID k8s.ServiceID) *loadbalancer.L3n4Addr

GetServiceIP returns the result of GetServiceIP from `s.shortCutK8sCache` until `k8sSvcCacheSynced` is closed, after which it queries `s.k8sCache`. This is helpful because `s.shortCutK8sCache` can be pre-populated with the entries that are needed until `s.k8sCache` is synchronized with Kubernetes.
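
Example (illustrative): resolving a service address through the getter; the service ID is a hypothetical placeholder.

package example

import (
	"fmt"

	"github.com/cilium/cilium/operator/watchers"
	"github.com/cilium/cilium/pkg/k8s"
)

func lookupServiceIP(sc *k8s.ServiceCache) {
	// Answers come from the pre-populated shortcut cache until the
	// service cache has synced with k8s, then from the cache itself.
	getter := watchers.NewServiceGetter(sc)

	svcID := k8s.ServiceID{Namespace: "kube-system", Name: "kube-dns"}
	if addr := getter.GetServiceIP(svcID); addr != nil {
		fmt.Println("service address:", addr.String())
	}
}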

type ServiceSyncParameters

type ServiceSyncParameters struct {
	ClusterInfo  cmtypes.ClusterInfo
	Clientset    k8sClient.Clientset
	Services     resource.Resource[*slim_corev1.Service]
	Endpoints    resource.Resource[*k8s.Endpoints]
	Backend      store.SyncStoreBackend
	SharedOnly   bool
	StoreFactory store.Factory
}
