Documentation ¶
Index ¶
- Variables
- func CiliumEndpointsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)
- func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{}, ...)
- func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)
- func HasCiliumIsUpCondition(n *slim_corev1.Node) bool
- func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ...)
- func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, ...)
- func StartIPPoolAllocator(ctx context.Context, clientset client.Clientset, ...)
- func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, cfg ServiceSyncParameters, ...)
- func TransformToUnmanagedPod(obj interface{}) (interface{}, error)
- func UnmanagedPodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)
- type PooledAllocatorProvider
- type ServiceSyncParameters
Constants ¶
This section is empty.
Variables ¶
var ( // CiliumEndpointStore contains all CiliumEndpoint present in k8s. // Warning: The CiliumEndpoints stored in the cache are not intended to be // used for Update operations in k8s as some of their fields are not // populated. CiliumEndpointStore cache.Indexer // CiliumEndpointsSynced is closed once the CiliumEndpointStore is synced // with k8s. CiliumEndpointsSynced = make(chan struct{}) )
var ( // PodStore has a minimal copy of all pods running in the cluster. // Warning: The pods stored in the cache are not intended to be used for Update // operations in k8s as some of its fields are not populated. PodStore cache.Store // UnmanagedPodStore has a minimal copy of the unmanaged pods running // in the cluster. // Warning: The pods stored in the cache are not intended to be used for Update // operations in k8s as some of its fields are not populated. UnmanagedPodStore cache.Store )
var (
K8sSvcCache = k8s.NewServiceCache(nil, nil)
)
Functions ¶
func CiliumEndpointsInit ¶
CiliumEndpointsInit starts a CiliumEndpointWatcher
func HandleNodeTolerationAndTaints ¶
func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{}, logger *slog.Logger)
HandleNodeTolerationAndTaints handles node tolerations and taints.
func HasCE ¶
func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)
HasCE reports whether the Cilium Endpoint store has the endpoint with the given namespace and name, returning the endpoint if present.
func HasCiliumIsUpCondition ¶
func HasCiliumIsUpCondition(n *slim_corev1.Node) bool
HasCiliumIsUpCondition returns true if the given k8s node has the cilium node condition set.
func RunCiliumNodeGC ¶
func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodeStore cache.Store, interval time.Duration, logger *slog.Logger)
RunCiliumNodeGC performs garbage collection for the CiliumNode resource.
func StartBGPBetaLBIPAllocator ¶
func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, services resource.Resource[*slim_corev1.Service], logger *slog.Logger)
StartBGPBetaLBIPAllocator starts the service watcher if it hasn't already and looks for service of type LoadBalancer. Once it finds a service of that type, it will try to allocate an external IP (LoadBalancerIP) for it.
func StartIPPoolAllocator ¶
func StartIPPoolAllocator( ctx context.Context, clientset client.Clientset, allocator PooledAllocatorProvider, ipPools resource.Resource[*cilium_v2alpha1.CiliumPodIPPool], logger *slog.Logger, )
func StartSynchronizingServices ¶
func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, cfg ServiceSyncParameters, logger *slog.Logger)
StartSynchronizingServices starts a controller for synchronizing services from k8s to kvstore. 'shared' specifies whether only shared services are synchronized. If 'false' then all services will be synchronized. For clustermesh we only need to synchronize shared services, while for VM support we need to sync all the services.
func TransformToUnmanagedPod ¶
func TransformToUnmanagedPod(obj interface{}) (interface{}, error)
Types ¶
type PooledAllocatorProvider ¶
type PooledAllocatorProvider interface { UpsertPool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error DeletePool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error }
PooledAllocatorProvider defines the functions of the IPAM provider front-end which additionally allow definition of IP pools at runtime. This is implemented by e.g. pkg/ipam/allocator/multipool.