Documentation ¶
Index ¶
- Constants
- Variables
- func CiliumEndpointsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)
- func CiliumEndpointsSliceInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ...)
- func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{})
- func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)
- func HasCEWithIdentity(identity string) bool
- func HasCiliumIsUpCondition(n *slim_corev1.Node) bool
- func NodeQueueShutDown()
- func PodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)
- func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ...)
- func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, ...)
- func StartCECController(ctx context.Context, clientset k8sClient.Clientset, ...)
- func StartIPPoolAllocator(ctx context.Context, clientset client.Clientset, ...)
- func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, cfg ServiceSyncParameters)
- func TransformToUnmanagedPod(obj interface{}) (interface{}, error)
- func UnmanagedPodsInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset)
- type PooledAllocatorProvider
- type ServiceGetter
- type ServiceSyncConfiguration
- type ServiceSyncParameters
Constants ¶
const PodNodeNameIndex = "pod-node"
Variables ¶
var ( // CiliumEndpointStore contains all CiliumEndpoint present in k8s. // Warning: The CiliumEndpoints stored in the cache are not intended to be // used for Update operations in k8s as some of its fields are not // populated. CiliumEndpointStore cache.Indexer // CiliumEndpointsSynced is closed once the CiliumEndpointStore is synced // with k8s. CiliumEndpointsSynced = make(chan struct{}) )
var ( // PodStore has a minimal copy of all pods running in the cluster. // Warning: The pods stored in the cache are not intended to be used for Update // operations in k8s as some of its fields are not populated. PodStore cache.Store // PodStoreSynced is closed once the PodStore is synced with k8s. PodStoreSynced = make(chan struct{}) // UnmanagedPodStore has a minimal copy of the unmanaged pods running // in the cluster. // Warning: The pods stored in the cache are not intended to be used for Update // operations in k8s as some of its fields are not populated. UnmanagedPodStore cache.Store // UnmanagedPodStoreSynced is closed once the UnmanagedPodStore is synced // with k8s. UnmanagedPodStoreSynced = make(chan struct{}) )
var (
K8sSvcCache = k8s.NewServiceCache(nil)
)
Functions ¶
func CiliumEndpointsInit ¶
CiliumEndpointsInit starts a CiliumEndpointWatcher
func CiliumEndpointsSliceInit ¶
func CiliumEndpointsSliceInit(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, cbController *ces.CiliumEndpointSliceController)
CiliumEndpointsSliceInit starts a CiliumEndpointWatcher and caches the given CiliumEndpointSliceController locally.
func HandleNodeTolerationAndTaints ¶
func HandleNodeTolerationAndTaints(wg *sync.WaitGroup, clientset k8sClient.Clientset, stopCh <-chan struct{})
HandleNodeTolerationAndTaints removes node taints.
func HasCE ¶
func HasCE(ns, name string) (*cilium_api_v2.CiliumEndpoint, bool, error)
HasCE returns the endpoint and reports whether the Cilium Endpoint store has the endpoint with the given namespace and name.
func HasCEWithIdentity ¶
HasCEWithIdentity reports whether the Cilium Endpoint store has an endpoint with the given identity.
func HasCiliumIsUpCondition ¶
func HasCiliumIsUpCondition(n *slim_corev1.Node) bool
HasCiliumIsUpCondition returns true if the given k8s node has the cilium node condition set.
func NodeQueueShutDown ¶
func NodeQueueShutDown()
NodeQueueShutDown is a wrapper to expose ShutDown for the global nodeQueue. It is meant to be used in unit test like the identity-gc one in operator/identity/ in order to avoid goleak complaining about leaked goroutines.
func RunCiliumNodeGC ¶
func RunCiliumNodeGC(ctx context.Context, wg *sync.WaitGroup, clientset k8sClient.Clientset, ciliumNodeStore cache.Store, interval time.Duration)
RunCiliumNodeGC performs garbage collection for the CiliumNode resource.
func StartBGPBetaLBIPAllocator ¶
func StartBGPBetaLBIPAllocator(ctx context.Context, clientset client.Clientset, services resource.Resource[*slim_corev1.Service])
StartBGPBetaLBIPAllocator starts the service watcher if it hasn't already and looks for service of type LoadBalancer. Once it finds a service of that type, it will try to allocate an external IP (LoadBalancerIP) for it.
func StartCECController ¶
func StartCECController(ctx context.Context, clientset k8sClient.Clientset, services resource.Resource[*slim_corev1.Service], ports []string, defaultAlgorithm string, idleTimeoutSeconds int)
StartCECController starts the service watcher if it hasn't already and looks for service of type with envoy enabled LB annotation. Once such service is found, it will try to create one CEC associated with the service.
func StartIPPoolAllocator ¶
func StartIPPoolAllocator( ctx context.Context, clientset client.Clientset, allocator PooledAllocatorProvider, ipPools resource.Resource[*cilium_v2alpha1.CiliumPodIPPool], )
func StartSynchronizingServices ¶
func StartSynchronizingServices(ctx context.Context, wg *sync.WaitGroup, cfg ServiceSyncParameters)
StartSynchronizingServices starts a controller for synchronizing services from k8s to the kvstore. 'shared' specifies whether only shared services are synchronized; if 'false', all services will be synchronized. For clustermesh we only need to synchronize shared services, while for VM support we need to sync all the services.
func TransformToUnmanagedPod ¶
func TransformToUnmanagedPod(obj interface{}) (interface{}, error)
Types ¶
type PooledAllocatorProvider ¶
type PooledAllocatorProvider interface { UpsertPool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error DeletePool(ctx context.Context, pool *cilium_v2alpha1.CiliumPodIPPool) error }
PooledAllocatorProvider defines the functions of IPAM provider front-end which additionally allow definition of IP pools at runtime. This is implemented by e.g. pkg/ipam/allocator/multipool
type ServiceGetter ¶
type ServiceGetter struct {
// contains filtered or unexported fields
}
ServiceGetter is a wrapper for 2 k8sCaches, its intention is for `shortCutK8sCache` to be used until `k8sSvcCacheSynced` is closed, for which `k8sCache` is started to be used.
func NewServiceGetter ¶
func NewServiceGetter(sc *k8s.ServiceCache) *ServiceGetter
NewServiceGetter returns a new ServiceGetter holding 2 k8sCaches
func (*ServiceGetter) GetServiceIP ¶
func (s *ServiceGetter) GetServiceIP(svcID k8s.ServiceID) *loadbalancer.L3n4Addr
GetServiceIP returns the result of GetServiceIP for `s.shortCutK8sCache` until `k8sSvcCacheSynced` is closed. This is helpful as we can have a shortcut of `s.k8sCache` since we can pre-populate `s.shortCutK8sCache` with the entries that we need until `s.k8sCache` is synchronized with kubernetes.
type ServiceSyncConfiguration ¶
type ServiceSyncConfiguration interface { // LocalClusterName must return the local cluster name LocalClusterName() string // LocalClusterID must return the local cluster id LocalClusterID() uint32 utils.ServiceConfiguration }
ServiceSyncConfiguration is the required configuration for StartSynchronizingServices
type ServiceSyncParameters ¶
type ServiceSyncParameters struct { ServiceSyncConfiguration Clientset k8sClient.Clientset Services resource.Resource[*slim_corev1.Service] Endpoints resource.Resource[*k8s.Endpoints] Backend store.SyncStoreBackend }