Documentation ¶
Index ¶
- Variables
- func GenericEventHandler(handler func(nsName types.NamespacedName)) cache.ResourceEventHandler
- func GenericEventHandlerWithStaleState[InformerType any](handler func(obj InformerType, stillPresent bool)) cache.ResourceEventHandler
- func NewInformers[ClientT any, FactoryT InformerFactory](args InformersArgs[ClientT, FactoryT]) component.Declared[FactoryT]
- type Client
- type ClientArgs
- type ClientDeps
- type ClientOptions
- type ClientState
- type ClusterName
- type Elector
- type ElectorArgs
- type ElectorDeps
- type ElectorName
- type ElectorObserver
- type ElectorObserverArg
- type ElectorOptions
- type ElectorState
- type EventRecorderArgs
- type EventRecorderDeps
- type EventRecorderOptions
- type EventRecorderState
- type InformerFactory
- type InformerPhase
- type InformerSetName
- type InformersArgs
- type InformersDeps
- type InformersOptions
- type InformersState
Constants ¶
This section is empty.
Variables ¶
var NewClient = component.Declare(
	func(args ClientArgs) string { return fmt.Sprintf("%s-kube", args.ClusterName) },
	func(args ClientArgs, fs *flag.FlagSet) ClientOptions {
		return ClientOptions{
			kubeconfigPath: fs.String(
				"config",
				"",
				fmt.Sprintf("path to %s cluster kubeconfig file", args.ClusterName),
			),
			masterUrl: fs.String(
				"master",
				"",
				fmt.Sprintf("URL to %s cluster apiserver", args.ClusterName),
			),
			TargetNamespace: fs.String(
				"target-namespace",
				metav1.NamespaceAll,
				fmt.Sprintf(
					"namespaces accessible in %s cluster (empty to match all)",
					args.ClusterName,
				),
			),
			impersonateUsername: fs.String(
				"impersonate-username",
				"",
				"username to impersonate as",
			),
			impersonateUid: fs.String("impersonate-uid", "", "UID to impersonate as"),
			impersonateGroups: utilflag.StringSlice(
				fs,
				"impersonate-groups",
				[]string{},
				"comma-separated user groups to impersonate as",
			),
			qps:   fs.Float64("qps", float64(rest.DefaultQPS), "client QPS (for each clientset)"),
			burst: fs.Int("burst", rest.DefaultBurst, "client burst (for each clientset)"),
		}
	},
	func(_ ClientArgs, requests *component.DepRequests) ClientDeps {
		component.DepPtr(requests, kubemetrics.NewComp(kubemetrics.CompArgs{}))
		return ClientDeps{}
	},
	func(_ context.Context, _ ClientArgs, options ClientOptions, _ ClientDeps) (*ClientState, error) {
		restConfig, err := clientcmd.BuildConfigFromFlags(
			*options.masterUrl,
			*options.kubeconfigPath,
		)
		if err != nil {
			return nil, errors.TagWrapf("BuildConfig", err, "build rest config from arguments")
		}

		if *options.impersonateUsername != "" {
			restConfig.Impersonate.UserName = *options.impersonateUsername
		}

		if *options.impersonateUid != "" {
			restConfig.Impersonate.UID = *options.impersonateUid
		}

		if len(*options.impersonateGroups) > 0 {
			restConfig.Impersonate.Groups = *options.impersonateGroups
		}

		restConfig.QPS = float32(*options.qps)
		restConfig.Burst = *options.burst
		restConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"

		kubeClientSet, err := kubernetes.NewForConfig(restConfig)
		if err != nil {
			return nil, errors.TagWrapf(
				"NewClientSet",
				err,
				"create new kubernetes client set from config",
			)
		}

		podseidonClientSet, err := podseidonclient.NewForConfig(restConfig)
		if err != nil {
			return nil, errors.TagWrapf(
				"NewClientSet",
				err,
				"create new podseidon client set from config",
			)
		}

		return &ClientState{
			restConfig:         restConfig,
			kubeClientSet:      kubeClientSet,
			podseidonClientSet: podseidonClientSet,
		}, nil
	},
	component.Lifecycle[ClientArgs, ClientOptions, ClientDeps, ClientState]{
		Start:        nil,
		Join:         nil,
		HealthChecks: nil,
	},
	func(d *component.Data[ClientArgs, ClientOptions, ClientDeps, ClientState]) *Client {
		return &Client{targetNs: d.Options.TargetNamespace, state: d.State}
	},
)
Request a Kubernetes client config through command-line flags.
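A minimal sketch of how a downstream component might request the client as a dependency, following the same `component.DepPtr` pattern used inside `NewElector` and `NewEventRecorder` below; the `kube` import alias, the `worker` cluster name, and the surrounding `myDeps`/`requestDeps` names are hypothetical.

	// Hypothetical dependency-request hook for some component; only the
	// component.DepPtr(requests, NewClient(...)) call is taken from this package.
	type myDeps struct {
		client component.Dep[*kube.Client]
	}

	func requestDeps(requests *component.DepRequests) myDeps {
		return myDeps{
			client: component.DepPtr(requests, kube.NewClient(kube.ClientArgs{
				ClusterName: kube.ClusterName("worker"), // hypothetical cluster name
			})),
		}
	}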
var NewElector = component.Declare(
	func(args ElectorArgs) string { return fmt.Sprintf("%s-leader-elector", args.ElectorName) },
	func(args ElectorArgs, fs *flag.FlagSet) ElectorOptions {
		return ElectorOptions{
			Enabled: fs.Bool(
				"enable",
				true,
				fmt.Sprintf("whether to enable %s leader election", args.ElectorName),
			),
			Namespace: fs.String(
				"namespace",
				metav1.NamespaceDefault,
				fmt.Sprintf("namespace for the lease object for %s", args.ElectorName),
			),
			Name: fs.String(
				"name",
				fmt.Sprintf("podseidon-%s", args.ElectorName),
				fmt.Sprintf("name of the lease object for %s", args.ElectorName),
			),
			LeaseDuration: fs.Duration(
				"lease-duration",
				time.Second*15,
				"leader election lease duration",
			),
			RenewDeadline: fs.Duration(
				"renew-deadline",
				time.Second*10,
				"leader election renew deadline",
			),
			RetryPeriod: fs.Duration(
				"retry-period",
				time.Second*2,
				"leader election retry period",
			),
		}
	},
	func(args ElectorArgs, requests *component.DepRequests) ElectorDeps {
		return ElectorDeps{
			KubeConfig: component.DepPtr(
				requests,
				NewClient(ClientArgs{ClusterName: args.ClusterName}),
			),
			EventRecorder: component.DepPtr(requests, NewEventRecorder(EventRecorderArgs{
				ClusterName: args.ClusterName,
				Component:   fmt.Sprintf("%s-leader-election", args.ElectorName),
			})),
			Observer: o11y.Request[ElectorObserver](requests),
		}
	},
	func(_ context.Context, _ ElectorArgs, options ElectorOptions, _ ElectorDeps) (*ElectorState, error) {
		electorReaderOpt := optional.None[util.LateInitReader[*leaderelection.LeaderElector]]()
		electorWriterOpt := optional.None[util.LateInitWriter[*leaderelection.LeaderElector]]()

		if *options.Enabled {
			electorReader, electorWriter := util.NewLateInit[*leaderelection.LeaderElector]()
			electorReaderOpt = optional.Some(electorReader)
			electorWriterOpt = optional.Some(electorWriter)
		}

		return &ElectorState{
			electedCh:         make(chan util.Empty),
			electorReader:     electorReaderOpt,
			electorWriter:     electorWriterOpt,
			electionErr:       nil,
			electionCloseCh:   nil,
			cancelElectorFunc: atomic.Pointer[context.CancelFunc]{},
		}, nil
	},
	component.Lifecycle[ElectorArgs, ElectorOptions, ElectorDeps, ElectorState]{
		Start: runElector,
		Join: func(_ context.Context, _ *ElectorArgs, _ *ElectorOptions, _ *ElectorDeps, state *ElectorState) error {
			cancelFunc := state.cancelElectorFunc.Load()
			if cancelFunc != nil {
				(*cancelFunc)()
			}

			return nil
		},
		HealthChecks: func(state *ElectorState) component.HealthChecks {
			return component.HealthChecks{
				"elector": optional.Map(state.electorReader, func(reader util.LateInitReader[*leaderelection.LeaderElector]) func() error {
					return func() error {
						if err := reader.Get().Check(0); err != nil {
							return errors.Tag("ElectorTimeout", err)
						}

						return nil
					}
				}).GetOrZero(),
			}
		},
	},
	func(d *component.Data[ElectorArgs, ElectorOptions, ElectorDeps, ElectorState]) *Elector {
		return &Elector{
			electedCh:       d.State.electedCh,
			electionErr:     &d.State.electionErr,
			electionCloseCh: &d.State.electionCloseCh,
		}
	},
)
Request a leader elector that acquires and maintains a leadership lease in the specified cluster.
var NewEventRecorder = component.Declare(
	func(args EventRecorderArgs) string { return fmt.Sprintf("event-recorder-%s", args.Component) },
	func(EventRecorderArgs, *flag.FlagSet) EventRecorderOptions { return EventRecorderOptions{} },
	func(args EventRecorderArgs, requests *component.DepRequests) EventRecorderDeps {
		return EventRecorderDeps{
			config: component.DepPtr(
				requests,
				NewClient(ClientArgs{ClusterName: args.ClusterName}),
			),
		}
	},
	func(_ context.Context, args EventRecorderArgs, _ EventRecorderOptions, deps EventRecorderDeps) (*EventRecorderState, error) {
		broadcaster := record.NewBroadcaster()
		broadcaster.StartStructuredLogging(klog.Level(3))
		broadcaster.StartRecordingToSink(
			&corev1client.EventSinkImpl{
				Interface: deps.config.Get().
					NativeClientSet().
					CoreV1().
					Events(deps.config.Get().TargetNamespace()),
			},
		)

		recorder := broadcaster.NewRecorder(
			scheme.Scheme,
			corev1.EventSource{Component: args.Component, Host: ""},
		)

		return &EventRecorderState{recorder: recorder}, nil
	},
	component.Lifecycle[EventRecorderArgs, EventRecorderOptions, EventRecorderDeps, EventRecorderState]{
		Start:        nil,
		Join:         nil,
		HealthChecks: nil,
	},
	func(d *component.Data[EventRecorderArgs, EventRecorderOptions, EventRecorderDeps, EventRecorderState]) record.EventRecorder {
		return d.State.recorder
	},
)
Request an event recorder for objects in the specified cluster. Exposes the EventRecorder directly.
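A sketch of consuming the recorder: the dependency request mirrors the one inside `NewElector` above, and `Eventf` is the standard `k8s.io/client-go/tools/record` API. The `worker` cluster name, the `my-controller` component name, and the `pod` variable are hypothetical.

	// Inside a component's dependency-request hook (hypothetical consumer):
	recorder := component.DepPtr(requests, kube.NewEventRecorder(kube.EventRecorderArgs{
		ClusterName: kube.ClusterName("worker"), // hypothetical cluster name
		Component:   "my-controller",            // hypothetical component name
	}))

	// Later, with a concrete object (e.g. a *corev1.Pod) in hand:
	recorder.Get().Eventf(pod, corev1.EventTypeNormal, "Synced", "synced pod %s", pod.Name)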
Functions ¶
func GenericEventHandler ¶
func GenericEventHandler(handler func(nsName types.NamespacedName)) cache.ResourceEventHandler
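Judging from the signature, the handler receives the namespaced name of the object for every add, update, and delete event, which suits workqueue-driven controllers. A minimal, hypothetical wiring with a client-go informer; `podInformer` and `queue` are assumed to exist.

	// Enqueue the namespaced name of every changed object onto a workqueue.
	// AddEventHandler is the standard client-go shared-informer method.
	podInformer.Informer().AddEventHandler(kube.GenericEventHandler(
		func(nsName types.NamespacedName) {
			queue.Add(nsName) // e.g. a rate-limiting workqueue keyed by NamespacedName
		},
	))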
func GenericEventHandlerWithStaleState ¶
func GenericEventHandlerWithStaleState[InformerType any](
	handler func(obj InformerType, stillPresent bool),
) cache.ResourceEventHandler
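Unlike `GenericEventHandler`, this variant passes the object itself together with a flag indicating whether it still exists in the cluster. A hypothetical sketch; the informer and the local `store` are assumed to exist.

	// React differently depending on whether the object is still present.
	podInformer.Informer().AddEventHandler(
		kube.GenericEventHandlerWithStaleState(func(pod *corev1.Pod, stillPresent bool) {
			if stillPresent {
				store.Update(pod) // object was added or updated
			} else {
				store.Remove(pod) // object was deleted; pod holds its last known state
			}
		}),
	)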
func NewInformers ¶
func NewInformers[ClientT any, FactoryT InformerFactory](
	args InformersArgs[ClientT, FactoryT],
) component.Declared[FactoryT]
Request a shared informer factory corresponding to the specified cluster.
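A sketch of declaring and consuming a native-type informer factory via `NativeInformers` (documented under `InformersArgs` below); the `worker` cluster name, the `main` informer phase, and the elector arguments are illustrative only.

	// Declare a shared informer factory for native Kubernetes types,
	// gated behind an optional leader elector.
	informers := kube.NewInformers(kube.NativeInformers(
		kube.ClusterName("worker"),
		kube.InformerPhase("main"),
		optional.Some(kube.ElectorArgs{
			ElectorName: kube.ElectorName("my-controller"),
			ClusterName: kube.ClusterName("worker"),
		}),
	))

	// Request it like any other component, then obtain listers from the factory.
	factory := component.DepPtr(requests, informers)
	podLister := factory.Get().Core().V1().Pods().Lister()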
Types ¶
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
API type for `NewClient`.
func MockClient ¶
Create a mock client with the given objects in the cluster.
func (*Client) NativeClientSet ¶
func (client *Client) NativeClientSet() kubernetes.Interface
func (*Client) PodseidonClientSet ¶
func (client *Client) PodseidonClientSet() podseidonclient.Interface
func (*Client) RestConfig ¶
This method should only be used for constructing extension client sets for a cluster. Not supported by `MockClient`.
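A sketch of the intended use, assuming `RestConfig` returns a `*rest.Config` (its signature is not reproduced in this index): build an extension client set, here the dynamic client from `k8s.io/client-go/dynamic`.

	// Construct an extension client set from the same rest config.
	dynamicClient, err := dynamic.NewForConfig(client.RestConfig())
	if err != nil {
		return err
	}
	_ = dynamicClient // use the dynamic client for CRDs not covered by the bundled clientsets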
func (*Client) TargetNamespace ¶
type ClientArgs ¶
type ClientArgs struct {
ClusterName ClusterName
}
type ClientDeps ¶
type ClientDeps struct{}
type ClientOptions ¶
type ClientOptions struct {
	TargetNamespace *string
	// contains filtered or unexported fields
}
type ClientState ¶
type ClientState struct {
// contains filtered or unexported fields
}
type ClusterName ¶
type ClusterName string
type Elector ¶
type Elector struct {
// contains filtered or unexported fields
}
API type for `NewElector`.
func MockReadyElector ¶
A mock elector that always gets ready immediately.
func (*Elector) Await ¶
Waits for the elector to acquire the leader lease.
Returns a child context that cancels when the acquired leader lease is lost. Returns an error if the election fails or if the context is canceled.
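A sketch of the intended usage, assuming a signature along the lines of `Await(ctx context.Context) (context.Context, error)`; the exact signature is not reproduced in this index, and `runControllers` is hypothetical.

	// Block until leadership is acquired, then run leader-only work under the
	// child context, which is canceled when the lease is lost.
	leaderCtx, err := elector.Await(ctx)
	if err != nil {
		return err // election failed or the parent context was canceled
	}

	runControllers(leaderCtx) // hypothetical leader-only workload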
func (*Elector) HasElected ¶
Returns whether the elector has acquired the leader lease.
type ElectorArgs ¶
type ElectorArgs struct {
	ElectorName ElectorName
	ClusterName ClusterName
}
type ElectorDeps ¶
type ElectorDeps struct {
	KubeConfig    component.Dep[*Client]
	EventRecorder component.Dep[record.EventRecorder]
	Observer      component.Dep[ElectorObserver]
}
type ElectorName ¶
type ElectorName string
type ElectorObserver ¶
type ElectorObserver struct {
	Acquired  o11y.ObserveScopeFunc[ElectorObserverArg]
	Lost      o11y.ObserveFunc[ElectorObserverArg]
	Heartbeat o11y.ObserveFunc[ElectorObserverArg]
}
func (ElectorObserver) ComponentName ¶
func (ElectorObserver) ComponentName() string
func (ElectorObserver) Join ¶
func (observer ElectorObserver) Join(other ElectorObserver) ElectorObserver
type ElectorObserverArg ¶
type ElectorOptions ¶
type ElectorState ¶
type ElectorState struct {
// contains filtered or unexported fields
}
type EventRecorderArgs ¶
type EventRecorderArgs struct {
	ClusterName ClusterName
	Component   string
}
type EventRecorderDeps ¶
type EventRecorderDeps struct {
// contains filtered or unexported fields
}
type EventRecorderOptions ¶
type EventRecorderOptions struct{}
type EventRecorderState ¶
type EventRecorderState struct {
// contains filtered or unexported fields
}
type InformerFactory ¶
type InformerPhase ¶
type InformerPhase string
type InformerSetName ¶
type InformerSetName string
type InformersArgs ¶
type InformersArgs[ClientT any, FactoryT InformerFactory] struct {
	ClusterName     ClusterName
	Phase           InformerPhase
	Elector         optional.Optional[ElectorArgs]
	ClientDep       func() component.Declared[ClientT]
	InformerSetName InformerSetName
	InformerSetCtor func(ClientT) FactoryT
}
func NativeInformers ¶
func NativeInformers(
	clusterName ClusterName,
	phase InformerPhase,
	elector optional.Optional[ElectorArgs],
) InformersArgs[*Client, kubeinformers.SharedInformerFactory]
Pass to `NewInformers` to request a shared informer factory for native types corresponding to the specified cluster.
func PodseidonInformers ¶
func PodseidonInformers(
	clusterName ClusterName,
	phase InformerPhase,
	elector optional.Optional[ElectorArgs],
) InformersArgs[*Client, podseidoninformers.SharedInformerFactory]
Pass to `NewInformers` to request a shared informer factory for Podseidon types corresponding to the specified cluster.
type InformersDeps ¶
type InformersDeps[ClientT any] struct {
	// contains filtered or unexported fields
}
type InformersOptions ¶
type InformersOptions struct{}
type InformersState ¶
type InformersState[FactoryT InformerFactory] struct {
	// contains filtered or unexported fields
}