Documentation ¶
Overview ¶
Package v1beta1 contains API Schema definitions for the flinkoperator v1beta1 API group
+kubebuilder:object:generate=true
+groupName=flinkoperator.k8s.io
Index ¶
- Constants
- Variables
- type BatchSchedulerSpec
- type CleanupAction
- type CleanupPolicy
- type ClusterState
- type ComponentState
- type ConfigMapStatus
- type DeploymentType
- type FlinkCluster
- func (in *FlinkCluster) DeepCopy() *FlinkCluster
- func (in *FlinkCluster) DeepCopyInto(out *FlinkCluster)
- func (in *FlinkCluster) DeepCopyObject() runtime.Object
- func (cluster *FlinkCluster) Default()
- func (fc *FlinkCluster) GetHAConfigMapName() string
- func (fc *FlinkCluster) IsHighAvailabilityEnabled() bool
- func (cluster *FlinkCluster) SetupWebhookWithManager(mgr ctrl.Manager) error
- func (cluster *FlinkCluster) ValidateCreate() (admission.Warnings, error)
- func (cluster *FlinkCluster) ValidateDelete() (admission.Warnings, error)
- func (cluster *FlinkCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error)
- type FlinkClusterComponentsStatus
- type FlinkClusterControlStatus
- type FlinkClusterList
- type FlinkClusterSpec
- type FlinkClusterStatus
- type GCPConfig
- type GCPServiceAccount
- type HadoopConfig
- type HorizontalPodAutoscalerSpec
- type ImageSpec
- type JobManagerIngressSpec
- type JobManagerIngressStatus
- type JobManagerPorts
- type JobManagerServiceStatus
- type JobManagerSpec
- type JobManagerStatus
- type JobMode
- type JobRestartPolicy
- type JobSpec
- type JobState
- type JobStatus
- func (in *JobStatus) DeepCopy() *JobStatus
- func (in *JobStatus) DeepCopyInto(out *JobStatus)
- func (j *JobStatus) IsActive() bool
- func (j *JobStatus) IsFailed() bool
- func (j *JobStatus) IsPending() bool
- func (j *JobStatus) IsSavepointUpToDate(spec *JobSpec, compareTime time.Time) bool
- func (j *JobStatus) IsStopped() bool
- func (j *JobStatus) IsTerminated(spec *JobSpec) bool
- func (j *JobStatus) ShouldRestart(spec *JobSpec) bool
- func (j *JobStatus) UpdateReady(spec *JobSpec, observeTime time.Time) bool
- type NamedPort
- type RevisionStatus
- type SavepointReason
- type SavepointStatus
- type TaskManagerPorts
- type TaskManagerSpec
- type TaskManagerStatus
- type Validator
Constants ¶
const (
	DefaultJobManagerReplicas  = 1
	DefaultTaskManagerReplicas = 3
)
const (
	AccessScopeCluster  = "Cluster"
	AccessScopeVPC      = "VPC"
	AccessScopeExternal = "External"
	AccessScopeNodePort = "NodePort"
	AccessScopeHeadless = "Headless"
)
AccessScope defines the access scope of JobManager service.
const (
	// control annotation key
	ControlAnnotation = "flinkclusters.flinkoperator.k8s.io/user-control"

	// control name
	ControlNameSavepoint = "savepoint"
	ControlNameJobCancel = "job-cancel"

	// control state
	ControlStateRequested  = "Requested"
	ControlStateInProgress = "InProgress"
	ControlStateSucceeded  = "Succeeded"
	ControlStateFailed     = "Failed"
)
User requested control
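A minimal sketch of triggering a user control from Go with a controller-runtime client: it sets the control annotation to the savepoint control name and updates the resource. The import path github.com/spotify/flink-on-k8s-operator/api/v1beta1 and the function name are assumptions inferred from the links on this page, not something this package documents.

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// requestSavepoint asks the operator to take a savepoint by setting the
// user-control annotation on an existing FlinkCluster. The same effect can be
// achieved with:
//   kubectl annotate flinkcluster <name> flinkclusters.flinkoperator.k8s.io/user-control=savepoint
func requestSavepoint(ctx context.Context, c client.Client, namespace, name string) error {
	var cluster flinkv1beta1.FlinkCluster
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cluster); err != nil {
		return err
	}
	if cluster.Annotations == nil {
		cluster.Annotations = map[string]string{}
	}
	cluster.Annotations[flinkv1beta1.ControlAnnotation] = flinkv1beta1.ControlNameSavepoint
	return c.Update(ctx, &cluster)
}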
const (
	SavepointStateInProgress    = "InProgress"
	SavepointStateTriggerFailed = "TriggerFailed"
	SavepointStateFailed        = "Failed"
	SavepointStateSucceeded     = "Succeeded"

	SavepointReasonUserRequested SavepointReason = "user requested"
	SavepointReasonJobCancel     SavepointReason = "job cancel"
	SavepointReasonScheduled     SavepointReason = "scheduled"
	SavepointReasonUpdate        SavepointReason = "update"
)
const (
	// This refers to the Kubernetes Type [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset)
	// Use persistent volumes for recovery.
	DeploymentTypeStatefulSet = "StatefulSet"
	// This refers to the Kubernetes Type [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment)
	// Faster startup, but the volumes are ephemeral
	DeploymentTypeDeployment = "Deployment"
)
const (
	// CleanupActionKeepCluster - keep the entire cluster.
	CleanupActionKeepCluster = "KeepCluster"
	// CleanupActionDeleteCluster - delete the entire cluster.
	CleanupActionDeleteCluster = "DeleteCluster"
	// CleanupActionDeleteTaskManager - delete task manager, keep job manager.
	CleanupActionDeleteTaskManager = "DeleteTaskManager"
)
const (
	InvalidControlAnnMsg           = "invalid value for annotation key: %v, value: %v, available values: savepoint, job-cancel"
	InvalidJobStateForJobCancelMsg = "job-cancel is not allowed because job is not started yet or already terminated, annotation: %v"
	InvalidJobStateForSavepointMsg = "savepoint is not allowed because job is not started yet or already stopped, annotation: %v"
	InvalidSavepointDirMsg         = "savepoint is not allowed without spec.job.savepointsDir, annotation: %v"
	SessionClusterWarnMsg          = "%v is not allowed for session cluster, annotation: %v"
	ControlChangeWarnMsg           = "change is not allowed for control in progress, annotation: %v"
)
Variables ¶
var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "flinkoperator.k8s.io", Version: "v1beta1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
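A common use of these variables is registering the group-version with a runtime.Scheme before building a client or manager. The sketch below assumes the same import path as above.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// newScheme builds a runtime.Scheme that knows about both the built-in
// Kubernetes types and the flinkoperator.k8s.io/v1beta1 types.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := flinkv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}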
Functions ¶
This section is empty.
Types ¶
type BatchSchedulerSpec ¶
type BatchSchedulerSpec struct { // BatchScheduler name. Name string `json:"name"` // _(Optional)_ Queue defines the queue in which resources will be allocated; if queue is // not specified, resources will be allocated in the scheduler's default queue. // +optional Queue string `json:"queue,omitempty"` // _(Optional)_ If specified, indicates the PodGroup's priority. "system-node-critical" and // "system-cluster-critical" are two special keywords which indicate the // highest priorities with the former being the highest priority. Any other // name must be defined by creating a PriorityClass object with that name. // If not specified, the priority will be default or zero if there is no // default. // +optional PriorityClassName string `json:"priorityClassName,omitempty"` }
func (*BatchSchedulerSpec) DeepCopy ¶
func (in *BatchSchedulerSpec) DeepCopy() *BatchSchedulerSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchSchedulerSpec.
func (*BatchSchedulerSpec) DeepCopyInto ¶
func (in *BatchSchedulerSpec) DeepCopyInto(out *BatchSchedulerSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type CleanupAction ¶
type CleanupAction string
CleanupAction defines the action to take after job finishes.
type CleanupPolicy ¶
type CleanupPolicy struct { // Action to take after job succeeds, default: `DeleteCluster`. // +kubebuilder:default=DeleteCluster // +kubebuilder:validation:Enum=KeepCluster;DeleteCluster;DeleteTaskManager AfterJobSucceeds CleanupAction `json:"afterJobSucceeds,omitempty"` // Action to take after job fails, default: `KeepCluster`. // +kubebuilder:default=KeepCluster // +kubebuilder:validation:Enum=KeepCluster;DeleteCluster;DeleteTaskManager AfterJobFails CleanupAction `json:"afterJobFails,omitempty"` // Action to take after job is cancelled, default: `DeleteCluster`. // +kubebuilder:default=DeleteCluster // +kubebuilder:validation:Enum=KeepCluster;DeleteCluster;DeleteTaskManager AfterJobCancelled CleanupAction `json:"afterJobCancelled,omitempty"` }
CleanupPolicy defines the action to take after job finishes. Use one of `KeepCluster, DeleteCluster, DeleteTaskManager` for the below fields.
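As a sketch (same assumed import path as above), a policy that keeps the cluster for debugging after a failure but deletes it otherwise could look like this:

package main

import (
	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// debugFriendlyCleanup keeps the cluster around for inspection when the job
// fails, but tears everything down after a successful run or a cancel.
func debugFriendlyCleanup() *flinkv1beta1.CleanupPolicy {
	return &flinkv1beta1.CleanupPolicy{
		AfterJobSucceeds:  flinkv1beta1.CleanupActionDeleteCluster,
		AfterJobFails:     flinkv1beta1.CleanupActionKeepCluster,
		AfterJobCancelled: flinkv1beta1.CleanupActionDeleteCluster,
	}
}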
func (*CleanupPolicy) DeepCopy ¶
func (in *CleanupPolicy) DeepCopy() *CleanupPolicy
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupPolicy.
func (*CleanupPolicy) DeepCopyInto ¶
func (in *CleanupPolicy) DeepCopyInto(out *CleanupPolicy)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterState ¶ added in v0.4.2
type ClusterState string
const (
	ClusterStateCreating         ClusterState = "Creating"
	ClusterStateRunning          ClusterState = "Running"
	ClusterStateReconciling      ClusterState = "Reconciling"
	ClusterStateUpdating         ClusterState = "Updating"
	ClusterStateStopping         ClusterState = "Stopping"
	ClusterStatePartiallyStopped ClusterState = "PartiallyStopped"
	ClusterStateStopped          ClusterState = "Stopped"
)
ClusterState defines states for a cluster.
func (ClusterState) String ¶ added in v0.4.2
func (cs ClusterState) String() string
type ComponentState ¶ added in v0.4.2
type ComponentState string
const (
	ComponentStateNotReady ComponentState = "NotReady"
	ComponentStateReady    ComponentState = "Ready"
	ComponentStateUpdating ComponentState = "Updating"
	ComponentStateDeleted  ComponentState = "Deleted"
)
ComponentState defines states for a cluster component.
func (ComponentState) String ¶ added in v0.4.2
func (cs ComponentState) String() string
type ConfigMapStatus ¶ added in v0.4.2
type ConfigMapStatus struct { // The resource name of the component. Name string `json:"name"` // The state of the component. State ComponentState `json:"state"` }
func (*ConfigMapStatus) DeepCopy ¶ added in v0.4.2
func (in *ConfigMapStatus) DeepCopy() *ConfigMapStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapStatus.
func (*ConfigMapStatus) DeepCopyInto ¶ added in v0.4.2
func (in *ConfigMapStatus) DeepCopyInto(out *ConfigMapStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DeploymentType ¶ added in v0.4.2
type DeploymentType string
K8s workload API kind for TaskManager workers
type FlinkCluster ¶
type FlinkCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec FlinkClusterSpec `json:"spec"` Status FlinkClusterStatus `json:"status,omitempty"` }
FlinkCluster is the Schema for the flinkclusters API
+kubebuilder:object:root=true
+kubebuilder:resource:shortName={fc,fcs}
+kubebuilder:subresource:status
+kubebuilder:subresource:scale:specpath=.spec.taskManager.replicas,statuspath=.status.components.taskManager.replicas,selectorpath=.status.components.taskManager.selector
+kubebuilder:printcolumn:name="version",type=string,JSONPath=`.spec.flinkVersion`
+kubebuilder:printcolumn:name="status",type=string,JSONPath=`.status.state`
+kubebuilder:printcolumn:name="age",type=date,JSONPath=`.metadata.creationTimestamp`
+kubebuilder:printcolumn:name="jm replicas",type=string,priority=1,JSONPath=`.status.components.jobManager.ready`
+kubebuilder:printcolumn:name="jm zone",type=string,priority=1,JSONPath=`.spec.jobManager.nodeSelector.topology\.kubernetes\.io\/zone`
+kubebuilder:printcolumn:name="tm replicas",type=string,priority=1,JSONPath=`.status.components.taskManager.ready`
+kubebuilder:printcolumn:name="tm zone",type=string,priority=1,JSONPath=`.spec.taskManager.nodeSelector.topology\.kubernetes\.io\/zone`
+kubebuilder:printcolumn:name="Image",type="string",priority=1,JSONPath=".spec.image.name"
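A minimal sketch of constructing a session cluster object in Go; the object name, image name, Flink version, and import path are illustrative assumptions.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// minimalSessionCluster returns the smallest useful FlinkCluster: a session
// cluster (no Job spec) with only the required flinkVersion and image fields.
// The defaulting webhook fills in JobManager/TaskManager replicas, ports and
// resources when they are omitted.
func minimalSessionCluster() *flinkv1beta1.FlinkCluster {
	return &flinkv1beta1.FlinkCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-session-cluster", // illustrative name
			Namespace: "default",
		},
		Spec: flinkv1beta1.FlinkClusterSpec{
			FlinkVersion: "1.15", // must match the version in the image
			Image: flinkv1beta1.ImageSpec{
				Name: "flink:1.15", // illustrative image
			},
		},
	}
}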
func (*FlinkCluster) DeepCopy ¶
func (in *FlinkCluster) DeepCopy() *FlinkCluster
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkCluster.
func (*FlinkCluster) DeepCopyInto ¶
func (in *FlinkCluster) DeepCopyInto(out *FlinkCluster)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FlinkCluster) DeepCopyObject ¶
func (in *FlinkCluster) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*FlinkCluster) Default ¶
func (cluster *FlinkCluster) Default()
Default implements webhook.Defaulter so a webhook will be registered for the type.
func (*FlinkCluster) GetHAConfigMapName ¶ added in v0.4.2
func (fc *FlinkCluster) GetHAConfigMapName() string
func (*FlinkCluster) IsHighAvailabilityEnabled ¶ added in v0.4.2
func (fc *FlinkCluster) IsHighAvailabilityEnabled() bool
func (*FlinkCluster) SetupWebhookWithManager ¶
func (cluster *FlinkCluster) SetupWebhookWithManager(mgr ctrl.Manager) error
SetupWebhookWithManager adds webhook for FlinkCluster.
func (*FlinkCluster) ValidateCreate ¶
func (cluster *FlinkCluster) ValidateCreate() (admission.Warnings, error)
ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
func (*FlinkCluster) ValidateDelete ¶
func (cluster *FlinkCluster) ValidateDelete() (admission.Warnings, error)
ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
func (*FlinkCluster) ValidateUpdate ¶
func (cluster *FlinkCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error)
ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
type FlinkClusterComponentsStatus ¶
type FlinkClusterComponentsStatus struct { // The state of configMap. ConfigMap *ConfigMapStatus `json:"configMap,omitempty"` // The state of JobManager. JobManager *JobManagerStatus `json:"jobManager,omitempty"` // The state of JobManager service. JobManagerService JobManagerServiceStatus `json:"jobManagerService,omitempty"` // The state of JobManager ingress. JobManagerIngress *JobManagerIngressStatus `json:"jobManagerIngress,omitempty"` // The state of TaskManager. TaskManager *TaskManagerStatus `json:"taskManager,omitempty"` // The status of the job, available only when JobSpec is provided. Job *JobStatus `json:"job,omitempty"` }
FlinkClusterComponentsStatus defines the observed status of the components of a FlinkCluster.
func (*FlinkClusterComponentsStatus) DeepCopy ¶
func (in *FlinkClusterComponentsStatus) DeepCopy() *FlinkClusterComponentsStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkClusterComponentsStatus.
func (*FlinkClusterComponentsStatus) DeepCopyInto ¶
func (in *FlinkClusterComponentsStatus) DeepCopyInto(out *FlinkClusterComponentsStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FlinkClusterControlStatus ¶
type FlinkClusterControlStatus struct { // Control name Name string `json:"name"` // Control data Details map[string]string `json:"details,omitempty"` // State State string `json:"state"` // Message Message string `json:"message,omitempty"` // State update time UpdateTime string `json:"updateTime"` }
Control state
func (*FlinkClusterControlStatus) DeepCopy ¶
func (in *FlinkClusterControlStatus) DeepCopy() *FlinkClusterControlStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkClusterControlStatus.
func (*FlinkClusterControlStatus) DeepCopyInto ¶
func (in *FlinkClusterControlStatus) DeepCopyInto(out *FlinkClusterControlStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FlinkClusterList ¶
type FlinkClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []FlinkCluster `json:"items"` }
FlinkClusterList contains a list of FlinkCluster
func (*FlinkClusterList) DeepCopy ¶
func (in *FlinkClusterList) DeepCopy() *FlinkClusterList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkClusterList.
func (*FlinkClusterList) DeepCopyInto ¶
func (in *FlinkClusterList) DeepCopyInto(out *FlinkClusterList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*FlinkClusterList) DeepCopyObject ¶
func (in *FlinkClusterList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type FlinkClusterSpec ¶
type FlinkClusterSpec struct { // The version of Flink to be managed. This version must match the version in the image. FlinkVersion string `json:"flinkVersion"` // Flink image for JobManager, TaskManager and job containers. Image ImageSpec `json:"image"` // _(Optional)_ The service account assigned to JobManager, TaskManager and Job submitter Pods. If empty, the default service account in the namespace will be used. ServiceAccountName *string `json:"serviceAccountName,omitempty"` // Deprecated: BatchSchedulerName specifies the batch scheduler name for JobManager, TaskManager. // If empty, no batch scheduling is enabled. BatchSchedulerName *string `json:"batchSchedulerName,omitempty"` // _(Optional)_ BatchScheduler specifies the batch scheduler for JobManager, TaskManager. // If empty, no batch scheduling is enabled. BatchScheduler *BatchSchedulerSpec `json:"batchScheduler,omitempty"` // _(Optional)_ Defines the PodDisruptionBudget for JobManager and TaskManager. // If empty, no PodDisruptionBudget is created. PodDisruptionBudget *policyv1.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"` // _(Optional)_ Flink JobManager spec. // +kubebuilder:default:={replicas:1} JobManager *JobManagerSpec `json:"jobManager,omitempty"` // _(Optional)_ Flink TaskManager spec. // +kubebuilder:default:={replicas:3} TaskManager *TaskManagerSpec `json:"taskManager,omitempty"` // _(Optional)_ Job spec. If specified, this cluster is an ephemeral Job // Cluster, which will be automatically terminated after the job finishes; // otherwise, it is a long-running Session Cluster. Job *JobSpec `json:"job,omitempty"` // _(Optional)_ Environment variables shared by all JobManager, TaskManager and job // containers. // [More info](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) EnvVars []corev1.EnvVar `json:"envVars,omitempty"` // _(Optional)_ Environment variables injected from a source, shared by all JobManager, // TaskManager and job containers. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables) EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` // _(Optional)_ Flink properties which are appended to flink-conf.yaml. FlinkProperties map[string]string `json:"flinkProperties,omitempty"` // _(Optional)_ Config for Hadoop. HadoopConfig *HadoopConfig `json:"hadoopConfig,omitempty"` // _(Optional)_ Config for GCP. GCPConfig *GCPConfig `json:"gcpConfig,omitempty"` // _(Optional)_ The logging configuration, which should have keys 'log4j-console.properties' and 'logback-console.xml'. // These will end up in the 'flink-config-volume' ConfigMap, which gets mounted at /opt/flink/conf. // If not provided, defaults that log to console only will be used. // <br> - log4j-console.properties: The contents of the log4j properties file to use. If not provided, a default that logs only to stdout will be provided. // <br> - logback-console.xml: The contents of the logback XML file to use. If not provided, a default that logs only to stdout will be provided. // <br> - Other arbitrary keys are also allowed, and will become part of the ConfigMap. LogConfig map[string]string `json:"logConfig,omitempty"` // The maximum number of revision history to keep, default: 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` // Recreate components when updating flinkcluster, default: true. 
// +kubebuilder:default:=true RecreateOnUpdate *bool `json:"recreateOnUpdate,omitempty"` }
FlinkClusterSpec defines the desired state of FlinkCluster
func (*FlinkClusterSpec) DeepCopy ¶
func (in *FlinkClusterSpec) DeepCopy() *FlinkClusterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkClusterSpec.
func (*FlinkClusterSpec) DeepCopyInto ¶
func (in *FlinkClusterSpec) DeepCopyInto(out *FlinkClusterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FlinkClusterStatus ¶
type FlinkClusterStatus struct { // The overall state of the Flink cluster. State ClusterState `json:"state"` // The status of the components. Components FlinkClusterComponentsStatus `json:"components"` // The status of control requested by user. Control *FlinkClusterControlStatus `json:"control,omitempty"` // The status of savepoint progress. Savepoint *SavepointStatus `json:"savepoint,omitempty"` // The status of revision. Revision RevisionStatus `json:"revision,omitempty"` // Last update timestamp for this status. LastUpdateTime string `json:"lastUpdateTime,omitempty"` }
FlinkClusterStatus defines the observed state of FlinkCluster
func (*FlinkClusterStatus) DeepCopy ¶
func (in *FlinkClusterStatus) DeepCopy() *FlinkClusterStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlinkClusterStatus.
func (*FlinkClusterStatus) DeepCopyInto ¶
func (in *FlinkClusterStatus) DeepCopyInto(out *FlinkClusterStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type GCPConfig ¶
type GCPConfig struct { // GCP service account. ServiceAccount *GCPServiceAccount `json:"serviceAccount,omitempty"` }
GCPConfig defines configs for GCP.
func (*GCPConfig) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPConfig.
func (*GCPConfig) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type GCPServiceAccount ¶
type GCPServiceAccount struct { // The name of the Secret holding the GCP service account key file. // The Secret must be in the same namespace as the FlinkCluster. SecretName string `json:"secretName,omitempty"` // The name of the service account key file. KeyFile string `json:"keyFile,omitempty"` // The path where to mount the Volume of the Secret. MountPath string `json:"mountPath,omitempty"` }
GCPServiceAccount defines the config about GCP service account.
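A sketch of wiring a GCP service account key into the cluster; the Secret name, key file, and mount path below are hypothetical, and the import path is assumed as above.

package main

import (
	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// gcpConfig mounts a service account key from a Secret so that Flink can
// access gs:// paths.
func gcpConfig() *flinkv1beta1.GCPConfig {
	return &flinkv1beta1.GCPConfig{
		ServiceAccount: &flinkv1beta1.GCPServiceAccount{
			SecretName: "gcp-service-account",          // hypothetical Secret in the FlinkCluster's namespace
			KeyFile:    "gcp_service_account_key.json", // hypothetical key file inside the Secret
			MountPath:  "/etc/gcp_service_account",     // hypothetical mount path for the Secret volume
		},
	}
}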
func (*GCPServiceAccount) DeepCopy ¶
func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount.
func (*GCPServiceAccount) DeepCopyInto ¶
func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type HadoopConfig ¶
type HadoopConfig struct { // The name of the ConfigMap which contains the Hadoop config files. // The ConfigMap must be in the same namespace as the FlinkCluster. // +kubebuilder:validation:MinLength=1 ConfigMapName string `json:"configMapName,omitempty"` // The path where to mount the Volume of the ConfigMap. // default: `/etc/hadoop/conf`. // +kubebuilder:default:=/etc/hadoop/conf MountPath string `json:"mountPath,omitempty"` }
HadoopConfig defines configs for Hadoop.
func (*HadoopConfig) DeepCopy ¶
func (in *HadoopConfig) DeepCopy() *HadoopConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopConfig.
func (*HadoopConfig) DeepCopyInto ¶
func (in *HadoopConfig) DeepCopyInto(out *HadoopConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type HorizontalPodAutoscalerSpec ¶ added in v0.5.0
type HorizontalPodAutoscalerSpec struct { // minReplicas is the lower limit for the number of replicas to which the autoscaler // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the // alpha feature gate HPAScaleToZero is enabled and at least one Object or External // metric is configured. Scaling is active as long as at least one metric value is // available. MinReplicas *int32 `json:"minReplicas,omitempty"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less than minReplicas. MaxReplicas int32 `json:"maxReplicas"` // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the // ratio between the target value and the current value by the current // number of pods. Ergo, metrics used must decrease as the pod count is // increased, and vice-versa. See the individual metric source types for // more information about how each type of metric must respond. // If not set, the default metric will be set to 80% average CPU utilization. Metrics []autoscalingv2.MetricSpec `json:"metrics,omitempty"` // behavior configures the scaling behavior of the target // in both Up and Down directions (scaleUp and scaleDown fields respectively). // If not set, the default HPAScalingRules for scale up and scale down are used. Behavior *autoscalingv2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` }
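A sketch of a TaskManager autoscaler targeting average CPU utilization; the replica bounds and the 80% target are illustrative, and the import path is assumed as above.

package main

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

func ptr[T any](v T) *T { return &v }

// taskManagerAutoscaler scales TaskManagers between 3 and 10 replicas based
// on average CPU utilization across the pods.
func taskManagerAutoscaler() *flinkv1beta1.HorizontalPodAutoscalerSpec {
	return &flinkv1beta1.HorizontalPodAutoscalerSpec{
		MinReplicas: ptr(int32(3)),
		MaxReplicas: 10,
		Metrics: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricSource{
				Name: corev1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: ptr(int32(80)),
				},
			},
		}},
	}
}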
func (*HorizontalPodAutoscalerSpec) DeepCopy ¶ added in v0.5.0
func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
func (*HorizontalPodAutoscalerSpec) DeepCopyInto ¶ added in v0.5.0
func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ImageSpec ¶
type ImageSpec struct { // Flink image name. // +kubebuilder:validation:MinLength=1 Name string `json:"name"` // Image pull policy. One of `Always, Never, IfNotPresent`, default: `Always`. // (Kubernetes itself defaults to `Always` if the :latest tag is specified, or `IfNotPresent` otherwise.) // [More info](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) // +kubebuilder:default:=Always // +kubebuilder:validation:Enum=Always;Never;IfNotPresent PullPolicy corev1.PullPolicy `json:"pullPolicy,omitempty"` // _(Optional)_ Secrets for image pull. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) PullSecrets []corev1.LocalObjectReference `json:"pullSecrets,omitempty"` }
ImageSpec defines Flink image of JobManager and TaskManager containers.
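A sketch of an ImageSpec pulling from a private registry; the image and Secret names are hypothetical, and the import path is assumed as above.

package main

import (
	corev1 "k8s.io/api/core/v1"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// privateRegistryImage pins the pull policy and references a registry Secret.
func privateRegistryImage() flinkv1beta1.ImageSpec {
	return flinkv1beta1.ImageSpec{
		Name:       "registry.example.com/flink:1.15", // hypothetical image
		PullPolicy: corev1.PullIfNotPresent,
		PullSecrets: []corev1.LocalObjectReference{
			{Name: "registry-credentials"}, // hypothetical Secret name
		},
	}
}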
func (*ImageSpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
func (*ImageSpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobManagerIngressSpec ¶
type JobManagerIngressSpec struct { // _(Optional)_ Ingress host format, e.g. {{$clusterName}}.example.com HostFormat *string `json:"hostFormat,omitempty"` // _(Optional)_ Annotations for ingress configuration. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) Annotations map[string]string `json:"annotations,omitempty"` // Whether to use TLS, default: `false`. // +kubebuilder:default:=false UseTLS *bool `json:"useTls,omitempty"` // _(Optional)_ TLS secret name. TLSSecretName *string `json:"tlsSecretName,omitempty"` }
JobManagerIngressSpec defines ingress of JobManager
func (*JobManagerIngressSpec) DeepCopy ¶
func (in *JobManagerIngressSpec) DeepCopy() *JobManagerIngressSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerIngressSpec.
func (*JobManagerIngressSpec) DeepCopyInto ¶
func (in *JobManagerIngressSpec) DeepCopyInto(out *JobManagerIngressSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobManagerIngressStatus ¶
type JobManagerIngressStatus struct { // The name of the Kubernetes ingress resource. Name string `json:"name"` // The state of the component. State ComponentState `json:"state"` // The URLs of ingress. URLs []string `json:"urls,omitempty"` }
JobManagerIngressStatus defines the status of a JobManager ingress.
func (*JobManagerIngressStatus) DeepCopy ¶
func (in *JobManagerIngressStatus) DeepCopy() *JobManagerIngressStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerIngressStatus.
func (*JobManagerIngressStatus) DeepCopyInto ¶
func (in *JobManagerIngressStatus) DeepCopyInto(out *JobManagerIngressStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobManagerPorts ¶
type JobManagerPorts struct { // RPC port, default: `6123`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6123 RPC *int32 `json:"rpc,omitempty"` // Blob port, default: `6124`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6124 Blob *int32 `json:"blob,omitempty"` // Query port, default: `6125`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6125 Query *int32 `json:"query,omitempty"` // UI port, default: `8081`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=8081 UI *int32 `json:"ui,omitempty"` }
JobManagerPorts defines ports of JobManager.
func (*JobManagerPorts) DeepCopy ¶
func (in *JobManagerPorts) DeepCopy() *JobManagerPorts
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerPorts.
func (*JobManagerPorts) DeepCopyInto ¶
func (in *JobManagerPorts) DeepCopyInto(out *JobManagerPorts)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobManagerServiceStatus ¶
type JobManagerServiceStatus struct { // The name of the Kubernetes jobManager service. Name string `json:"name"` // The state of the component. State ComponentState `json:"state"` // (Optional) The node port, present when `accessScope` is `NodePort`. NodePort int32 `json:"nodePort,omitempty"` // (Optional) The load balancer ingress, present when `accessScope` is `VPC` or `External` LoadBalancerIngress []corev1.LoadBalancerIngress `json:"loadBalancerIngress,omitempty"` }
JobManagerServiceStatus defines the observed state of the JobManager service.
func (*JobManagerServiceStatus) DeepCopy ¶
func (in *JobManagerServiceStatus) DeepCopy() *JobManagerServiceStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerServiceStatus.
func (*JobManagerServiceStatus) DeepCopyInto ¶
func (in *JobManagerServiceStatus) DeepCopyInto(out *JobManagerServiceStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobManagerSpec ¶
type JobManagerSpec struct { // The number of JobManager replicas, default: `1` // +kubebuilder:default:=1 // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=1 Replicas *int32 `json:"replicas,omitempty"` // Access scope, default: `Cluster`. // `Cluster`: accessible from within the same cluster. // `VPC`: accessible from within the same VPC. // `External`: accessible from the internet. // `NodePort`: accessible through node port. // `Headless`: pod IPs assumed to be routable and advertised directly with `clusterIP: None`. // Currently `VPC, External` are only available for GKE. // +kubebuilder:default:=Cluster // +kubebuilder:validation:Enum=Cluster;VPC;External;NodePort;Headless AccessScope string `json:"accessScope,omitempty"` // _(Optional)_ Define JobManager Service annotations for configuration. ServiceAnnotations map[string]string `json:"ServiceAnnotations,omitempty"` // _(Optional)_ Define JobManager Service labels for configuration. ServiceLabels map[string]string `json:"ServiceLabels,omitempty"` // _(Optional)_ Provide external access to JobManager UI/API. Ingress *JobManagerIngressSpec `json:"ingress,omitempty"` // Ports that the JobManager listens on. // +kubebuilder:default:={rpc:6123, blob:6124, query:6125, ui:8081} Ports JobManagerPorts `json:"ports,omitempty"` // _(Optional)_ Extra ports to be exposed. For example, Flink metrics reporter ports: Prometheus, JMX and so on. // Each port number and name must be unique among ports and extraPorts. ExtraPorts []NamedPort `json:"extraPorts,omitempty"` // Compute resources required by each JobManager container. // default: 2 CPUs with 2Gi Memory. // It cannot be updated. // [More info](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) // +kubebuilder:default:={requests:{cpu:"200m", memory:"512Mi"}, limits: {cpu:2, memory:"2Gi"}} Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Percentage of off-heap memory in containers, as a safety margin to avoid OOM kill, default: `25` MemoryOffHeapRatio *int32 `json:"memoryOffHeapRatio,omitempty"` // Minimum amount of off-heap memory in containers, as a safety margin to avoid OOM kill, default: `600M` // You can express this value like 600M, 572Mi and 600e6 // [More info](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) MemoryOffHeapMin resource.Quantity `json:"memoryOffHeapMin,omitempty"` // For Flink 1.10+. Percentage of process memory, as a safety margin to avoid OOM kill, default: `80` MemoryProcessRatio *int32 `json:"memoryProcessRatio,omitempty"` // _(Optional)_ Volumes in the JobManager pod. // [More info](https://kubernetes.io/docs/concepts/storage/volumes/) Volumes []corev1.Volume `json:"volumes,omitempty"` // _(Optional)_ Volume mounts in the JobManager container. // [More info](https://kubernetes.io/docs/concepts/storage/volumes/) VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // _(Optional)_ A template for persistent volume claims, each requested and mounted to the JobManager pod. // This can be used to mount an external volume with a specific storageClass or larger capacity (for a larger/faster state backend). // [More info](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) VolumeClaimTemplates []corev1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` // _(Optional)_ Init containers of the JobManager pod. 
// [More info](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) InitContainers []corev1.Container `json:"initContainers,omitempty"` // _(Optional)_ Defines the affinity of the JobManager pod // [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) Affinity *corev1.Affinity `json:"affinity,omitempty"` // _(Optional)_ Selector which must match a node's labels for the JobManager pod to be // scheduled on that node. // [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) NodeSelector map[string]string `json:"nodeSelector,omitempty"` // _(Optional)_ Defines the tolerations of the JobManager pod // [More info](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // _(Optional)_ Sidecar containers running alongside the JobManager container in the pod. // [More info](https://kubernetes.io/docs/concepts/containers/) Sidecars []corev1.Container `json:"sidecars,omitempty"` // _(Optional)_ JobManager StatefulSet pod template annotations. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // _(Optional)_ SecurityContext of the JobManager pod. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` // _(Optional)_ JobManager StatefulSet pod template labels. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) PodLabels map[string]string `json:"podLabels,omitempty"` // Container liveness probe. // If omitted, a [default value](https://github.com/spotify/flink-on-k8s-operator/blob/a88ed2b/api/v1beta1/flinkcluster_default.go#L113-L123) will be used. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"` // Container readiness probe. // If omitted, a [default value](https://github.com/spotify/flink-on-k8s-operator/blob/a88ed2b/api/v1beta1/flinkcluster_default.go#L129-L139) will be used. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty"` // _(Optional)_ Adding entries to the JobManager pod's /etc/hosts with HostAliases // [More info](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"` }
JobManagerSpec defines properties of JobManager.
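A sketch of a JobManagerSpec that stays cluster-internal, exposes an extra metrics port, and sets explicit resources; the port number, resource sizes, and import path are illustrative assumptions.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

func ptr[T any](v T) *T { return &v }

// clusterInternalJobManager keeps the JobManager reachable only inside the
// cluster and adds a port for a Prometheus metrics reporter (9249 is the
// reporter's conventional default; adjust to your Flink configuration).
func clusterInternalJobManager() *flinkv1beta1.JobManagerSpec {
	return &flinkv1beta1.JobManagerSpec{
		Replicas:    ptr(int32(1)),
		AccessScope: flinkv1beta1.AccessScopeCluster,
		ExtraPorts: []flinkv1beta1.NamedPort{
			{Name: "prom", ContainerPort: 9249, Protocol: "TCP"},
		},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("2Gi"),
			},
		},
		MemoryOffHeapRatio: ptr(int32(25)),
		MemoryOffHeapMin:   resource.MustParse("600M"),
	}
}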
func (*JobManagerSpec) DeepCopy ¶
func (in *JobManagerSpec) DeepCopy() *JobManagerSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerSpec.
func (*JobManagerSpec) DeepCopyInto ¶
func (in *JobManagerSpec) DeepCopyInto(out *JobManagerSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*JobManagerSpec) GetResources ¶ added in v0.4.2
func (jm *JobManagerSpec) GetResources() *corev1.ResourceList
type JobManagerStatus ¶ added in v0.4.2
type JobManagerStatus struct { // The resource name of the component. Name string `json:"name"` // The state of the component. State ComponentState `json:"state"` // replicas is the number of desired replicas. Replicas int32 `json:"replicas"` // readyReplicas is the number of created pods with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty"` Ready string `json:"ready"` }
func (*JobManagerStatus) DeepCopy ¶ added in v0.4.2
func (in *JobManagerStatus) DeepCopy() *JobManagerStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobManagerStatus.
func (*JobManagerStatus) DeepCopyInto ¶ added in v0.4.2
func (in *JobManagerStatus) DeepCopyInto(out *JobManagerStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobRestartPolicy ¶
type JobRestartPolicy string
JobRestartPolicy defines the restart policy when a job fails.
const (
	// JobRestartPolicyNever - never restarts a failed job.
	JobRestartPolicyNever JobRestartPolicy = "Never"
	// JobRestartPolicyFromSavepointOnFailure - restart the job from the latest
	// savepoint if available, otherwise do not restart.
	JobRestartPolicyFromSavepointOnFailure JobRestartPolicy = "FromSavepointOnFailure"
)
type JobSpec ¶
type JobSpec struct { // _(Optional)_ Adds URLs to each user code classloader on all nodes in the cluster. // The paths must specify a protocol (e.g. file://) and be accessible on all nodes (e.g. by means of a NFS share). // The protocol must be supported by java.net.URLClassLoader. // You may add support for more protocols by setting the `java.protocol.handler.pkgs` Java option. ClassPath []string `json:"classPath,omitempty"` // _(Optional)_ JAR file of the job. It could be a local file or remote URI, // depending on which protocols (e.g., `https://, gs://`) are supported by the Flink image. JarFile *string `json:"jarFile,omitempty"` // _(Optional)_ Fully qualified Java class name of the job. ClassName *string `json:"className,omitempty"` // _(Optional)_ Python file of the job. It could be a local file or remote URI (e.g.,`https://`, `gs://`). PyFile *string `json:"pyFile,omitempty"` // _(Optional)_ Python files of the job. It could be a local file (with .py/.egg/.zip/.whl), directory or remote URI (e.g.,`https://`, `gs://`). // See the Flink argument `--pyFiles` for the detail. PyFiles *string `json:"pyFiles,omitempty"` // _(Optional)_ Python module path of the job entry point. Must be used together with `pyFiles`. PyModule *string `json:"pyModule,omitempty"` // _(Optional)_ Command-line args of the job. Args []string `json:"args,omitempty"` // _(Optional)_ Savepoint where to restore the job from (e.g., gs://my-savepoint/1234). // If the Flink job must be restored from the latest available savepoint when the Flink job is updated, this field must be unspecified. FromSavepoint *string `json:"fromSavepoint,omitempty"` // Allow non-restored state, default: `false`. // +kubebuilder:default:=false AllowNonRestoredState *bool `json:"allowNonRestoredState,omitempty"` // _(Optional)_ Savepoints dir where to store savepoints of the job. SavepointsDir *string `json:"savepointsDir,omitempty"` // _(Optional)_ Take a savepoint before updating the job, default: `true`. // If this is set to false, maxStateAgeToRestoreSeconds must be provided to limit the savepoint age to restore. TakeSavepointOnUpdate *bool `json:"takeSavepointOnUpdate,omitempty"` // _(Optional)_ Maximum age of the savepoint that is allowed for restoring state. // This is applied to auto restart on failure, update from stopped state and update without taking savepoint. // If nil, the job can be restarted only when the latest savepoint is the final job state (created by "stop with savepoint") // - that is, only when the job can be resumed from the suspended state. // +kubebuilder:validation:Minimum=0 MaxStateAgeToRestoreSeconds *int32 `json:"maxStateAgeToRestoreSeconds,omitempty"` // _(Optional)_ Automatically take a savepoint to the `savepointsDir` every n seconds. AutoSavepointSeconds *int32 `json:"autoSavepointSeconds,omitempty"` // _(Optional)_ Update this field to `jobStatus.savepointGeneration + 1` for a running job // cluster to trigger a new savepoint to `savepointsDir` on demand. SavepointGeneration int32 `json:"savepointGeneration,omitempty"` // _(Optional)_ Job parallelism; if not set, parallelism will be #replicas * #slots. Parallelism *int32 `json:"parallelism,omitempty"` // No logging output to STDOUT, default: `false`. // +kubebuilder:default:=false NoLoggingToStdout *bool `json:"noLoggingToStdout,omitempty"` // _(Optional)_ Volumes in the Job pod. 
// [More info](https://kubernetes.io/docs/concepts/storage/volumes/) Volumes []corev1.Volume `json:"volumes,omitempty"` // _(Optional)_ Volume mounts in the Job container. // [More info](https://kubernetes.io/docs/concepts/storage/volumes/) VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // _(Optional)_ Init containers of the Job pod. A typical use case could be using an init // container to download a remote job jar to a local path which is // referenced by the `jarFile` property. // [More info](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) InitContainers []corev1.Container `json:"initContainers,omitempty"` // _(Optional)_ Defines the affinity of the Job submitter pod // [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) Affinity *corev1.Affinity `json:"affinity,omitempty"` // _(Optional)_ Selector which must match a node's labels for the Job submitter pod to be // scheduled on that node. // [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) NodeSelector map[string]string `json:"nodeSelector,omitempty"` // _(Optional)_ Defines the tolerations of the Job submitter pod // [More info](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // Restart policy when the job fails, one of `Never, FromSavepointOnFailure`, // default: `Never`. // `Never` means the operator will never try to restart a failed job, manual // cleanup and restart are required. // `FromSavepointOnFailure` means the operator will try to restart the failed // job from the savepoint recorded in the job status if available; otherwise, // the job will stay in the failed state. This option is usually used together // with `autoSavepointSeconds` and `savepointsDir`. // +kubebuilder:default:=Never // +kubebuilder:validation:Enum=Never;FromSavepointOnFailure RestartPolicy *JobRestartPolicy `json:"restartPolicy,omitempty"` // The action to take after job finishes. // +kubebuilder:default:={afterJobSucceeds:DeleteCluster, afterJobFails:KeepCluster, afterJobCancelled:DeleteCluster} CleanupPolicy *CleanupPolicy `json:"cleanupPolicy,omitempty"` // Deprecated: _(Optional)_ Request the job to be cancelled. Only applies to running jobs. If // `savepointsDir` is provided, a savepoint will be taken before stopping the // job. CancelRequested *bool `json:"cancelRequested,omitempty"` // _(Optional)_ Job pod template annotations. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // _(Optional)_ Job pod template labels. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) PodLabels map[string]string `json:"podLabels,omitempty"` // _(Optional)_ Compute resources required by each Job container. // If omitted, a default value will be used. // It cannot be updated. // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +kubebuilder:default:={requests:{cpu:"200m", memory:"512Mi"}, limits: {cpu:2, memory:"2Gi"}} Resources corev1.ResourceRequirements `json:"resources,omitempty"` // _(Optional)_ SecurityContext of the Job pod. 
// [More info](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` // _(Optional)_ Adding entries to the Job pod's /etc/hosts with HostAliases // [More info](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"` // Job running mode, one of `"Blocking", "Detached", "Application"`, default: `"Detached"` // +kubebuilder:validation:Enum=Detached;Blocking;Application // +kubebuilder:default:=Detached Mode *JobMode `json:"mode,omitempty"` }
JobSpec defines properties of a Flink job.
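A sketch of a job-cluster JobSpec that runs a jar from object storage, takes periodic savepoints, and restarts from the last savepoint on failure; the URIs, class name, interval, and import path are hypothetical assumptions.

package main

import (
	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

func ptr[T any](v T) *T { return &v }

// savepointingJob pairs autoSavepointSeconds and savepointsDir with
// restartPolicy: FromSavepointOnFailure so that a failed job can resume
// from recent state.
func savepointingJob() *flinkv1beta1.JobSpec {
	restartPolicy := flinkv1beta1.JobRestartPolicyFromSavepointOnFailure
	return &flinkv1beta1.JobSpec{
		JarFile:              ptr("gs://my-bucket/jobs/wordcount.jar"), // hypothetical URI
		ClassName:            ptr("org.example.WordCount"),             // hypothetical class
		Args:                 []string{"--input", "gs://my-bucket/input"},
		Parallelism:          ptr(int32(4)),
		SavepointsDir:        ptr("gs://my-bucket/savepoints"),
		AutoSavepointSeconds: ptr(int32(300)),
		RestartPolicy:        &restartPolicy,
	}
}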
func (*JobSpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec.
func (*JobSpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobState ¶ added in v0.4.2
type JobState string
JobState defines states for a Flink job deployment.
const (
	JobStatePending      JobState = "Pending"
	JobStateUpdating     JobState = "Updating"
	JobStateRestarting   JobState = "Restarting"
	JobStateDeploying    JobState = "Deploying"
	JobStateDeployFailed JobState = "DeployFailed"
	JobStateRunning      JobState = "Running"
	JobStateSucceeded    JobState = "Succeeded"
	JobStateCancelled    JobState = "Cancelled"
	JobStateFailed       JobState = "Failed"
	JobStateLost         JobState = "Lost"
	JobStateUnknown      JobState = "Unknown"
)
type JobStatus ¶
type JobStatus struct { // The ID of the Flink job. ID string `json:"id,omitempty"` // The name of the Flink job. Name string `json:"name,omitempty"` // The name of the Kubernetes job resource. SubmitterName string `json:"submitterName,omitempty"` // Exit code of the job submitter job resource. SubmitterExitCode int32 `json:"submitterExitCode,omitempty"` // The state of the Flink job deployment. State JobState `json:"state"` // The actual savepoint from which this job started. // In case of restart, it might be different from the savepoint in the job // spec. FromSavepoint string `json:"fromSavepoint,omitempty"` // The generation of the savepoint in `savepointsDir` taken by the operator. // The value starts from 0 when there is no savepoint and increases by 1 for // each successful savepoint. SavepointGeneration int32 `json:"savepointGeneration,omitempty"` // Savepoint location. SavepointLocation string `json:"savepointLocation,omitempty"` // Last successful savepoint completed timestamp. SavepointTime string `json:"savepointTime,omitempty"` // The savepoint recorded in savepointLocation is the final state of the job. FinalSavepoint bool `json:"finalSavepoint,omitempty"` // The timestamp of the Flink job deployment that created the job submitter. DeployTime string `json:"deployTime,omitempty"` // The Flink job started timestamp. StartTime string `json:"startTime,omitempty"` // The number of restarts. RestartCount int32 `json:"restartCount,omitempty"` // Job completion time. Present when the job is terminated, regardless of its state. CompletionTime *metav1.Time `json:"completionTime,omitempty"` // Reasons for the job failure. Present if the job state is Failed. FailureReasons []string `json:"failureReasons,omitempty"` }
JobStatus defines the status of a job.
func (*JobStatus) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
func (*JobStatus) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*JobStatus) IsSavepointUpToDate ¶
IsSavepointUpToDate checks whether the recorded savepoint is up-to-date with respect to maxStateAgeToRestoreSeconds. If maxStateAgeToRestoreSeconds is not set, the savepoint is considered up-to-date only when the recorded savepoint is the final job state.
func (*JobStatus) IsTerminated ¶
func (*JobStatus) ShouldRestart ¶
ShouldRestart returns true if the controller should restart a failed job. The controller can restart the job if the restart policy is set to FromSavepointOnFailure; the job will be restarted from the savepoint if one was taken successfully.
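The helpers above are the decision points around failed and finished jobs. The switch below is an illustrative sketch of how a caller might combine them; it is not the operator's actual reconcile logic, and the import path is assumed as above.

package main

import (
	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// restartDecision combines the JobStatus helpers into a human-readable verdict.
func restartDecision(job *flinkv1beta1.JobStatus, spec *flinkv1beta1.JobSpec) string {
	switch {
	case job.IsActive():
		return "job is active, nothing to do"
	case job.IsFailed() && job.ShouldRestart(spec):
		return "restart the job from the recorded savepoint"
	case job.IsTerminated(spec):
		return "job is terminal, apply the cleanup policy"
	default:
		return "wait for further status updates"
	}
}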
type NamedPort ¶
type NamedPort struct { // _(Optional)_ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each // named port in a pod must have a unique name. Name for the port that can be // referred to by services. Name string `json:"name,omitempty"` // Number of port to expose on the pod's IP address. // This must be a valid port number, 0 < x < 65536. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 ContainerPort int32 `json:"containerPort"` // Protocol for port. One of `UDP, TCP, or SCTP`, default: `TCP`. // +kubebuilder:validation:Enum=TCP;UDP;SCTP Protocol string `json:"protocol,omitempty"` }
NamedPort defines the container port properties.
func (*NamedPort) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedPort.
func (*NamedPort) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RevisionStatus ¶
type RevisionStatus struct { // CurrentRevision indicates the version of FlinkCluster. CurrentRevision string `json:"currentRevision,omitempty"` // NextRevision indicates the version of FlinkCluster updating. NextRevision string `json:"nextRevision,omitempty"` // collisionCount is the count of hash collisions for the FlinkCluster. The controller // uses this field as a collision avoidance mechanism when it needs to create the name for the // newest ControllerRevision. CollisionCount *int32 `json:"collisionCount,omitempty"` }
func (*RevisionStatus) DeepCopy ¶
func (in *RevisionStatus) DeepCopy() *RevisionStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevisionStatus.
func (*RevisionStatus) DeepCopyInto ¶
func (in *RevisionStatus) DeepCopyInto(out *RevisionStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*RevisionStatus) IsUpdateTriggered ¶
func (r *RevisionStatus) IsUpdateTriggered() bool
type SavepointStatus ¶
type SavepointStatus struct { // The ID of the Flink job. JobID string `json:"jobID,omitempty"` // Savepoint trigger ID. TriggerID string `json:"triggerID,omitempty"` // Savepoint triggered time. TriggerTime string `json:"triggerTime,omitempty"` // Savepoint triggered reason. TriggerReason SavepointReason `json:"triggerReason,omitempty"` // Savepoint status update time. UpdateTime string `json:"requestTime,omitempty"` // Savepoint state. State string `json:"state"` // Savepoint message. Message string `json:"message,omitempty"` }
SavepointStatus is the status of savepoint progress.
func (*SavepointStatus) DeepCopy ¶
func (in *SavepointStatus) DeepCopy() *SavepointStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SavepointStatus.
func (*SavepointStatus) DeepCopyInto ¶
func (in *SavepointStatus) DeepCopyInto(out *SavepointStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*SavepointStatus) IsFailed ¶
func (s *SavepointStatus) IsFailed() bool
type TaskManagerPorts ¶
type TaskManagerPorts struct { // Data port, default: `6121`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6121 Data *int32 `json:"data,omitempty"` // RPC port, default: `6122`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6122 RPC *int32 `json:"rpc,omitempty"` // Query port, default: `6125`. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +kubebuilder:default:=6125 Query *int32 `json:"query,omitempty"` }
TaskManagerPorts defines ports of TaskManager.
func (*TaskManagerPorts) DeepCopy ¶
func (in *TaskManagerPorts) DeepCopy() *TaskManagerPorts
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskManagerPorts.
func (*TaskManagerPorts) DeepCopyInto ¶
func (in *TaskManagerPorts) DeepCopyInto(out *TaskManagerPorts)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type TaskManagerSpec ¶
type TaskManagerSpec struct { // _(Optional)_ Defines the replica workload's type: `StatefulSet` or `Deployment`. If not specified, the default value is `StatefulSet`. // +kubebuilder:default:=StatefulSet DeploymentType DeploymentType `json:"deploymentType,omitempty"` // The number of replicas, default: `3` // +kubebuilder:default:=3 // +kubebuilder:validation:Minimum=1 Replicas *int32 `json:"replicas,omitempty"` // Ports that the TaskManager listens on. // +kubebuilder:default:={data:6121, rpc:6122, query:6125} Ports TaskManagerPorts `json:"ports,omitempty"` // _(Optional)_ Extra ports to be exposed. For example, Flink metrics reporter ports: Prometheus, JMX and so on. ExtraPorts []NamedPort `json:"extraPorts,omitempty"` // Compute resources required by each TaskManager container. // default: 2 CPUs with 2Gi Memory. // It cannot be updated. // [More info](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) // +kubebuilder:default:={requests:{cpu:"200m", memory:"512Mi"}, limits: {cpu:2, memory:"2Gi"}} Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Percentage of off-heap memory in containers, as a safety margin to avoid OOM kill, default: `25` MemoryOffHeapRatio *int32 `json:"memoryOffHeapRatio,omitempty"` // Minimum amount of off-heap memory in containers, as a safety margin to avoid OOM kill, default: `600M` // You can express this value like 600M, 572Mi and 600e6 // [More info](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) MemoryOffHeapMin resource.Quantity `json:"memoryOffHeapMin,omitempty"` // For Flink 1.10+. Percentage of process memory, as a safety margin to avoid OOM kill, default: `20` MemoryProcessRatio *int32 `json:"memoryProcessRatio,omitempty"` // _(Optional)_ Volumes in the TaskManager pods. // [More info](https://kubernetes.io/docs/concepts/storage/volumes/) Volumes []corev1.Volume `json:"volumes,omitempty"` // _(Optional)_ Volume mounts in the TaskManager containers. // [More info](https://kubernetes.io/docs/concepts/storage/volumes/) VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // _(Optional)_ A template for persistent volume claims, each requested and mounted to the TaskManager pod. // This can be used to mount an external volume with a specific storageClass or larger capacity (for a larger/faster state backend). // [More info](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) // If deploymentType: StatefulSet is used, these templates will be added to the taskManager statefulset template, // hence mounting persistent-pvcs to the indexed statefulset pods. // If deploymentType: Deployment is used, these templates are appended to the Ephemeral Volumes in the PodSpec, // hence mounting ephemeral-pvcs to the replicaset pods. VolumeClaimTemplates []corev1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` // _(Optional)_ Init containers of the TaskManager pod. // [More info](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) InitContainers []corev1.Container `json:"initContainers,omitempty"` // _(Optional)_ Defines the affinity of the TaskManager pod // [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) Affinity *corev1.Affinity `json:"affinity,omitempty"` // _(Optional)_ Selector which must match a node's labels for the TaskManager pod to be // scheduled on that node. 
// [More info](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) NodeSelector map[string]string `json:"nodeSelector,omitempty"` // _(Optional)_ Defines the tolerations of the TaskManager pod // [More info](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // _(Optional)_ Sidecar containers running alongside the TaskManager container in the pod. // [More info](https://kubernetes.io/docs/concepts/containers/) Sidecars []corev1.Container `json:"sidecars,omitempty"` // _(Optional)_ TaskManager StatefulSet pod template annotations. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) PodAnnotations map[string]string `json:"podAnnotations,omitempty"` // _(Optional)_ SecurityContext of the TaskManager pod. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` // _(Optional)_ TaskManager StatefulSet pod template labels. // [More info](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) PodLabels map[string]string `json:"podLabels,omitempty"` // Container liveness probe. // If omitted, a [default value](https://github.com/spotify/flink-on-k8s-operator/blob/a88ed2b/api/v1beta1/flinkcluster_default.go#L177-L187) will be used. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"` // Container readiness probe. // If omitted, a [default value](https://github.com/spotify/flink-on-k8s-operator/blob/a88ed2b/api/v1beta1/flinkcluster_default.go#L193-L203) will be used. // [More info](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty"` // _(Optional)_ Adding entries to the TaskManager pod's /etc/hosts with HostAliases // [More info](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"` // _(Optional)_ HorizontalPodAutoscaler for TaskManager. // [More info](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) HorizontalPodAutoscaler *HorizontalPodAutoscalerSpec `json:"horizontalPodAutoscaler,omitempty"` }
TaskManagerSpec defines properties of TaskManager.
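A sketch of a TaskManagerSpec running as a StatefulSet with a scratch emptyDir volume and explicit resources; the sizes, mount path, and import path are illustrative assumptions.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

func ptr[T any](v T) *T { return &v }

// statefulTaskManagers runs three TaskManagers as a StatefulSet and mounts an
// ephemeral scratch directory into each container.
func statefulTaskManagers() *flinkv1beta1.TaskManagerSpec {
	return &flinkv1beta1.TaskManagerSpec{
		DeploymentType: flinkv1beta1.DeploymentTypeStatefulSet,
		Replicas:       ptr(int32(3)),
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("2Gi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("4Gi"),
			},
		},
		MemoryProcessRatio: ptr(int32(20)),
		Volumes: []corev1.Volume{{
			Name:         "scratch",
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
		}},
		VolumeMounts: []corev1.VolumeMount{{
			Name:      "scratch",
			MountPath: "/tmp/scratch", // illustrative mount path
		}},
	}
}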
func (*TaskManagerSpec) DeepCopy ¶
func (in *TaskManagerSpec) DeepCopy() *TaskManagerSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskManagerSpec.
func (*TaskManagerSpec) DeepCopyInto ¶
func (in *TaskManagerSpec) DeepCopyInto(out *TaskManagerSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*TaskManagerSpec) GetResources ¶ added in v0.4.2
func (tm *TaskManagerSpec) GetResources() *corev1.ResourceList
type TaskManagerStatus ¶ added in v0.4.2
type TaskManagerStatus struct { // The resource name of the component. Name string `json:"name"` // The state of the component. State ComponentState `json:"state"` // replicas is the number of desired Pods. Replicas int32 `json:"replicas"` // readyReplicas is the number of created pods with a Ready Condition. ReadyReplicas int32 `json:"readyReplicas,omitempty"` Ready string `json:"ready"` Selector string `json:"selector"` }
func (*TaskManagerStatus) DeepCopy ¶ added in v0.4.2
func (in *TaskManagerStatus) DeepCopy() *TaskManagerStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskManagerStatus.
func (*TaskManagerStatus) DeepCopyInto ¶ added in v0.4.2
func (in *TaskManagerStatus) DeepCopyInto(out *TaskManagerStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Validator ¶
type Validator struct{}
Validator validates CUD requests for the CR.
func (*Validator) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validator.
func (*Validator) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Validator) ValidateCreate ¶
func (v *Validator) ValidateCreate(cluster *FlinkCluster) error
ValidateCreate validates create request.
func (*Validator) ValidateUpdate ¶
func (v *Validator) ValidateUpdate(old *FlinkCluster, new *FlinkCluster) error
ValidateUpdate validates update request.
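A sketch of invoking the validator directly, for example from a unit test, instead of going through the admission webhook; the import path is assumed as above.

package main

import (
	flinkv1beta1 "github.com/spotify/flink-on-k8s-operator/api/v1beta1" // assumed import path
)

// validateCluster runs the create-time validation rules against a cluster object.
func validateCluster(cluster *flinkv1beta1.FlinkCluster) error {
	v := flinkv1beta1.Validator{}
	return v.ValidateCreate(cluster)
}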