v1beta1

package
v1.73.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 10, 2022 License: Apache-2.0 Imports: 6 Imported by: 0

Documentation

Overview

Generate deepcopy object for dataproc/v1beta1 API group

Package v1beta1 contains API Schema definitions for the dataproc v1beta1 API group. +k8s:openapi-gen=true +k8s:deepcopy-gen=package,register +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/dataproc +k8s:defaulter-gen=TypeMeta +groupName=dataproc.cnrm.cloud.google.com

Index

Constants

This section is empty.

Variables

View Source
var (
	// SchemeGroupVersion is the group version used to register these objects.
	SchemeGroupVersion = schema.GroupVersion{Group: "dataproc.cnrm.cloud.google.com", Version: "v1beta1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme is a global function that registers this API group & version to a scheme
	AddToScheme = SchemeBuilder.AddToScheme

	// DataprocAutoscalingPolicyGVK is the GroupVersionKind for the
	// DataprocAutoscalingPolicy resource; the Kind is derived from the Go
	// type name via reflection so it cannot drift from the struct name.
	DataprocAutoscalingPolicyGVK = schema.GroupVersionKind{
		Group:   SchemeGroupVersion.Group,
		Version: SchemeGroupVersion.Version,
		Kind:    reflect.TypeOf(DataprocAutoscalingPolicy{}).Name(),
	}

	// DataprocClusterGVK is the GroupVersionKind for the DataprocCluster resource.
	DataprocClusterGVK = schema.GroupVersionKind{
		Group:   SchemeGroupVersion.Group,
		Version: SchemeGroupVersion.Version,
		Kind:    reflect.TypeOf(DataprocCluster{}).Name(),
	}

	// DataprocWorkflowTemplateGVK is the GroupVersionKind for the DataprocWorkflowTemplate resource.
	DataprocWorkflowTemplateGVK = schema.GroupVersionKind{
		Group:   SchemeGroupVersion.Group,
		Version: SchemeGroupVersion.Version,
		Kind:    reflect.TypeOf(DataprocWorkflowTemplate{}).Name(),
	}
)

Functions

This section is empty.

Types

type AutoscalingpolicyBasicAlgorithm

// AutoscalingpolicyBasicAlgorithm describes the basic autoscaling algorithm:
// a required YARN configuration plus an optional cooldown between scaling events.
type AutoscalingpolicyBasicAlgorithm struct {
	/* Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: . Default: 2m. */
	// +optional
	CooldownPeriod *string `json:"cooldownPeriod,omitempty"`

	/* Required. YARN autoscaling configuration. */
	YarnConfig AutoscalingpolicyYarnConfig `json:"yarnConfig"`
}

func (*AutoscalingpolicyBasicAlgorithm) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyBasicAlgorithm.

func (*AutoscalingpolicyBasicAlgorithm) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AutoscalingpolicySecondaryWorkerConfig

// AutoscalingpolicySecondaryWorkerConfig bounds and weights autoscaling for a
// cluster's secondary worker group.
type AutoscalingpolicySecondaryWorkerConfig struct {
	/* Optional. Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0. */
	// +optional
	MaxInstances *int `json:"maxInstances,omitempty"`

	/* Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0. */
	// +optional
	MinInstances *int `json:"minInstances,omitempty"`

	/* Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. */
	// +optional
	Weight *int `json:"weight,omitempty"`
}

func (*AutoscalingpolicySecondaryWorkerConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicySecondaryWorkerConfig.

func (*AutoscalingpolicySecondaryWorkerConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AutoscalingpolicyWorkerConfig

// AutoscalingpolicyWorkerConfig bounds and weights autoscaling for a cluster's
// primary worker group. Unlike the secondary worker config, MaxInstances is required.
type AutoscalingpolicyWorkerConfig struct {
	/* Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0. */
	MaxInstances int `json:"maxInstances"`

	/* Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0. */
	// +optional
	MinInstances *int `json:"minInstances,omitempty"`

	/* Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. */
	// +optional
	Weight *int `json:"weight,omitempty"`
}

func (*AutoscalingpolicyWorkerConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyWorkerConfig.

func (*AutoscalingpolicyWorkerConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type AutoscalingpolicyYarnConfig

// AutoscalingpolicyYarnConfig tunes YARN-memory-based autoscaling: scale
// up/down factors, minimum change thresholds, and the graceful decommission timeout.
type AutoscalingpolicyYarnConfig struct {
	/* Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. */
	GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout"`

	/* Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See . */
	ScaleDownFactor float64 `json:"scaleDownFactor"`

	/* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: . Default: 0.0. */
	// +optional
	ScaleDownMinWorkerFraction *float64 `json:"scaleDownMinWorkerFraction,omitempty"`

	/* Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See . */
	ScaleUpFactor float64 `json:"scaleUpFactor"`

	/* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: . Default: 0.0. */
	// +optional
	ScaleUpMinWorkerFraction *float64 `json:"scaleUpMinWorkerFraction,omitempty"`
}

func (*AutoscalingpolicyYarnConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyYarnConfig.

func (*AutoscalingpolicyYarnConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterAccelerators

// ClusterAccelerators specifies accelerator cards (type and count) exposed to
// a cluster's instances.
type ClusterAccelerators struct {
	/* The number of the accelerator cards of this type exposed to this instance. */
	// +optional
	AcceleratorCount *int `json:"acceleratorCount,omitempty"`

	/* Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */
	// +optional
	AcceleratorType *string `json:"acceleratorType,omitempty"`
}

func (*ClusterAccelerators) DeepCopy

func (in *ClusterAccelerators) DeepCopy() *ClusterAccelerators

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAccelerators.

func (*ClusterAccelerators) DeepCopyInto

func (in *ClusterAccelerators) DeepCopyInto(out *ClusterAccelerators)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterAutoscalingConfig

// ClusterAutoscalingConfig associates an autoscaling policy with the cluster.
type ClusterAutoscalingConfig struct {
	/* Reference to the autoscaling policy for the cluster. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller's reference resolution. */
	// +optional
	PolicyRef *v1alpha1.ResourceRef `json:"policyRef,omitempty"`
}

func (*ClusterAutoscalingConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscalingConfig.

func (*ClusterAutoscalingConfig) DeepCopyInto

func (in *ClusterAutoscalingConfig) DeepCopyInto(out *ClusterAutoscalingConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterConfig

// ClusterConfig is the top-level cluster configuration: per-group Compute
// Engine settings, security/encryption, lifecycle, software, and bucket references.
type ClusterConfig struct {
	/* Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. */
	// +optional
	AutoscalingConfig *ClusterAutoscalingConfig `json:"autoscalingConfig,omitempty"`

	/* Optional. Encryption settings for the cluster. */
	// +optional
	EncryptionConfig *ClusterEncryptionConfig `json:"encryptionConfig,omitempty"`

	/* Optional. Port/endpoint configuration for this cluster */
	// +optional
	EndpointConfig *ClusterEndpointConfig `json:"endpointConfig,omitempty"`

	/* Optional. The shared Compute Engine config settings for all instances in a cluster. */
	// +optional
	GceClusterConfig *ClusterGceClusterConfig `json:"gceClusterConfig,omitempty"`

	/* Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi */
	// +optional
	InitializationActions []ClusterInitializationActions `json:"initializationActions,omitempty"`

	/* Optional. Lifecycle setting for the cluster. */
	// +optional
	LifecycleConfig *ClusterLifecycleConfig `json:"lifecycleConfig,omitempty"`

	/* Optional. The Compute Engine config settings for the master instance in a cluster. */
	// +optional
	MasterConfig *ClusterMasterConfig `json:"masterConfig,omitempty"`

	/* Optional. The Compute Engine config settings for a cluster's secondary worker instances. (Comment previously copy-pasted from MasterConfig.) */
	// +optional
	SecondaryWorkerConfig *ClusterSecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`

	/* Optional. Security settings for the cluster. */
	// +optional
	SecurityConfig *ClusterSecurityConfig `json:"securityConfig,omitempty"`

	/* Optional. The config settings for software inside the cluster. */
	// +optional
	SoftwareConfig *ClusterSoftwareConfig `json:"softwareConfig,omitempty"`

	/* Reference to the Cloud Storage staging bucket. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	StagingBucketRef *v1alpha1.ResourceRef `json:"stagingBucketRef,omitempty"`

	/* Reference to the Cloud Storage temp bucket. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	TempBucketRef *v1alpha1.ResourceRef `json:"tempBucketRef,omitempty"`

	/* Optional. The Compute Engine config settings for a cluster's worker instances. (Comment previously copy-pasted from MasterConfig.) */
	// +optional
	WorkerConfig *ClusterWorkerConfig `json:"workerConfig,omitempty"`
}

func (*ClusterConfig) DeepCopy

func (in *ClusterConfig) DeepCopy() *ClusterConfig

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig.

func (*ClusterConfig) DeepCopyInto

func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterConfigStatus

// ClusterConfigStatus surfaces the observed (output-only) per-group and
// lifecycle configuration of the cluster.
type ClusterConfigStatus struct {
	/* Observed endpoint configuration; see ClusterEndpointConfigStatus. */
	EndpointConfig ClusterEndpointConfigStatus `json:"endpointConfig,omitempty"`

	/* Observed lifecycle configuration; see ClusterLifecycleConfigStatus. */
	LifecycleConfig ClusterLifecycleConfigStatus `json:"lifecycleConfig,omitempty"`

	/* Observed master group configuration; see ClusterMasterConfigStatus. */
	MasterConfig ClusterMasterConfigStatus `json:"masterConfig,omitempty"`

	/* Observed secondary worker group configuration. */
	SecondaryWorkerConfig ClusterSecondaryWorkerConfigStatus `json:"secondaryWorkerConfig,omitempty"`

	/* Observed worker group configuration. */
	WorkerConfig ClusterWorkerConfigStatus `json:"workerConfig,omitempty"`
}

func (*ClusterConfigStatus) DeepCopy

func (in *ClusterConfigStatus) DeepCopy() *ClusterConfigStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigStatus.

func (*ClusterConfigStatus) DeepCopyInto

func (in *ClusterConfigStatus) DeepCopyInto(out *ClusterConfigStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterDiskConfig

// ClusterDiskConfig specifies boot disk size/type and attached local SSD count
// for instances in a group.
type ClusterDiskConfig struct {
	/* Optional. Size in GB of the boot disk (default is 500GB). */
	// +optional
	BootDiskSizeGb *int `json:"bootDiskSizeGb,omitempty"`

	/* Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). */
	// +optional
	BootDiskType *string `json:"bootDiskType,omitempty"`

	/* Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */
	// +optional
	NumLocalSsds *int `json:"numLocalSsds,omitempty"`
}

func (*ClusterDiskConfig) DeepCopy

func (in *ClusterDiskConfig) DeepCopy() *ClusterDiskConfig

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDiskConfig.

func (*ClusterDiskConfig) DeepCopyInto

func (in *ClusterDiskConfig) DeepCopyInto(out *ClusterDiskConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterEncryptionConfig

// ClusterEncryptionConfig holds the cluster's disk-encryption key reference.
type ClusterEncryptionConfig struct {
	/* Reference to the KMS key used for Compute Engine persistent-disk encryption. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	GcePdKmsKeyRef *v1alpha1.ResourceRef `json:"gcePdKmsKeyRef,omitempty"`
}

func (*ClusterEncryptionConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEncryptionConfig.

func (*ClusterEncryptionConfig) DeepCopyInto

func (in *ClusterEncryptionConfig) DeepCopyInto(out *ClusterEncryptionConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterEndpointConfig

// ClusterEndpointConfig controls external HTTP access to cluster ports.
type ClusterEndpointConfig struct {
	/* Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. */
	// +optional
	EnableHttpPortAccess *bool `json:"enableHttpPortAccess,omitempty"`
}

func (*ClusterEndpointConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEndpointConfig.

func (*ClusterEndpointConfig) DeepCopyInto

func (in *ClusterEndpointConfig) DeepCopyInto(out *ClusterEndpointConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterEndpointConfigStatus

// ClusterEndpointConfigStatus surfaces the observed endpoint URLs of the cluster.
type ClusterEndpointConfigStatus struct {
	/* Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. */
	HttpPorts map[string]string `json:"httpPorts,omitempty"`
}

func (*ClusterEndpointConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEndpointConfigStatus.

func (*ClusterEndpointConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterGceClusterConfig

// ClusterGceClusterConfig holds Compute Engine settings shared by all
// instances in the cluster: networking, service account, placement, and metadata.
type ClusterGceClusterConfig struct {
	/* Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. */
	// +optional
	InternalIPOnly *bool `json:"internalIPOnly,omitempty"`

	/* The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). */
	// +optional
	Metadata map[string]string `json:"metadata,omitempty"`

	/* Reference to the Compute Engine network. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"`

	/* Optional. Node Group Affinity for sole-tenant clusters. */
	// +optional
	NodeGroupAffinity *ClusterNodeGroupAffinity `json:"nodeGroupAffinity,omitempty"`

	/* Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL */
	// +optional
	PrivateIPv6GoogleAccess *string `json:"privateIPv6GoogleAccess,omitempty"`

	/* Optional. Reservation Affinity for consuming Zonal reservation. */
	// +optional
	ReservationAffinity *ClusterReservationAffinity `json:"reservationAffinity,omitempty"`

	/* Reference to the service account used by cluster instances. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	ServiceAccountRef *v1alpha1.ResourceRef `json:"serviceAccountRef,omitempty"`

	/* Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control */
	// +optional
	ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"`

	/* Reference to the Compute Engine subnetwork. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	SubnetworkRef *v1alpha1.ResourceRef `json:"subnetworkRef,omitempty"`

	/* The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). */
	// +optional
	Tags []string `json:"tags,omitempty"`

	/* Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f` */
	// +optional
	Zone *string `json:"zone,omitempty"`
}

func (*ClusterGceClusterConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGceClusterConfig.

func (*ClusterGceClusterConfig) DeepCopyInto

func (in *ClusterGceClusterConfig) DeepCopyInto(out *ClusterGceClusterConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterInitializationActions

// ClusterInitializationActions describes one executable to run on cluster
// nodes after configuration completes, with an optional timeout.
type ClusterInitializationActions struct {
	/* Required. Cloud Storage URI of executable file. */
	ExecutableFile string `json:"executableFile"`

	/* Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */
	// +optional
	ExecutionTimeout *string `json:"executionTimeout,omitempty"`
}

func (*ClusterInitializationActions) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitializationActions.

func (*ClusterInitializationActions) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterKerberosConfig

// ClusterKerberosConfig configures Kerberos on the cluster: the on-cluster
// realm, SSL keystore/truststore material, and optional cross-realm trust.
// Password/key fields are Cloud Storage URIs of KMS-encrypted files, not
// plaintext secrets.
type ClusterKerberosConfig struct {
	/* Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty"`

	/* Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty"`

	/* Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. */
	// +optional
	CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword,omitempty"`

	/* Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. */
	// +optional
	EnableKerberos *bool `json:"enableKerberos,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. */
	// +optional
	KdcDbKey *string `json:"kdcDbKey,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	KeyPassword *string `json:"keyPassword,omitempty"`

	/* Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
	// +optional
	Keystore *string `json:"keystore,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	KeystorePassword *string `json:"keystorePassword,omitempty"`

	/* Reference to the KMS key used to decrypt the files above. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	KmsKeyRef *v1alpha1.ResourceRef `json:"kmsKeyRef,omitempty"`

	/* Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. */
	// +optional
	Realm *string `json:"realm,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. */
	// +optional
	RootPrincipalPassword *string `json:"rootPrincipalPassword,omitempty"`

	/* Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. */
	// +optional
	TgtLifetimeHours *int `json:"tgtLifetimeHours,omitempty"`

	/* Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
	// +optional
	Truststore *string `json:"truststore,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	TruststorePassword *string `json:"truststorePassword,omitempty"`
}

func (*ClusterKerberosConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterKerberosConfig.

func (*ClusterKerberosConfig) DeepCopyInto

func (in *ClusterKerberosConfig) DeepCopyInto(out *ClusterKerberosConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterLifecycleConfig

// ClusterLifecycleConfig schedules automatic cluster deletion, either at a
// fixed time, after a fixed lifetime, or after a period of idleness.
type ClusterLifecycleConfig struct {
	/* Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	AutoDeleteTime *string `json:"autoDeleteTime,omitempty"`

	/* Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	AutoDeleteTtl *string `json:"autoDeleteTtl,omitempty"`

	/* Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	IdleDeleteTtl *string `json:"idleDeleteTtl,omitempty"`
}

func (*ClusterLifecycleConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLifecycleConfig.

func (*ClusterLifecycleConfig) DeepCopyInto

func (in *ClusterLifecycleConfig) DeepCopyInto(out *ClusterLifecycleConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterLifecycleConfigStatus

// ClusterLifecycleConfigStatus surfaces the observed idle-start timestamp.
type ClusterLifecycleConfigStatus struct {
	/* Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	IdleStartTime string `json:"idleStartTime,omitempty"`
}

func (*ClusterLifecycleConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLifecycleConfigStatus.

func (*ClusterLifecycleConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterManagedGroupConfigStatus

// ClusterManagedGroupConfigStatus surfaces the observed Compute Engine
// managed-instance-group resources backing an instance group.
type ClusterManagedGroupConfigStatus struct {
	/* Output only. The name of the Instance Group Manager for this group. */
	InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"`

	/* Output only. The name of the Instance Template used for the Managed Instance Group. */
	InstanceTemplateName string `json:"instanceTemplateName,omitempty"`
}

func (*ClusterManagedGroupConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterManagedGroupConfigStatus.

func (*ClusterManagedGroupConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterMasterConfig

// ClusterMasterConfig holds the Compute Engine settings for the cluster's
// master instance group: machine/disk/accelerator config, image, and size.
type ClusterMasterConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`

	/* Reference to the Compute Engine image used for the instances. NOTE(review): semantics inferred from the field name and ResourceRef type — confirm against the controller. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*ClusterMasterConfig) DeepCopy

func (in *ClusterMasterConfig) DeepCopy() *ClusterMasterConfig

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterConfig.

func (*ClusterMasterConfig) DeepCopyInto

func (in *ClusterMasterConfig) DeepCopyInto(out *ClusterMasterConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterMasterConfigStatus

// ClusterMasterConfigStatus reports the observed state of the cluster's
// master instance group. All fields are output-only: the service sets them
// and the user never supplies them (hence no +optional markers are needed).
type ClusterMasterConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*ClusterMasterConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterConfigStatus.

func (*ClusterMasterConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterMetricsStatus

// ClusterMetricsStatus holds daemon metrics reported by the cluster,
// keyed by metric name (separate maps for HDFS and YARN). Output-only.
type ClusterMetricsStatus struct {
	/* The HDFS metrics. */
	HdfsMetrics map[string]string `json:"hdfsMetrics,omitempty"`

	/* The YARN metrics. */
	YarnMetrics map[string]string `json:"yarnMetrics,omitempty"`
}

func (*ClusterMetricsStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMetricsStatus.

func (*ClusterMetricsStatus) DeepCopyInto

func (in *ClusterMetricsStatus) DeepCopyInto(out *ClusterMetricsStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterNodeGroupAffinity

// ClusterNodeGroupAffinity configures node-group affinity for cluster VMs.
type ClusterNodeGroupAffinity struct {
	/* Reference to the node group on which the cluster's VMs are placed.
	   Required: the field has no +optional marker and its JSON tag has no
	   omitempty. NOTE(review): the generated description is empty — presumably
	   this references a sole-tenant ComputeNodeGroup; confirm against the
	   Dataproc NodeGroupAffinity API docs. */
	NodeGroupRef v1alpha1.ResourceRef `json:"nodeGroupRef"`
}

func (*ClusterNodeGroupAffinity) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNodeGroupAffinity.

func (*ClusterNodeGroupAffinity) DeepCopyInto

func (in *ClusterNodeGroupAffinity) DeepCopyInto(out *ClusterNodeGroupAffinity)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterReservationAffinity

// ClusterReservationAffinity configures which Compute Engine reservation
// (if any) the cluster's instances consume.
type ClusterReservationAffinity struct {
	/* Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION */
	// +optional
	ConsumeReservationType *string `json:"consumeReservationType,omitempty"`

	/* Optional. Corresponds to the label key of reservation resource. */
	// +optional
	Key *string `json:"key,omitempty"`

	/* Optional. Corresponds to the label values of reservation resource. */
	// +optional
	Values []string `json:"values,omitempty"`
}

func (*ClusterReservationAffinity) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReservationAffinity.

func (*ClusterReservationAffinity) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterSecondaryWorkerConfig

// ClusterSecondaryWorkerConfig describes the desired configuration of the
// cluster's secondary (typically preemptible) worker instance group. Every
// field is optional; unset fields take Dataproc's server-side defaults.
type ClusterSecondaryWorkerConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`

	/* Reference to the image used to create the cluster instances.
	   NOTE(review): the generated description is empty — confirm against the
	   Dataproc InstanceGroupConfig.image docs. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// NOTE(review): the description above is the shared InstanceGroupConfig
	// text and its 1/3 constraint applies to master_config groups, not to
	// this secondary-worker group — confirm against the Dataproc API docs.
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*ClusterSecondaryWorkerConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecondaryWorkerConfig.

func (*ClusterSecondaryWorkerConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterSecondaryWorkerConfigStatus

type ClusterSecondaryWorkerConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*ClusterSecondaryWorkerConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecondaryWorkerConfigStatus.

func (*ClusterSecondaryWorkerConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterSecurityConfig

// ClusterSecurityConfig groups the cluster's security-related settings;
// currently only Kerberos configuration is exposed here.
type ClusterSecurityConfig struct {
	/* Optional. Kerberos related configuration. */
	// +optional
	KerberosConfig *ClusterKerberosConfig `json:"kerberosConfig,omitempty"`
}

func (*ClusterSecurityConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecurityConfig.

func (*ClusterSecurityConfig) DeepCopyInto

func (in *ClusterSecurityConfig) DeepCopyInto(out *ClusterSecurityConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterSoftwareConfig

// ClusterSoftwareConfig selects the software that runs on the cluster:
// the Dataproc image version, optional components, and daemon properties.
type ClusterSoftwareConfig struct {
	/* Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the ["preview" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
	// +optional
	ImageVersion *string `json:"imageVersion,omitempty"`

	/* Optional. The set of components to activate on the cluster. */
	// +optional
	OptionalComponents []string `json:"optionalComponents,omitempty"`

	/* Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`
}

func (*ClusterSoftwareConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSoftwareConfig.

func (*ClusterSoftwareConfig) DeepCopyInto

func (in *ClusterSoftwareConfig) DeepCopyInto(out *ClusterSoftwareConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterStatusHistoryStatus

// ClusterStatusHistoryStatus is one historical entry of the cluster's
// status (same shape as ClusterStatusStatus). Output-only.
type ClusterStatusHistoryStatus struct {
	/* Optional. Output only. Details of cluster's state. */
	Detail string `json:"detail,omitempty"`

	/* Output only. The cluster's state. Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING */
	State string `json:"state,omitempty"`

	/* Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	StateStartTime string `json:"stateStartTime,omitempty"`

	/* Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS */
	Substate string `json:"substate,omitempty"`
}

func (*ClusterStatusHistoryStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatusHistoryStatus.

func (*ClusterStatusHistoryStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterStatusStatus

// ClusterStatusStatus reports the cluster's current status as observed by
// the service (state, substate, detail, and when the state was entered).
// Output-only.
type ClusterStatusStatus struct {
	/* Optional. Output only. Details of cluster's state. */
	Detail string `json:"detail,omitempty"`

	/* Output only. The cluster's state. Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING */
	State string `json:"state,omitempty"`

	/* Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	StateStartTime string `json:"stateStartTime,omitempty"`

	/* Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS */
	Substate string `json:"substate,omitempty"`
}

func (*ClusterStatusStatus) DeepCopy

func (in *ClusterStatusStatus) DeepCopy() *ClusterStatusStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatusStatus.

func (*ClusterStatusStatus) DeepCopyInto

func (in *ClusterStatusStatus) DeepCopyInto(out *ClusterStatusStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterWorkerConfig

// ClusterWorkerConfig describes the desired configuration of the cluster's
// primary worker instance group. Every field is optional; unset fields take
// Dataproc's server-side defaults.
type ClusterWorkerConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`

	/* Reference to the image used to create the cluster instances.
	   NOTE(review): the generated description is empty — confirm against the
	   Dataproc InstanceGroupConfig.image docs. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// NOTE(review): the description above is the shared InstanceGroupConfig
	// text and its 1/3 constraint applies to master_config groups, not to
	// this worker group — confirm against the Dataproc API docs.
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*ClusterWorkerConfig) DeepCopy

func (in *ClusterWorkerConfig) DeepCopy() *ClusterWorkerConfig

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkerConfig.

func (*ClusterWorkerConfig) DeepCopyInto

func (in *ClusterWorkerConfig) DeepCopyInto(out *ClusterWorkerConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ClusterWorkerConfigStatus

// ClusterWorkerConfigStatus reports the observed state of the cluster's
// primary worker instance group. All fields are output-only.
type ClusterWorkerConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*ClusterWorkerConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkerConfigStatus.

func (*ClusterWorkerConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocAutoscalingPolicy

// DataprocAutoscalingPolicy is the Schema for the dataproc API
// +k8s:openapi-gen=true
//
// It follows the standard Kubernetes resource shape: embedded TypeMeta and
// ObjectMeta plus the desired-state Spec and observed-state Status.
type DataprocAutoscalingPolicy struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DataprocAutoscalingPolicySpec   `json:"spec,omitempty"`
	Status DataprocAutoscalingPolicyStatus `json:"status,omitempty"`
}

DataprocAutoscalingPolicy is the Schema for the dataproc API +k8s:openapi-gen=true

func (*DataprocAutoscalingPolicy) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicy.

func (*DataprocAutoscalingPolicy) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocAutoscalingPolicy) DeepCopyObject

func (in *DataprocAutoscalingPolicy) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocAutoscalingPolicyList

// DataprocAutoscalingPolicyList contains a list of DataprocAutoscalingPolicy.
type DataprocAutoscalingPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DataprocAutoscalingPolicy `json:"items"`
}

DataprocAutoscalingPolicyList contains a list of DataprocAutoscalingPolicy

func (*DataprocAutoscalingPolicyList) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicyList.

func (*DataprocAutoscalingPolicyList) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocAutoscalingPolicyList) DeepCopyObject

func (in *DataprocAutoscalingPolicyList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocAutoscalingPolicySpec

// DataprocAutoscalingPolicySpec defines the desired state of a
// DataprocAutoscalingPolicy: the scaling algorithm, the worker-group
// bounds it governs, and where/under which project the policy lives.
type DataprocAutoscalingPolicySpec struct {
	/* The autoscaling algorithm configuration. Required: no +optional marker
	   and no omitempty on the JSON tag. NOTE(review): the generated
	   description is empty — confirm wording against the Dataproc
	   AutoscalingPolicy.basicAlgorithm docs. */
	BasicAlgorithm AutoscalingpolicyBasicAlgorithm `json:"basicAlgorithm"`

	/* The location for the resource */
	Location string `json:"location"`

	/* The Project that this resource belongs to. */
	// +optional
	ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`

	/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
	// +optional
	ResourceID *string `json:"resourceID,omitempty"`

	/* Optional. Describes how the autoscaler will operate for secondary workers. */
	// +optional
	SecondaryWorkerConfig *AutoscalingpolicySecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`

	/* Required. Describes how the autoscaler will operate for primary workers. */
	WorkerConfig AutoscalingpolicyWorkerConfig `json:"workerConfig"`
}

func (*DataprocAutoscalingPolicySpec) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicySpec.

func (*DataprocAutoscalingPolicySpec) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocAutoscalingPolicyStatus

// DataprocAutoscalingPolicyStatus holds the controller-observed state of a
// DataprocAutoscalingPolicy: standard Config Connector conditions plus the
// generation most recently reconciled.
type DataprocAutoscalingPolicyStatus struct {
	/* Conditions represent the latest available observations of the
	   DataprocAutoscalingPolicy's current state. */
	Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
	/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
	ObservedGeneration int `json:"observedGeneration,omitempty"`
}

func (*DataprocAutoscalingPolicyStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicyStatus.

func (*DataprocAutoscalingPolicyStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocCluster

// DataprocCluster is the Schema for the dataproc API
// +k8s:openapi-gen=true
//
// Standard Kubernetes resource shape: embedded TypeMeta and ObjectMeta
// plus the desired-state Spec and observed-state Status.
type DataprocCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DataprocClusterSpec   `json:"spec,omitempty"`
	Status DataprocClusterStatus `json:"status,omitempty"`
}

DataprocCluster is the Schema for the dataproc API +k8s:openapi-gen=true

func (*DataprocCluster) DeepCopy

func (in *DataprocCluster) DeepCopy() *DataprocCluster

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocCluster.

func (*DataprocCluster) DeepCopyInto

func (in *DataprocCluster) DeepCopyInto(out *DataprocCluster)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocCluster) DeepCopyObject

func (in *DataprocCluster) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocClusterList

// DataprocClusterList contains a list of DataprocCluster.
type DataprocClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DataprocCluster `json:"items"`
}

DataprocClusterList contains a list of DataprocCluster

func (*DataprocClusterList) DeepCopy

func (in *DataprocClusterList) DeepCopy() *DataprocClusterList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterList.

func (*DataprocClusterList) DeepCopyInto

func (in *DataprocClusterList) DeepCopyInto(out *DataprocClusterList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocClusterList) DeepCopyObject

func (in *DataprocClusterList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocClusterSpec

// DataprocClusterSpec defines the desired state of a DataprocCluster:
// the cluster configuration, its location, and project/resource identity.
type DataprocClusterSpec struct {
	/* Required. The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated. */
	// NOTE(review): the description says "Required." but the field carries a
	// +optional marker and an omitempty tag — the two disagree; confirm which
	// is intended before relying on either.
	// +optional
	Config *ClusterConfig `json:"config,omitempty"`

	/* The location for the resource, usually a GCP region. */
	Location string `json:"location"`

	/* The Project that this resource belongs to. */
	// +optional
	ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`

	/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
	// +optional
	ResourceID *string `json:"resourceID,omitempty"`
}

func (*DataprocClusterSpec) DeepCopy

func (in *DataprocClusterSpec) DeepCopy() *DataprocClusterSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterSpec.

func (*DataprocClusterSpec) DeepCopyInto

func (in *DataprocClusterSpec) DeepCopyInto(out *DataprocClusterSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocClusterStatus

// DataprocClusterStatus holds the controller-observed state of a
// DataprocCluster: Config Connector conditions, the reconciled generation,
// and output-only fields mirrored from the Dataproc service.
type DataprocClusterStatus struct {
	/* Conditions represent the latest available observations of the
	   DataprocCluster's current state. */
	Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
	/* Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. */
	ClusterUuid string `json:"clusterUuid,omitempty"`
	/* The observed cluster config. NOTE(review): the generated description is
	   empty — presumably this mirrors the service-side cluster config,
	   including defaults Dataproc filled in; confirm against the controller. */
	Config ClusterConfigStatus `json:"config,omitempty"`
	/* Output only. Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release. */
	Metrics ClusterMetricsStatus `json:"metrics,omitempty"`
	/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
	ObservedGeneration int `json:"observedGeneration,omitempty"`
	/* Output only. Cluster status. */
	Status ClusterStatusStatus `json:"status,omitempty"`
	/* Output only. The previous cluster status. */
	StatusHistory []ClusterStatusHistoryStatus `json:"statusHistory,omitempty"`
}

func (*DataprocClusterStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterStatus.

func (*DataprocClusterStatus) DeepCopyInto

func (in *DataprocClusterStatus) DeepCopyInto(out *DataprocClusterStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocWorkflowTemplate

// DataprocWorkflowTemplate is the Schema for the dataproc API
// +k8s:openapi-gen=true
//
// Standard Kubernetes resource shape: embedded TypeMeta and ObjectMeta
// plus the desired-state Spec and observed-state Status.
type DataprocWorkflowTemplate struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DataprocWorkflowTemplateSpec   `json:"spec,omitempty"`
	Status DataprocWorkflowTemplateStatus `json:"status,omitempty"`
}

DataprocWorkflowTemplate is the Schema for the dataproc API +k8s:openapi-gen=true

func (*DataprocWorkflowTemplate) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplate.

func (*DataprocWorkflowTemplate) DeepCopyInto

func (in *DataprocWorkflowTemplate) DeepCopyInto(out *DataprocWorkflowTemplate)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocWorkflowTemplate) DeepCopyObject

func (in *DataprocWorkflowTemplate) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocWorkflowTemplateList

// DataprocWorkflowTemplateList contains a list of DataprocWorkflowTemplate.
type DataprocWorkflowTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DataprocWorkflowTemplate `json:"items"`
}

DataprocWorkflowTemplateList contains a list of DataprocWorkflowTemplate

func (*DataprocWorkflowTemplateList) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateList.

func (*DataprocWorkflowTemplateList) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*DataprocWorkflowTemplateList) DeepCopyObject

func (in *DataprocWorkflowTemplateList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type DataprocWorkflowTemplateSpec

// DataprocWorkflowTemplateSpec defines the desired state of a
// DataprocWorkflowTemplate: the DAG of jobs, cluster placement, template
// parameters, and project/resource identity.
type DataprocWorkflowTemplateSpec struct {
	/* Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted. */
	// +optional
	DagTimeout *string `json:"dagTimeout,omitempty"`

	/* Required. The Directed Acyclic Graph of Jobs to submit. */
	Jobs []WorkflowtemplateJobs `json:"jobs"`

	/* The location for the resource */
	Location string `json:"location"`

	/* Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. */
	// +optional
	Parameters []WorkflowtemplateParameters `json:"parameters,omitempty"`

	/* Required. WorkflowTemplate scheduling information. */
	Placement WorkflowtemplatePlacement `json:"placement"`

	/* The Project that this resource belongs to. */
	// +optional
	ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`

	/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
	// +optional
	ResourceID *string `json:"resourceID,omitempty"`
}

func (*DataprocWorkflowTemplateSpec) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateSpec.

func (*DataprocWorkflowTemplateSpec) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type DataprocWorkflowTemplateStatus

// DataprocWorkflowTemplateStatus holds the controller-observed state of a
// DataprocWorkflowTemplate: Config Connector conditions, the reconciled
// generation, and output-only fields mirrored from the Dataproc service.
type DataprocWorkflowTemplateStatus struct {
	/* Conditions represent the latest available observations of the
	   DataprocWorkflowTemplate's current state. */
	Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
	/* Output only. The time template was created. */
	CreateTime string `json:"createTime,omitempty"`
	/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
	ObservedGeneration int `json:"observedGeneration,omitempty"`
	/* The observed placement. NOTE(review): the generated description is
	   empty — presumably this mirrors the service-side view of spec.placement;
	   confirm against the controller. */
	Placement WorkflowtemplatePlacementStatus `json:"placement,omitempty"`
	/* Output only. The time template was last updated. */
	UpdateTime string `json:"updateTime,omitempty"`
	/* Output only. The current version of this workflow template. */
	Version int `json:"version,omitempty"`
}

func (*DataprocWorkflowTemplateStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateStatus.

func (*DataprocWorkflowTemplateStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateAccelerators

// WorkflowtemplateAccelerators specifies one accelerator (GPU) attachment
// for workflow-template cluster instances: the accelerator type and how
// many cards of that type to expose.
type WorkflowtemplateAccelerators struct {
	/* The number of the accelerator cards of this type exposed to this instance. */
	// +optional
	AcceleratorCount *int `json:"acceleratorCount,omitempty"`

	/* Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */
	// +optional
	AcceleratorType *string `json:"acceleratorType,omitempty"`
}

func (*WorkflowtemplateAccelerators) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateAccelerators.

func (*WorkflowtemplateAccelerators) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateAutoscalingConfig

// WorkflowtemplateAutoscalingConfig attaches an autoscaling policy to a
// workflow-template managed cluster.
type WorkflowtemplateAutoscalingConfig struct {
	/* Reference to the autoscaling policy to apply to the cluster.
	   NOTE(review): the generated description is empty — presumably this
	   references a DataprocAutoscalingPolicy; confirm against the
	   Dataproc AutoscalingConfig.policy docs. */
	// +optional
	PolicyRef *v1alpha1.ResourceRef `json:"policyRef,omitempty"`
}

func (*WorkflowtemplateAutoscalingConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateAutoscalingConfig.

func (*WorkflowtemplateAutoscalingConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateClusterSelector

// WorkflowtemplateClusterSelector selects an existing cluster (rather than
// a managed one) for a workflow to run on, by matching cluster labels.
type WorkflowtemplateClusterSelector struct {
	/* Required. The cluster labels. Cluster must have all labels to match. */
	ClusterLabels map[string]string `json:"clusterLabels"`

	/* Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. */
	// +optional
	Zone *string `json:"zone,omitempty"`
}

func (*WorkflowtemplateClusterSelector) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateClusterSelector.

func (*WorkflowtemplateClusterSelector) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateConfig

// WorkflowtemplateConfig is the cluster configuration for the managed cluster
// a workflow template creates.
type WorkflowtemplateConfig struct {
	/* Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. */
	// +optional
	AutoscalingConfig *WorkflowtemplateAutoscalingConfig `json:"autoscalingConfig,omitempty"`

	/* Optional. Encryption settings for the cluster. */
	// +optional
	EncryptionConfig *WorkflowtemplateEncryptionConfig `json:"encryptionConfig,omitempty"`

	/* Optional. Port/endpoint configuration for this cluster. */
	// +optional
	EndpointConfig *WorkflowtemplateEndpointConfig `json:"endpointConfig,omitempty"`

	/* Optional. The shared Compute Engine config settings for all instances in a cluster. */
	// +optional
	GceClusterConfig *WorkflowtemplateGceClusterConfig `json:"gceClusterConfig,omitempty"`

	/* Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi */
	// +optional
	InitializationActions []WorkflowtemplateInitializationActions `json:"initializationActions,omitempty"`

	/* Optional. Lifecycle setting for the cluster. */
	// +optional
	LifecycleConfig *WorkflowtemplateLifecycleConfig `json:"lifecycleConfig,omitempty"`

	/* Optional. The Compute Engine config settings for master instances in a cluster. NOTE(review): the generated description said "worker instances", which contradicts the field name; corrected to "master" — confirm against the Dataproc API reference. */
	// +optional
	MasterConfig *WorkflowtemplateMasterConfig `json:"masterConfig,omitempty"`

	/* Optional. The Compute Engine config settings for secondary worker instances in a cluster. */
	// +optional
	SecondaryWorkerConfig *WorkflowtemplateSecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`

	/* Optional. Security settings for the cluster. */
	// +optional
	SecurityConfig *WorkflowtemplateSecurityConfig `json:"securityConfig,omitempty"`

	/* Optional. The config settings for software inside the cluster. */
	// +optional
	SoftwareConfig *WorkflowtemplateSoftwareConfig `json:"softwareConfig,omitempty"`

	/* Reference to a Cloud Storage bucket used to stage the cluster's job dependencies and config files. NOTE(review): purpose inferred from the field name — confirm against the Dataproc ClusterConfig docs. */
	// +optional
	StagingBucketRef *v1alpha1.ResourceRef `json:"stagingBucketRef,omitempty"`

	/* Reference to a Cloud Storage bucket used for temporary cluster/job data. NOTE(review): purpose inferred from the field name — confirm against the Dataproc ClusterConfig docs. */
	// +optional
	TempBucketRef *v1alpha1.ResourceRef `json:"tempBucketRef,omitempty"`

	/* Optional. The Compute Engine config settings for worker instances in a cluster. */
	// +optional
	WorkerConfig *WorkflowtemplateWorkerConfig `json:"workerConfig,omitempty"`
}

func (*WorkflowtemplateConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateConfig.

func (*WorkflowtemplateConfig) DeepCopyInto

func (in *WorkflowtemplateConfig) DeepCopyInto(out *WorkflowtemplateConfig)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateConfigStatus

// WorkflowtemplateConfigStatus mirrors WorkflowtemplateConfig for the
// observed (status) side of the managed cluster. The nested *Status types
// carry output-only fields.
type WorkflowtemplateConfigStatus struct {
	/* Observed endpoint configuration (output only; see WorkflowtemplateEndpointConfigStatus). */
	EndpointConfig WorkflowtemplateEndpointConfigStatus `json:"endpointConfig,omitempty"`

	/* Observed lifecycle state (output only; see WorkflowtemplateLifecycleConfigStatus). */
	LifecycleConfig WorkflowtemplateLifecycleConfigStatus `json:"lifecycleConfig,omitempty"`

	/* Observed master instance-group state (output only; see WorkflowtemplateMasterConfigStatus). */
	MasterConfig WorkflowtemplateMasterConfigStatus `json:"masterConfig,omitempty"`

	/* Observed secondary-worker instance-group state (output only). */
	SecondaryWorkerConfig WorkflowtemplateSecondaryWorkerConfigStatus `json:"secondaryWorkerConfig,omitempty"`

	/* Observed worker instance-group state (output only). */
	WorkerConfig WorkflowtemplateWorkerConfigStatus `json:"workerConfig,omitempty"`
}

func (*WorkflowtemplateConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateConfigStatus.

func (*WorkflowtemplateConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateDiskConfig

// WorkflowtemplateDiskConfig specifies the boot-disk and local-SSD options
// for the instances of a cluster instance group.
type WorkflowtemplateDiskConfig struct {
	/* Optional. Size in GB of the boot disk (default is 500GB). */
	// +optional
	BootDiskSizeGb *int `json:"bootDiskSizeGb,omitempty"`

	/* Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). */
	// +optional
	BootDiskType *string `json:"bootDiskType,omitempty"`

	/* Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */
	// +optional
	NumLocalSsds *int `json:"numLocalSsds,omitempty"`
}

func (*WorkflowtemplateDiskConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateDiskConfig.

func (*WorkflowtemplateDiskConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateEncryptionConfig

// WorkflowtemplateEncryptionConfig holds encryption settings for the managed
// cluster.
type WorkflowtemplateEncryptionConfig struct {
	/* Reference to the KMS key used to encrypt the cluster's Compute Engine persistent disks. NOTE(review): inferred from the field name ("GcePdKmsKey") — confirm against the Dataproc EncryptionConfig docs. */
	// +optional
	GcePdKmsKeyRef *v1alpha1.ResourceRef `json:"gcePdKmsKeyRef,omitempty"`
}

func (*WorkflowtemplateEncryptionConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEncryptionConfig.

func (*WorkflowtemplateEncryptionConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateEndpointConfig

// WorkflowtemplateEndpointConfig controls HTTP access to cluster endpoints.
type WorkflowtemplateEndpointConfig struct {
	/* Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. */
	// +optional
	EnableHttpPortAccess *bool `json:"enableHttpPortAccess,omitempty"`
}

func (*WorkflowtemplateEndpointConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEndpointConfig.

func (*WorkflowtemplateEndpointConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateEndpointConfigStatus

// WorkflowtemplateEndpointConfigStatus reports the observed endpoint
// configuration of the cluster (output only).
type WorkflowtemplateEndpointConfigStatus struct {
	/* Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. */
	HttpPorts map[string]string `json:"httpPorts,omitempty"`
}

func (*WorkflowtemplateEndpointConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEndpointConfigStatus.

func (*WorkflowtemplateEndpointConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateGceClusterConfig

// WorkflowtemplateGceClusterConfig holds the Compute Engine settings shared
// by all instances in the managed cluster (networking, service account,
// placement, metadata, tags).
type WorkflowtemplateGceClusterConfig struct {
	/* Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. */
	// +optional
	InternalIPOnly *bool `json:"internalIPOnly,omitempty"`

	/* The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). */
	// +optional
	Metadata map[string]string `json:"metadata,omitempty"`

	/* Reference to the Compute Engine network to attach the cluster instances to. NOTE(review): inferred from the field name — confirm against the Dataproc GceClusterConfig docs. */
	// +optional
	NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"`

	/* Optional. Node Group Affinity for sole-tenant clusters. */
	// +optional
	NodeGroupAffinity *WorkflowtemplateNodeGroupAffinity `json:"nodeGroupAffinity,omitempty"`

	/* Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL */
	// +optional
	PrivateIPv6GoogleAccess *string `json:"privateIPv6GoogleAccess,omitempty"`

	/* Optional. Reservation Affinity for consuming Zonal reservation. */
	// +optional
	ReservationAffinity *WorkflowtemplateReservationAffinity `json:"reservationAffinity,omitempty"`

	/* Reference to the service account the cluster instances run as. NOTE(review): inferred from the field name — confirm against the Dataproc GceClusterConfig docs. */
	// +optional
	ServiceAccountRef *v1alpha1.ResourceRef `json:"serviceAccountRef,omitempty"`

	/* Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control */
	// +optional
	ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"`

	/* Reference to the Compute Engine subnetwork to attach the cluster instances to. NOTE(review): inferred from the field name — confirm against the Dataproc GceClusterConfig docs. */
	// +optional
	SubnetworkRef *v1alpha1.ResourceRef `json:"subnetworkRef,omitempty"`

	/* The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). */
	// +optional
	Tags []string `json:"tags,omitempty"`

	/* Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f` */
	// +optional
	Zone *string `json:"zone,omitempty"`
}

func (*WorkflowtemplateGceClusterConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateGceClusterConfig.

func (*WorkflowtemplateGceClusterConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateHadoopJob

// WorkflowtemplateHadoopJob describes a Hadoop MapReduce job to run as a
// workflow step. Exactly one of MainClass or MainJarFileUri identifies the
// driver entry point (enforced server-side; both are optional here).
type WorkflowtemplateHadoopJob struct {
	/* Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. */
	// +optional
	ArchiveUris []string `json:"archiveUris,omitempty"`

	/* Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
	// +optional
	Args []string `json:"args,omitempty"`

	/* Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. */
	// +optional
	FileUris []string `json:"fileUris,omitempty"`

	/* Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`. */
	// +optional
	MainClass *string `json:"mainClass,omitempty"`

	/* The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' */
	// +optional
	MainJarFileUri *string `json:"mainJarFileUri,omitempty"`

	/* Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`
}

func (*WorkflowtemplateHadoopJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateHadoopJob.

func (*WorkflowtemplateHadoopJob) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateHiveJob

// WorkflowtemplateHiveJob describes a Hive job to run as a workflow step.
// Queries come from either QueryFileUri or the inline QueryList.
type WorkflowtemplateHiveJob struct {
	/* Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
	// +optional
	ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`

	/* Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`

	/* The HCFS URI of the script that contains Hive queries. */
	// +optional
	QueryFileUri *string `json:"queryFileUri,omitempty"`

	/* A list of queries. */
	// +optional
	QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`

	/* Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`). */
	// +optional
	ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}

func (*WorkflowtemplateHiveJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateHiveJob.

func (*WorkflowtemplateHiveJob) DeepCopyInto

func (in *WorkflowtemplateHiveJob) DeepCopyInto(out *WorkflowtemplateHiveJob)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateInitializationActions

// WorkflowtemplateInitializationActions describes an executable to run on
// each cluster node after its configuration completes.
type WorkflowtemplateInitializationActions struct {
	/* Required. Cloud Storage URI of executable file. NOTE(review): the description says "Required" but the field is marked +optional with omitempty — likely required only when an initialization action entry is present; confirm against the Dataproc NodeInitializationAction docs. */
	// +optional
	ExecutableFile *string `json:"executableFile,omitempty"`

	/* Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */
	// +optional
	ExecutionTimeout *string `json:"executionTimeout,omitempty"`
}

func (*WorkflowtemplateInitializationActions) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateInitializationActions.

func (*WorkflowtemplateInitializationActions) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateJobs

// WorkflowtemplateJobs is one step of a workflow template: exactly one of the
// *Job fields selects the job type, StepId names the step, and
// PrerequisiteStepIds orders it within the workflow DAG.
type WorkflowtemplateJobs struct {
	/* Optional. Job is a Hadoop job. */
	// +optional
	HadoopJob *WorkflowtemplateHadoopJob `json:"hadoopJob,omitempty"`

	/* Optional. Job is a Hive job. */
	// +optional
	HiveJob *WorkflowtemplateHiveJob `json:"hiveJob,omitempty"`

	/* Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job. */
	// +optional
	Labels map[string]string `json:"labels,omitempty"`

	/* Optional. Job is a Pig job. */
	// +optional
	PigJob *WorkflowtemplatePigJob `json:"pigJob,omitempty"`

	/* Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. */
	// +optional
	PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"`

	/* Optional. Job is a Presto job. */
	// +optional
	PrestoJob *WorkflowtemplatePrestoJob `json:"prestoJob,omitempty"`

	/* Optional. Job is a PySpark job. */
	// +optional
	PysparkJob *WorkflowtemplatePysparkJob `json:"pysparkJob,omitempty"`

	/* Optional. Job scheduling configuration. */
	// +optional
	Scheduling *WorkflowtemplateScheduling `json:"scheduling,omitempty"`

	/* Optional. Job is a Spark job. */
	// +optional
	SparkJob *WorkflowtemplateSparkJob `json:"sparkJob,omitempty"`

	/* Optional. Job is a SparkR job. */
	// +optional
	SparkRJob *WorkflowtemplateSparkRJob `json:"sparkRJob,omitempty"`

	/* Optional. Job is a SparkSql job. */
	// +optional
	SparkSqlJob *WorkflowtemplateSparkSqlJob `json:"sparkSqlJob,omitempty"`

	/* Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. */
	StepId string `json:"stepId"`
}

func (*WorkflowtemplateJobs) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateJobs.

func (*WorkflowtemplateJobs) DeepCopyInto

func (in *WorkflowtemplateJobs) DeepCopyInto(out *WorkflowtemplateJobs)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateKerberosConfig

// WorkflowtemplateKerberosConfig holds Kerberos/security settings for the
// managed cluster, including cross-realm trust and SSL keystore/truststore
// material (passwords are supplied as KMS-encrypted Cloud Storage URIs).
type WorkflowtemplateKerberosConfig struct {
	/* Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty"`

	/* Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty"`

	/* Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. */
	// +optional
	CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. */
	// +optional
	CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword,omitempty"`

	/* Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. */
	// +optional
	EnableKerberos *bool `json:"enableKerberos,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. */
	// +optional
	KdcDbKey *string `json:"kdcDbKey,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	KeyPassword *string `json:"keyPassword,omitempty"`

	/* Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
	// +optional
	Keystore *string `json:"keystore,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	KeystorePassword *string `json:"keystorePassword,omitempty"`

	/* Reference to the KMS key used to decrypt the password files above. NOTE(review): inferred from the field name and sibling field descriptions — confirm against the Dataproc KerberosConfig docs. */
	// +optional
	KmsKeyRef *v1alpha1.ResourceRef `json:"kmsKeyRef,omitempty"`

	/* Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. */
	// +optional
	Realm *string `json:"realm,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. */
	// +optional
	RootPrincipalPassword *string `json:"rootPrincipalPassword,omitempty"`

	/* Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. */
	// +optional
	TgtLifetimeHours *int `json:"tgtLifetimeHours,omitempty"`

	/* Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
	// +optional
	Truststore *string `json:"truststore,omitempty"`

	/* Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. */
	// +optional
	TruststorePassword *string `json:"truststorePassword,omitempty"`
}

func (*WorkflowtemplateKerberosConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateKerberosConfig.

func (*WorkflowtemplateKerberosConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateLifecycleConfig

// WorkflowtemplateLifecycleConfig controls automatic deletion of the managed
// cluster, either at an absolute time, after a fixed lifetime, or after a
// period of idleness.
type WorkflowtemplateLifecycleConfig struct {
	/* Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	AutoDeleteTime *string `json:"autoDeleteTime,omitempty"`

	/* Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	AutoDeleteTtl *string `json:"autoDeleteTtl,omitempty"`

	/* Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	// +optional
	IdleDeleteTtl *string `json:"idleDeleteTtl,omitempty"`
}

func (*WorkflowtemplateLifecycleConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLifecycleConfig.

func (*WorkflowtemplateLifecycleConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateLifecycleConfigStatus

// WorkflowtemplateLifecycleConfigStatus reports observed lifecycle state of
// the cluster (output only).
type WorkflowtemplateLifecycleConfigStatus struct {
	/* Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
	IdleStartTime string `json:"idleStartTime,omitempty"`
}

func (*WorkflowtemplateLifecycleConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLifecycleConfigStatus.

func (*WorkflowtemplateLifecycleConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateLoggingConfig

// WorkflowtemplateLoggingConfig configures driver logging for a job.
type WorkflowtemplateLoggingConfig struct {
	/* The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */
	// +optional
	DriverLogLevels map[string]string `json:"driverLogLevels,omitempty"`
}

func (*WorkflowtemplateLoggingConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLoggingConfig.

func (*WorkflowtemplateLoggingConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateManagedCluster

// WorkflowtemplateManagedCluster describes a cluster that the workflow
// creates for its run and deletes afterwards (as opposed to a
// ClusterSelector, which targets an existing cluster).
type WorkflowtemplateManagedCluster struct {
	/* Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. */
	ClusterName string `json:"clusterName"`

	/* Required. The cluster configuration. */
	Config WorkflowtemplateConfig `json:"config"`

	/* Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster. */
	// +optional
	Labels map[string]string `json:"labels,omitempty"`
}

func (*WorkflowtemplateManagedCluster) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedCluster.

func (*WorkflowtemplateManagedCluster) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateManagedClusterStatus

// WorkflowtemplateManagedClusterStatus reports observed state of the managed
// cluster (output only).
type WorkflowtemplateManagedClusterStatus struct {
	/* Observed cluster configuration (output only; see WorkflowtemplateConfigStatus). */
	Config WorkflowtemplateConfigStatus `json:"config,omitempty"`
}

func (*WorkflowtemplateManagedClusterStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedClusterStatus.

func (*WorkflowtemplateManagedClusterStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateManagedGroupConfigStatus

// WorkflowtemplateManagedGroupConfigStatus reports the Compute Engine
// Instance Group Manager backing an instance group (output only).
type WorkflowtemplateManagedGroupConfigStatus struct {
	/* Output only. The name of the Instance Group Manager for this group. */
	InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"`

	/* Output only. The name of the Instance Template used for the Managed Instance Group. */
	InstanceTemplateName string `json:"instanceTemplateName,omitempty"`
}

func (*WorkflowtemplateManagedGroupConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedGroupConfigStatus.

func (*WorkflowtemplateManagedGroupConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateMasterConfig

// WorkflowtemplateMasterConfig is the instance-group configuration for the
// cluster's master group (machine type, disks, accelerators, image, count).
type WorkflowtemplateMasterConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`

	/* Reference to the Compute Engine image used for the instances. NOTE(review): inferred from the field name — confirm against the Dataproc InstanceGroupConfig docs. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*WorkflowtemplateMasterConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateMasterConfig.

func (*WorkflowtemplateMasterConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateMasterConfigStatus

// WorkflowtemplateMasterConfigStatus reports observed state of the master
// instance group (output only).
type WorkflowtemplateMasterConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*WorkflowtemplateMasterConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateMasterConfigStatus.

func (*WorkflowtemplateMasterConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateNodeGroupAffinity

// WorkflowtemplateNodeGroupAffinity pins cluster instances to a sole-tenant
// node group (see the GceClusterConfig.NodeGroupAffinity field description).
type WorkflowtemplateNodeGroupAffinity struct {
	/* Required (non-pointer, no omitempty). Reference to the sole-tenant node group to schedule the cluster on. NOTE(review): semantics inferred from the field name and enclosing type — confirm against the Dataproc NodeGroupAffinity docs. */
	NodeGroupRef v1alpha1.ResourceRef `json:"nodeGroupRef"`
}

func (*WorkflowtemplateNodeGroupAffinity) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateNodeGroupAffinity.

func (*WorkflowtemplateNodeGroupAffinity) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateParameters

// WorkflowtemplateParameters describes a single template parameter: its name,
// the template field paths it substitutes into, and optional validation rules.
type WorkflowtemplateParameters struct {
	/* Optional. Brief description of the parameter. Must not exceed 1024 characters. */
	// +optional
	Description *string `json:"description,omitempty"`

	/* Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args */
	Fields []string `json:"fields"`

	/* Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. */
	Name string `json:"name"`

	/* Optional. Validation rules to be applied to this parameter's value. */
	// +optional
	Validation *WorkflowtemplateValidation `json:"validation,omitempty"`
}

func (*WorkflowtemplateParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateParameters.

func (*WorkflowtemplateParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplatePigJob

// WorkflowtemplatePigJob configures an Apache Pig job run as a workflow step.
// Exactly one of QueryFileUri or QueryList supplies the queries (mutual
// exclusivity is enforced by the API, not by this type).
type WorkflowtemplatePigJob struct {
	/* Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
	// +optional
	ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`

	/* Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`

	/* The HCFS URI of the script that contains the Pig queries. */
	// +optional
	QueryFileUri *string `json:"queryFileUri,omitempty"`

	/* A list of queries. */
	// +optional
	QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`

	/* Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`). */
	// +optional
	ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}

func (*WorkflowtemplatePigJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePigJob.

func (*WorkflowtemplatePigJob) DeepCopyInto

func (in *WorkflowtemplatePigJob) DeepCopyInto(out *WorkflowtemplatePigJob)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplatePlacement

// WorkflowtemplatePlacement specifies where workflow jobs run: either on an
// existing cluster chosen by label selector, or on a cluster the workflow
// creates and manages itself.
type WorkflowtemplatePlacement struct {
	/* Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted. */
	// +optional
	ClusterSelector *WorkflowtemplateClusterSelector `json:"clusterSelector,omitempty"`

	/* A cluster that is managed by the workflow. */
	// +optional
	ManagedCluster *WorkflowtemplateManagedCluster `json:"managedCluster,omitempty"`
}

func (*WorkflowtemplatePlacement) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePlacement.

func (*WorkflowtemplatePlacement) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplatePlacementStatus

// WorkflowtemplatePlacementStatus holds the observed state of the workflow's
// placement configuration.
type WorkflowtemplatePlacementStatus struct {
	/* NOTE(review): upstream provides no description; presumably the output-only status of the workflow-managed cluster — confirm against the Dataproc API. */
	ManagedCluster WorkflowtemplateManagedClusterStatus `json:"managedCluster,omitempty"`
}

func (*WorkflowtemplatePlacementStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePlacementStatus.

func (*WorkflowtemplatePlacementStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplatePrestoJob

// WorkflowtemplatePrestoJob configures a Presto job run as a workflow step.
// Exactly one of QueryFileUri or QueryList supplies the queries (mutual
// exclusivity is enforced by the API, not by this type).
type WorkflowtemplatePrestoJob struct {
	/* Optional. Presto client tags to attach to this query. */
	// +optional
	ClientTags []string `json:"clientTags,omitempty"`

	/* Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
	// +optional
	ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats. */
	// +optional
	OutputFormat *string `json:"outputFormat,omitempty"`

	/* Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html). Equivalent to using the --session flag in the Presto CLI. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`

	/* The HCFS URI of the script that contains SQL queries. */
	// +optional
	QueryFileUri *string `json:"queryFileUri,omitempty"`

	/* A list of queries. */
	// +optional
	QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`
}

func (*WorkflowtemplatePrestoJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePrestoJob.

func (*WorkflowtemplatePrestoJob) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplatePysparkJob

// WorkflowtemplatePysparkJob configures a PySpark job run as a workflow step.
// MainPythonFileUri is the only required field.
type WorkflowtemplatePysparkJob struct {
	/* Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
	// +optional
	ArchiveUris []string `json:"archiveUris,omitempty"`

	/* Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
	// +optional
	Args []string `json:"args,omitempty"`

	/* Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
	// +optional
	FileUris []string `json:"fileUris,omitempty"`

	/* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. */
	MainPythonFileUri string `json:"mainPythonFileUri"`

	/* Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`

	/* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */
	// +optional
	PythonFileUris []string `json:"pythonFileUris,omitempty"`
}

func (*WorkflowtemplatePysparkJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePysparkJob.

func (*WorkflowtemplatePysparkJob) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateQueryList

// WorkflowtemplateQueryList is an inline list of queries, used by the job
// types (Hive, Pig, Presto, Spark SQL) as an alternative to a query file URI.
type WorkflowtemplateQueryList struct {
	/* Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */
	Queries []string `json:"queries"`
}

func (*WorkflowtemplateQueryList) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateQueryList.

func (*WorkflowtemplateQueryList) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateRegex

// WorkflowtemplateRegex is a regex-based validation rule for a template
// parameter's value.
type WorkflowtemplateRegex struct {
	/* Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). */
	Regexes []string `json:"regexes"`
}

func (*WorkflowtemplateRegex) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateRegex.

func (*WorkflowtemplateRegex) DeepCopyInto

func (in *WorkflowtemplateRegex) DeepCopyInto(out *WorkflowtemplateRegex)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateReservationAffinity

type WorkflowtemplateReservationAffinity struct {
	/* Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION */
	// +optional
	ConsumeReservationType *string `json:"consumeReservationType,omitempty"`

	/* Optional. Corresponds to the label key of reservation resource. */
	// +optional
	Key *string `json:"key,omitempty"`

	/* Optional. Corresponds to the label values of reservation resource. */
	// +optional
	Values []WorkflowtemplateValues `json:"values,omitempty"`
}

func (*WorkflowtemplateReservationAffinity) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateReservationAffinity.

func (*WorkflowtemplateReservationAffinity) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateScheduling

// WorkflowtemplateScheduling bounds how many times a job's driver may be
// restarted before the job is reported failed.
type WorkflowtemplateScheduling struct {
	/* Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10. */
	// +optional
	MaxFailuresPerHour *int `json:"maxFailuresPerHour,omitempty"`

	/* Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240. */
	// +optional
	MaxFailuresTotal *int `json:"maxFailuresTotal,omitempty"`
}

func (*WorkflowtemplateScheduling) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateScheduling.

func (*WorkflowtemplateScheduling) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSecondaryWorkerConfig

// WorkflowtemplateSecondaryWorkerConfig configures the secondary
// (preemptible by default) worker instance group of a managed cluster.
type WorkflowtemplateSecondaryWorkerConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`

	/* NOTE(review): upstream provides no description; presumably a reference to the Compute Engine image used for these instances — confirm against the Dataproc InstanceGroupConfig API. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// NOTE(review): the master_config sizing rules in the comment above look
	// like boilerplate shared across instance-group types; they do not apply
	// to secondary workers — confirm against the Dataproc API reference.
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*WorkflowtemplateSecondaryWorkerConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecondaryWorkerConfig.

func (*WorkflowtemplateSecondaryWorkerConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSecondaryWorkerConfigStatus

// WorkflowtemplateSecondaryWorkerConfigStatus holds the observed (output-only)
// state of the secondary worker instance group.
type WorkflowtemplateSecondaryWorkerConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecondaryWorkerConfigStatus.

func (*WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSecurityConfig

// WorkflowtemplateSecurityConfig groups cluster security settings; currently
// only Kerberos configuration is exposed.
type WorkflowtemplateSecurityConfig struct {
	/* Optional. Kerberos related configuration. */
	// +optional
	KerberosConfig *WorkflowtemplateKerberosConfig `json:"kerberosConfig,omitempty"`
}

func (*WorkflowtemplateSecurityConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecurityConfig.

func (*WorkflowtemplateSecurityConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSoftwareConfig

// WorkflowtemplateSoftwareConfig selects the cluster image version, optional
// components, and daemon config-file properties.
type WorkflowtemplateSoftwareConfig struct {
	/* Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the ["preview" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
	// +optional
	ImageVersion *string `json:"imageVersion,omitempty"`

	/* Optional. The set of components to activate on the cluster. */
	// +optional
	OptionalComponents []string `json:"optionalComponents,omitempty"`

	/* Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`
}

func (*WorkflowtemplateSoftwareConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSoftwareConfig.

func (*WorkflowtemplateSoftwareConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSparkJob

// WorkflowtemplateSparkJob configures a Spark job run as a workflow step.
// Exactly one of MainClass or MainJarFileUri supplies the entry point (mutual
// exclusivity is enforced by the API, not by this type).
type WorkflowtemplateSparkJob struct {
	/* Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
	// +optional
	ArchiveUris []string `json:"archiveUris,omitempty"`

	/* Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
	// +optional
	Args []string `json:"args,omitempty"`

	/* Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
	// +optional
	FileUris []string `json:"fileUris,omitempty"`

	/* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`. */
	// +optional
	MainClass *string `json:"mainClass,omitempty"`

	/* The HCFS URI of the jar file that contains the main class. */
	// +optional
	MainJarFileUri *string `json:"mainJarFileUri,omitempty"`

	/* Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`
}

func (*WorkflowtemplateSparkJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkJob.

func (*WorkflowtemplateSparkJob) DeepCopyInto

func (in *WorkflowtemplateSparkJob) DeepCopyInto(out *WorkflowtemplateSparkJob)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSparkRJob

// WorkflowtemplateSparkRJob configures a SparkR job run as a workflow step.
// MainRFileUri is the only required field.
type WorkflowtemplateSparkRJob struct {
	/* Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
	// +optional
	ArchiveUris []string `json:"archiveUris,omitempty"`

	/* Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
	// +optional
	Args []string `json:"args,omitempty"`

	/* Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
	// +optional
	FileUris []string `json:"fileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. */
	MainRFileUri string `json:"mainRFileUri"`

	/* Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`
}

func (*WorkflowtemplateSparkRJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkRJob.

func (*WorkflowtemplateSparkRJob) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateSparkSqlJob

// WorkflowtemplateSparkSqlJob configures a Spark SQL job run as a workflow
// step. Exactly one of QueryFileUri or QueryList supplies the queries (mutual
// exclusivity is enforced by the API, not by this type).
type WorkflowtemplateSparkSqlJob struct {
	/* Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */
	// +optional
	JarFileUris []string `json:"jarFileUris,omitempty"`

	/* Optional. The runtime log config for job execution. */
	// +optional
	LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`

	/* Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. */
	// +optional
	Properties map[string]string `json:"properties,omitempty"`

	/* The HCFS URI of the script that contains SQL queries. */
	// +optional
	QueryFileUri *string `json:"queryFileUri,omitempty"`

	/* A list of queries. */
	// +optional
	QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`

	/* Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`). */
	// +optional
	ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}

func (*WorkflowtemplateSparkSqlJob) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkSqlJob.

func (*WorkflowtemplateSparkSqlJob) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateValidation

// WorkflowtemplateValidation holds the validation rule for a template
// parameter: either a regex rule or an allowed-values rule.
type WorkflowtemplateValidation struct {
	/* Validation based on regular expressions. */
	// +optional
	Regex *WorkflowtemplateRegex `json:"regex,omitempty"`

	/* Validation based on a list of allowed values. */
	// +optional
	Values *WorkflowtemplateValues `json:"values,omitempty"`
}

func (*WorkflowtemplateValidation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateValidation.

func (*WorkflowtemplateValidation) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateValues

type WorkflowtemplateValues struct {
	/* Required. List of allowed values for the parameter. */
	Values []WorkflowtemplateValues `json:"values"`
}

func (*WorkflowtemplateValues) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateValues.

func (*WorkflowtemplateValues) DeepCopyInto

func (in *WorkflowtemplateValues) DeepCopyInto(out *WorkflowtemplateValues)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateWorkerConfig

// WorkflowtemplateWorkerConfig configures the primary worker instance group
// of a managed cluster.
type WorkflowtemplateWorkerConfig struct {
	/* Optional. The Compute Engine accelerator configuration for these instances. */
	// +optional
	Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`

	/* Optional. Disk option config settings. */
	// +optional
	DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`

	/* NOTE(review): upstream provides no description; presumably a reference to the Compute Engine image used for these instances — confirm against the Dataproc InstanceGroupConfig API. */
	// +optional
	ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`

	/* Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
	// +optional
	MachineType *string `json:"machineType,omitempty"`

	/* Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
	// +optional
	MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`

	/* Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
	// NOTE(review): the master_config sizing rules in the comment above look
	// like boilerplate shared across instance-group types; they do not apply
	// to worker groups — confirm against the Dataproc API reference.
	// +optional
	NumInstances *int `json:"numInstances,omitempty"`

	/* Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
	// +optional
	Preemptibility *string `json:"preemptibility,omitempty"`
}

func (*WorkflowtemplateWorkerConfig) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateWorkerConfig.

func (*WorkflowtemplateWorkerConfig) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type WorkflowtemplateWorkerConfigStatus

// WorkflowtemplateWorkerConfigStatus holds the observed (output-only) state of
// the primary worker instance group.
type WorkflowtemplateWorkerConfigStatus struct {
	/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
	InstanceNames []string `json:"instanceNames,omitempty"`

	/* Output only. Specifies that this instance group contains preemptible instances. */
	IsPreemptible bool `json:"isPreemptible,omitempty"`

	/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
	ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}

func (*WorkflowtemplateWorkerConfigStatus) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateWorkerConfigStatus.

func (*WorkflowtemplateWorkerConfigStatus) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL