dataflow

package
v6.61.0 Latest

Published: Jul 25, 2023 License: Apache-2.0 Imports: 7 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type FlexTemplateJob

type FlexTemplateJob struct {
	pulumi.CustomResourceState

	// List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].
	AdditionalExperiments pulumi.StringArrayOutput `pulumi:"additionalExperiments"`
	// The algorithm to use for autoscaling
	AutoscalingAlgorithm pulumi.StringPtrOutput `pulumi:"autoscalingAlgorithm"`
	// The GCS path to the Dataflow job Flex
	// Template.
	ContainerSpecGcsPath pulumi.StringOutput `pulumi:"containerSpecGcsPath"`
	// Indicates if the job should use the streaming engine feature.
	EnableStreamingEngine pulumi.BoolPtrOutput `pulumi:"enableStreamingEngine"`
	// The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".
	IpConfiguration pulumi.StringPtrOutput `pulumi:"ipConfiguration"`
	// The unique ID of this job.
	JobId pulumi.StringOutput `pulumi:"jobId"`
	// The name for the Cloud KMS key for the job. Key format is:
	// projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
	KmsKeyName pulumi.StringPtrOutput `pulumi:"kmsKeyName"`
	// User labels to be specified for the job. Keys and values
	// should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)
	// page. **Note**: This field is marked as deprecated as the API does not currently
	// support adding labels.
	// **NOTE**: Google-provided Dataflow templates often provide default labels
	// that begin with `goog-dataflow-provided`. Unless explicitly set in config, these
	// labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapOutput `pulumi:"labels"`
	// The machine type to use for launching the job. The default is n1-standard-1.
	LauncherMachineType pulumi.StringPtrOutput `pulumi:"launcherMachineType"`
	// The machine type to use for the job.
	MachineType pulumi.StringPtrOutput `pulumi:"machineType"`
	// The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to
	// 1000.
	MaxWorkers pulumi.IntPtrOutput `pulumi:"maxWorkers"`
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringOutput `pulumi:"name"`
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrOutput `pulumi:"network"`
	// The initial number of Google Compute Engine instances for the job.
	NumWorkers pulumi.IntPtrOutput `pulumi:"numWorkers"`
	// One of "drain" or "cancel". Specifies behavior of
	// deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrOutput `pulumi:"onDelete"`
	// Key/Value pairs to be passed to the Dataflow job (as
	// used in the template). Additional [pipeline options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options)
	// such as `serviceAccount`, `workerMachineType`, etc can be specified here.
	Parameters pulumi.MapOutput `pulumi:"parameters"`
	// The project in which the resource belongs. If it is not
	// provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the created job should run.
	Region pulumi.StringOutput `pulumi:"region"`
	// Docker registry location of container image to use for the worker harness. Default is the container for the version of
	// the SDK. Note this field is only valid for portable pipelines.
	SdkContainerImage pulumi.StringPtrOutput `pulumi:"sdkContainerImage"`
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringOutput `pulumi:"serviceAccountEmail"`
	// If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from
	// Pulumi state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are
	// different, e.g. by embedding a release ID or by using a random_id.
	SkipWaitOnJobTermination pulumi.BoolPtrOutput `pulumi:"skipWaitOnJobTermination"`
	// The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.
	StagingLocation pulumi.StringOutput `pulumi:"stagingLocation"`
	// The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)
	State pulumi.StringOutput `pulumi:"state"`
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".
	Subnetwork pulumi.StringPtrOutput `pulumi:"subnetwork"`
	// The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
	TempLocation pulumi.StringOutput `pulumi:"tempLocation"`
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the
	// corresponding name prefixes of the new job.
	TransformNameMapping pulumi.MapOutput `pulumi:"transformNameMapping"`
	// The type of this job, selected from the JobType enum.
	Type pulumi.StringOutput `pulumi:"type"`
}

Creates a [Flex Template](https://cloud.google.com/dataflow/docs/guides/templates/using-flex-templates) job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for [Beam](https://beam.apache.org) and [Dataflow](https://cloud.google.com/dataflow/).

## Example Usage

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataflow.NewFlexTemplateJob(ctx, "bigDataJob", &dataflow.FlexTemplateJobArgs{
			ContainerSpecGcsPath: pulumi.String("gs://my-bucket/templates/template.json"),
			Parameters: pulumi.Map{
				"inputSubscription": pulumi.Any("messages"),
			},
		}) // an explicit provider instance can be passed with pulumi.Provider(...)
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Note on "destroy" / "apply"

There are many types of Dataflow jobs. Some Dataflow jobs run constantly, getting new data from (e.g.) a GCS bucket, and outputting data continuously. Some jobs process a set amount of data then terminate. All jobs can fail while running due to programming errors or other issues. In this way, Dataflow jobs are different from most other Google resources managed by this provider.

The Dataflow resource is considered 'existing' while it is in a nonterminal state. If it reaches a terminal state (e.g. 'FAILED', 'COMPLETE', 'CANCELLED'), it will be recreated on the next 'apply'. This is as expected for jobs which run continuously, but may surprise users who use this resource for other kinds of Dataflow jobs.

A Dataflow job which is 'destroyed' may be "cancelled" or "drained". If "cancelled", the job terminates - any data written remains where it is, but no new data will be processed. If "drained", no new data will enter the pipeline, but any data currently in the pipeline will finish being processed. The default is "cancel"; if you set `onDelete` to `"drain"` in the configuration, you may experience a long wait for your `pulumi destroy` to complete.

You can potentially short-circuit the wait by setting `skipWaitOnJobTermination` to `true`, but beware that unless you take active steps to ensure that the job `name` parameter changes between instances, the name will conflict and the launch of the new job will fail. One way to do this is with a randomId resource, for example:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		region := cfg.Require("region")
		bigDataJobSubscriptionId := "projects/myproject/subscriptions/messages"
		if param := cfg.Get("bigDataJobSubscriptionId"); param != "" {
			bigDataJobSubscriptionId = param
		}
		_, err := random.NewRandomId(ctx, "bigDataJobNameSuffix", &random.RandomIdArgs{
			ByteLength: pulumi.Int(4),
			Keepers: pulumi.Map{
				"region":          pulumi.String(region),
				"subscription_id": pulumi.String(bigDataJobSubscriptionId),
			},
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewFlexTemplateJob(ctx, "bigDataJob", &dataflow.FlexTemplateJobArgs{
			Region:                   pulumi.String(region),
			ContainerSpecGcsPath:     pulumi.String("gs://my-bucket/templates/template.json"),
			SkipWaitOnJobTermination: pulumi.Bool(true),
			Parameters: pulumi.Map{
				"inputSubscription": pulumi.String(bigDataJobSubscriptionId),
			},
		}) // an explicit provider instance can be passed with pulumi.Provider(...)
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

This resource does not support import.

func GetFlexTemplateJob

func GetFlexTemplateJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *FlexTemplateJobState, opts ...pulumi.ResourceOption) (*FlexTemplateJob, error)

GetFlexTemplateJob gets an existing FlexTemplateJob resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
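
As a minimal sketch (the logical name `existing-flex-job` and the job ID below are placeholders, not values from this package), reading back the state of a job that already exists might look like:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing FlexTemplateJob by its Pulumi resource ID.
		job, err := dataflow.GetFlexTemplateJob(ctx, "existing-flex-job",
			pulumi.ID("2022-07-31_06_25_42-11926927532632678660"), nil)
		if err != nil {
			return err
		}
		// Export a couple of the read-back properties.
		ctx.Export("jobState", job.State)
		ctx.Export("jobType", job.Type)
		return nil
	})
}
```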

func NewFlexTemplateJob

func NewFlexTemplateJob(ctx *pulumi.Context,
	name string, args *FlexTemplateJobArgs, opts ...pulumi.ResourceOption) (*FlexTemplateJob, error)

NewFlexTemplateJob registers a new resource with the given unique name, arguments, and options.

func (*FlexTemplateJob) ElementType

func (*FlexTemplateJob) ElementType() reflect.Type

func (*FlexTemplateJob) ToFlexTemplateJobOutput

func (i *FlexTemplateJob) ToFlexTemplateJobOutput() FlexTemplateJobOutput

func (*FlexTemplateJob) ToFlexTemplateJobOutputWithContext

func (i *FlexTemplateJob) ToFlexTemplateJobOutputWithContext(ctx context.Context) FlexTemplateJobOutput

type FlexTemplateJobArgs

type FlexTemplateJobArgs struct {
	// List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].
	AdditionalExperiments pulumi.StringArrayInput
	// The algorithm to use for autoscaling
	AutoscalingAlgorithm pulumi.StringPtrInput
	// The GCS path to the Dataflow job Flex
	// Template.
	ContainerSpecGcsPath pulumi.StringInput
	// Indicates if the job should use the streaming engine feature.
	EnableStreamingEngine pulumi.BoolPtrInput
	// The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".
	IpConfiguration pulumi.StringPtrInput
	// The name for the Cloud KMS key for the job. Key format is:
	// projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
	KmsKeyName pulumi.StringPtrInput
	// User labels to be specified for the job. Keys and values
	// should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)
	// page. **Note**: This field is marked as deprecated as the API does not currently
	// support adding labels.
	// **NOTE**: Google-provided Dataflow templates often provide default labels
	// that begin with `goog-dataflow-provided`. Unless explicitly set in config, these
	// labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapInput
	// The machine type to use for launching the job. The default is n1-standard-1.
	LauncherMachineType pulumi.StringPtrInput
	// The machine type to use for the job.
	MachineType pulumi.StringPtrInput
	// The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to
	// 1000.
	MaxWorkers pulumi.IntPtrInput
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringPtrInput
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrInput
	// The initial number of Google Compute Engine instances for the job.
	NumWorkers pulumi.IntPtrInput
	// One of "drain" or "cancel". Specifies behavior of
	// deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrInput
	// Key/Value pairs to be passed to the Dataflow job (as
	// used in the template). Additional [pipeline options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options)
	// such as `serviceAccount`, `workerMachineType`, etc can be specified here.
	Parameters pulumi.MapInput
	// The project in which the resource belongs. If it is not
	// provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the created job should run.
	Region pulumi.StringPtrInput
	// Docker registry location of container image to use for the worker harness. Default is the container for the version of
	// the SDK. Note this field is only valid for portable pipelines.
	SdkContainerImage pulumi.StringPtrInput
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringPtrInput
	// If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from
	// Pulumi state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are
	// different, e.g. by embedding a release ID or by using a random_id.
	SkipWaitOnJobTermination pulumi.BoolPtrInput
	// The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.
	StagingLocation pulumi.StringPtrInput
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".
	Subnetwork pulumi.StringPtrInput
	// The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
	TempLocation pulumi.StringPtrInput
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the
	// corresponding name prefixes of the new job.
	TransformNameMapping pulumi.MapInput
}

The set of arguments for constructing a FlexTemplateJob resource.

func (FlexTemplateJobArgs) ElementType

func (FlexTemplateJobArgs) ElementType() reflect.Type

type FlexTemplateJobArray

type FlexTemplateJobArray []FlexTemplateJobInput

func (FlexTemplateJobArray) ElementType

func (FlexTemplateJobArray) ElementType() reflect.Type

func (FlexTemplateJobArray) ToFlexTemplateJobArrayOutput

func (i FlexTemplateJobArray) ToFlexTemplateJobArrayOutput() FlexTemplateJobArrayOutput

func (FlexTemplateJobArray) ToFlexTemplateJobArrayOutputWithContext

func (i FlexTemplateJobArray) ToFlexTemplateJobArrayOutputWithContext(ctx context.Context) FlexTemplateJobArrayOutput

type FlexTemplateJobArrayInput

type FlexTemplateJobArrayInput interface {
	pulumi.Input

	ToFlexTemplateJobArrayOutput() FlexTemplateJobArrayOutput
	ToFlexTemplateJobArrayOutputWithContext(context.Context) FlexTemplateJobArrayOutput
}

FlexTemplateJobArrayInput is an input type that accepts FlexTemplateJobArray and FlexTemplateJobArrayOutput values. You can construct a concrete instance of `FlexTemplateJobArrayInput` via:

FlexTemplateJobArray{ FlexTemplateJobArgs{...} }

type FlexTemplateJobArrayOutput

type FlexTemplateJobArrayOutput struct{ *pulumi.OutputState }

func (FlexTemplateJobArrayOutput) ElementType

func (FlexTemplateJobArrayOutput) ElementType() reflect.Type

func (FlexTemplateJobArrayOutput) Index

func (o FlexTemplateJobArrayOutput) Index(i pulumi.IntInput) FlexTemplateJobOutput

func (FlexTemplateJobArrayOutput) ToFlexTemplateJobArrayOutput

func (o FlexTemplateJobArrayOutput) ToFlexTemplateJobArrayOutput() FlexTemplateJobArrayOutput

func (FlexTemplateJobArrayOutput) ToFlexTemplateJobArrayOutputWithContext

func (o FlexTemplateJobArrayOutput) ToFlexTemplateJobArrayOutputWithContext(ctx context.Context) FlexTemplateJobArrayOutput

type FlexTemplateJobInput

type FlexTemplateJobInput interface {
	pulumi.Input

	ToFlexTemplateJobOutput() FlexTemplateJobOutput
	ToFlexTemplateJobOutputWithContext(ctx context.Context) FlexTemplateJobOutput
}

type FlexTemplateJobMap

type FlexTemplateJobMap map[string]FlexTemplateJobInput

func (FlexTemplateJobMap) ElementType

func (FlexTemplateJobMap) ElementType() reflect.Type

func (FlexTemplateJobMap) ToFlexTemplateJobMapOutput

func (i FlexTemplateJobMap) ToFlexTemplateJobMapOutput() FlexTemplateJobMapOutput

func (FlexTemplateJobMap) ToFlexTemplateJobMapOutputWithContext

func (i FlexTemplateJobMap) ToFlexTemplateJobMapOutputWithContext(ctx context.Context) FlexTemplateJobMapOutput

type FlexTemplateJobMapInput

type FlexTemplateJobMapInput interface {
	pulumi.Input

	ToFlexTemplateJobMapOutput() FlexTemplateJobMapOutput
	ToFlexTemplateJobMapOutputWithContext(context.Context) FlexTemplateJobMapOutput
}

FlexTemplateJobMapInput is an input type that accepts FlexTemplateJobMap and FlexTemplateJobMapOutput values. You can construct a concrete instance of `FlexTemplateJobMapInput` via:

FlexTemplateJobMap{ "key": FlexTemplateJobArgs{...} }

type FlexTemplateJobMapOutput

type FlexTemplateJobMapOutput struct{ *pulumi.OutputState }

func (FlexTemplateJobMapOutput) ElementType

func (FlexTemplateJobMapOutput) ElementType() reflect.Type

func (FlexTemplateJobMapOutput) MapIndex

func (o FlexTemplateJobMapOutput) MapIndex(k pulumi.StringInput) FlexTemplateJobOutput

func (FlexTemplateJobMapOutput) ToFlexTemplateJobMapOutput

func (o FlexTemplateJobMapOutput) ToFlexTemplateJobMapOutput() FlexTemplateJobMapOutput

func (FlexTemplateJobMapOutput) ToFlexTemplateJobMapOutputWithContext

func (o FlexTemplateJobMapOutput) ToFlexTemplateJobMapOutputWithContext(ctx context.Context) FlexTemplateJobMapOutput

type FlexTemplateJobOutput

type FlexTemplateJobOutput struct{ *pulumi.OutputState }

func (FlexTemplateJobOutput) AdditionalExperiments added in v6.57.0

func (o FlexTemplateJobOutput) AdditionalExperiments() pulumi.StringArrayOutput

List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].

func (FlexTemplateJobOutput) AutoscalingAlgorithm added in v6.57.0

func (o FlexTemplateJobOutput) AutoscalingAlgorithm() pulumi.StringPtrOutput

The algorithm to use for autoscaling

func (FlexTemplateJobOutput) ContainerSpecGcsPath added in v6.23.0

func (o FlexTemplateJobOutput) ContainerSpecGcsPath() pulumi.StringOutput

The GCS path to the Dataflow job Flex Template.

func (FlexTemplateJobOutput) ElementType

func (FlexTemplateJobOutput) ElementType() reflect.Type

func (FlexTemplateJobOutput) EnableStreamingEngine added in v6.57.0

func (o FlexTemplateJobOutput) EnableStreamingEngine() pulumi.BoolPtrOutput

Indicates if the job should use the streaming engine feature.

func (FlexTemplateJobOutput) IpConfiguration added in v6.57.0

func (o FlexTemplateJobOutput) IpConfiguration() pulumi.StringPtrOutput

The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".

func (FlexTemplateJobOutput) JobId added in v6.23.0

func (o FlexTemplateJobOutput) JobId() pulumi.StringOutput

The unique ID of this job.

func (FlexTemplateJobOutput) KmsKeyName added in v6.57.0

func (o FlexTemplateJobOutput) KmsKeyName() pulumi.StringPtrOutput

The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY

func (FlexTemplateJobOutput) Labels added in v6.23.0

func (o FlexTemplateJobOutput) Labels() pulumi.MapOutput

User labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. **Note**: This field is marked as deprecated as the API does not currently support adding labels. **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.

func (FlexTemplateJobOutput) LauncherMachineType added in v6.57.0

func (o FlexTemplateJobOutput) LauncherMachineType() pulumi.StringPtrOutput

The machine type to use for launching the job. The default is n1-standard-1.

func (FlexTemplateJobOutput) MachineType added in v6.57.0

func (o FlexTemplateJobOutput) MachineType() pulumi.StringPtrOutput

The machine type to use for the job.

func (FlexTemplateJobOutput) MaxWorkers added in v6.57.0

func (o FlexTemplateJobOutput) MaxWorkers() pulumi.IntPtrOutput

The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.

func (FlexTemplateJobOutput) Name added in v6.23.0

func (o FlexTemplateJobOutput) Name() pulumi.StringOutput

A unique name for the resource, required by Dataflow.

func (FlexTemplateJobOutput) Network added in v6.57.0

func (o FlexTemplateJobOutput) Network() pulumi.StringPtrOutput

The network to which VMs will be assigned. If it is not provided, "default" will be used.

func (FlexTemplateJobOutput) NumWorkers added in v6.57.0

func (o FlexTemplateJobOutput) NumWorkers() pulumi.IntPtrOutput

The initial number of Google Compute Engine instances for the job.

func (FlexTemplateJobOutput) OnDelete added in v6.23.0

func (o FlexTemplateJobOutput) OnDelete() pulumi.StringPtrOutput

One of "drain" or "cancel". Specifies behavior of deletion during `pulumi destroy`. See above note.

func (FlexTemplateJobOutput) Parameters added in v6.23.0

func (o FlexTemplateJobOutput) Parameters() pulumi.MapOutput

Key/Value pairs to be passed to the Dataflow job (as used in the template). Additional [pipeline options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options) such as `serviceAccount`, `workerMachineType`, etc can be specified here.
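
When constructing the resource, these are set through `FlexTemplateJobArgs.Parameters`. For instance, a hedged sketch (the bucket path, subscription, service account, and machine type values are placeholders) of mixing template parameters with additional pipeline options in the same map:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Template parameters and extra pipeline options are passed through the same Parameters map.
		_, err := dataflow.NewFlexTemplateJob(ctx, "parameterizedJob", &dataflow.FlexTemplateJobArgs{
			ContainerSpecGcsPath: pulumi.String("gs://my-bucket/templates/template.json"),
			Parameters: pulumi.Map{
				"inputSubscription": pulumi.String("projects/myproject/subscriptions/messages"),
				"serviceAccount":    pulumi.String("dataflow-sa@myproject.iam.gserviceaccount.com"),
				"workerMachineType": pulumi.String("n1-standard-2"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```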

func (FlexTemplateJobOutput) Project added in v6.23.0

func (o FlexTemplateJobOutput) Project() pulumi.StringOutput

The project in which the resource belongs. If it is not provided, the provider project is used.

func (FlexTemplateJobOutput) Region added in v6.23.0

func (o FlexTemplateJobOutput) Region() pulumi.StringOutput

The region in which the created job should run.

func (FlexTemplateJobOutput) SdkContainerImage added in v6.57.0

func (o FlexTemplateJobOutput) SdkContainerImage() pulumi.StringPtrOutput

Docker registry location of container image to use for the worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines.

func (FlexTemplateJobOutput) ServiceAccountEmail added in v6.57.0

func (o FlexTemplateJobOutput) ServiceAccountEmail() pulumi.StringOutput

The Service Account email used to create the job.

func (FlexTemplateJobOutput) SkipWaitOnJobTermination added in v6.23.0

func (o FlexTemplateJobOutput) SkipWaitOnJobTermination() pulumi.BoolPtrOutput

If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from Pulumi state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.

func (FlexTemplateJobOutput) StagingLocation added in v6.57.0

func (o FlexTemplateJobOutput) StagingLocation() pulumi.StringOutput

The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.

func (FlexTemplateJobOutput) State added in v6.23.0

func (o FlexTemplateJobOutput) State() pulumi.StringOutput

The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)

func (FlexTemplateJobOutput) Subnetwork added in v6.57.0

func (o FlexTemplateJobOutput) Subnetwork() pulumi.StringPtrOutput

The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".

func (FlexTemplateJobOutput) TempLocation added in v6.57.0

func (o FlexTemplateJobOutput) TempLocation() pulumi.StringOutput

The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.

func (FlexTemplateJobOutput) ToFlexTemplateJobOutput

func (o FlexTemplateJobOutput) ToFlexTemplateJobOutput() FlexTemplateJobOutput

func (FlexTemplateJobOutput) ToFlexTemplateJobOutputWithContext

func (o FlexTemplateJobOutput) ToFlexTemplateJobOutputWithContext(ctx context.Context) FlexTemplateJobOutput

func (FlexTemplateJobOutput) TransformNameMapping added in v6.57.0

func (o FlexTemplateJobOutput) TransformNameMapping() pulumi.MapOutput

Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.

func (FlexTemplateJobOutput) Type added in v6.57.0

func (o FlexTemplateJobOutput) Type() pulumi.StringOutput

The type of this job, selected from the JobType enum.

type FlexTemplateJobState

type FlexTemplateJobState struct {
	// List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].
	AdditionalExperiments pulumi.StringArrayInput
	// The algorithm to use for autoscaling
	AutoscalingAlgorithm pulumi.StringPtrInput
	// The GCS path to the Dataflow job Flex
	// Template.
	ContainerSpecGcsPath pulumi.StringPtrInput
	// Indicates if the job should use the streaming engine feature.
	EnableStreamingEngine pulumi.BoolPtrInput
	// The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".
	IpConfiguration pulumi.StringPtrInput
	// The unique ID of this job.
	JobId pulumi.StringPtrInput
	// The name for the Cloud KMS key for the job. Key format is:
	// projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
	KmsKeyName pulumi.StringPtrInput
	// User labels to be specified for the job. Keys and values
	// should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)
	// page. **Note**: This field is marked as deprecated as the API does not currently
	// support adding labels.
	// **NOTE**: Google-provided Dataflow templates often provide default labels
	// that begin with `goog-dataflow-provided`. Unless explicitly set in config, these
	// labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapInput
	// The machine type to use for launching the job. The default is n1-standard-1.
	LauncherMachineType pulumi.StringPtrInput
	// The machine type to use for the job.
	MachineType pulumi.StringPtrInput
	// The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to
	// 1000.
	MaxWorkers pulumi.IntPtrInput
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringPtrInput
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrInput
	// The initial number of Google Compute Engine instances for the job.
	NumWorkers pulumi.IntPtrInput
	// One of "drain" or "cancel". Specifies behavior of
	// deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrInput
	// Key/Value pairs to be passed to the Dataflow job (as
	// used in the template). Additional [pipeline options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options)
	// such as `serviceAccount`, `workerMachineType`, etc can be specified here.
	Parameters pulumi.MapInput
	// The project in which the resource belongs. If it is not
	// provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the created job should run.
	Region pulumi.StringPtrInput
	// Docker registry location of container image to use for the worker harness. Default is the container for the version of
	// the SDK. Note this field is only valid for portable pipelines.
	SdkContainerImage pulumi.StringPtrInput
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringPtrInput
	// If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from
	// Pulumi state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are
	// different, e.g. by embedding a release ID or by using a random_id.
	SkipWaitOnJobTermination pulumi.BoolPtrInput
	// The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.
	StagingLocation pulumi.StringPtrInput
	// The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)
	State pulumi.StringPtrInput
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".
	Subnetwork pulumi.StringPtrInput
	// The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.
	TempLocation pulumi.StringPtrInput
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the
	// corresponding name prefixes of the new job.
	TransformNameMapping pulumi.MapInput
	// The type of this job, selected from the JobType enum.
	Type pulumi.StringPtrInput
}

func (FlexTemplateJobState) ElementType

func (FlexTemplateJobState) ElementType() reflect.Type

type Job

type Job struct {
	pulumi.CustomResourceState

	// List of experiments that should be used by the job. An example value is `["enableStackdriverAgentMetrics"]`.
	AdditionalExperiments pulumi.StringArrayOutput `pulumi:"additionalExperiments"`
	// Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
	EnableStreamingEngine pulumi.BoolPtrOutput `pulumi:"enableStreamingEngine"`
	// The configuration for VM IPs.  Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.
	IpConfiguration pulumi.StringPtrOutput `pulumi:"ipConfiguration"`
	// The unique ID of this job.
	JobId pulumi.StringOutput `pulumi:"jobId"`
	// The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`
	KmsKeyName pulumi.StringPtrOutput `pulumi:"kmsKeyName"`
	// User labels to be specified for the job. Keys and values should follow the restrictions
	// specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page.
	// **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`.
	// Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapOutput `pulumi:"labels"`
	// The machine type to use for the job.
	MachineType pulumi.StringPtrOutput `pulumi:"machineType"`
	// The number of workers permitted to work on the job.  More workers may improve processing speed at additional cost.
	MaxWorkers pulumi.IntPtrOutput `pulumi:"maxWorkers"`
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringOutput `pulumi:"name"`
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrOutput `pulumi:"network"`
	// One of "drain" or "cancel".  Specifies behavior of deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrOutput `pulumi:"onDelete"`
	// Key/Value pairs to be passed to the Dataflow job (as used in the template).
	Parameters pulumi.MapOutput `pulumi:"parameters"`
	// The project in which the resource belongs. If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the created job should run.
	Region pulumi.StringPtrOutput `pulumi:"region"`
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringPtrOutput `pulumi:"serviceAccountEmail"`
	// If set to `true`, Pulumi will treat `DRAINING` and `CANCELLING` as terminal states when deleting the resource, and will remove the resource from Pulumi state and move on.  See above note.
	SkipWaitOnJobTermination pulumi.BoolPtrOutput `pulumi:"skipWaitOnJobTermination"`
	// The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)
	State pulumi.StringOutput `pulumi:"state"`
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
	Subnetwork pulumi.StringPtrOutput `pulumi:"subnetwork"`
	// A writeable location on GCS for the Dataflow job to dump its temporary data.
	TempGcsLocation pulumi.StringOutput `pulumi:"tempGcsLocation"`
	// The GCS path to the Dataflow job template.
	TemplateGcsPath pulumi.StringOutput `pulumi:"templateGcsPath"`
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
	TransformNameMapping pulumi.MapOutput `pulumi:"transformNameMapping"`
	// The type of this job, selected from the [JobType enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobType)
	Type pulumi.StringOutput `pulumi:"type"`
	// The zone in which the created job should run. If it is not provided, the provider zone is used.
	Zone pulumi.StringPtrOutput `pulumi:"zone"`
}

Creates a job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for [Beam](https://beam.apache.org) and [Dataflow](https://cloud.google.com/dataflow/).

## Example Usage

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataflow.NewJob(ctx, "bigDataJob", &dataflow.JobArgs{
			Parameters: pulumi.Map{
				"baz": pulumi.Any("qux"),
				"foo": pulumi.Any("bar"),
			},
			TempGcsLocation: pulumi.String("gs://my-bucket/tmp_dir"),
			TemplateGcsPath: pulumi.String("gs://my-bucket/templates/template_file"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

### Streaming Job

```go
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/pubsub"
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		topic, err := pubsub.NewTopic(ctx, "topic", nil)
		if err != nil {
			return err
		}
		bucket1, err := storage.NewBucket(ctx, "bucket1", &storage.BucketArgs{
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = storage.NewBucket(ctx, "bucket2", &storage.BucketArgs{
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewJob(ctx, "pubsubStream", &dataflow.JobArgs{
			TemplateGcsPath:       pulumi.String("gs://my-bucket/templates/template_file"),
			TempGcsLocation:       pulumi.String("gs://my-bucket/tmp_dir"),
			EnableStreamingEngine: pulumi.Bool(true),
			Parameters: pulumi.Map{
				"inputFilePattern": bucket1.Url.ApplyT(func(url string) (string, error) {
					return fmt.Sprintf("%v/*.json", url), nil
				}).(pulumi.StringOutput),
				"outputTopic": topic.ID(),
			},
			TransformNameMapping: pulumi.Map{
				"name": pulumi.Any("test_job"),
				"env":  pulumi.Any("test"),
			},
			OnDelete: pulumi.String("cancel"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Note on "destroy" / "apply"

There are many types of Dataflow jobs. Some Dataflow jobs run constantly, getting new data from (e.g.) a GCS bucket, and outputting data continuously. Some jobs process a set amount of data then terminate. All jobs can fail while running due to programming errors or other issues. In this way, Dataflow jobs are different from most other Google resources.

The Dataflow resource is considered 'existing' while it is in a nonterminal state. If it reaches a terminal state (e.g. 'FAILED', 'COMPLETE', 'CANCELLED'), it will be recreated on the next 'apply'. This is as expected for jobs which run continuously, but may surprise users who use this resource for other kinds of Dataflow jobs.

A Dataflow job which is 'destroyed' may be "cancelled" or "drained". If "cancelled", the job terminates - any data written remains where it is, but no new data will be processed. If "drained", no new data will enter the pipeline, but any data currently in the pipeline will finish being processed. The default is "drain". When `onDelete` is set to `"drain"` in the configuration, you may experience a long wait for your `pulumi destroy` to complete.

You can potentially short-circuit the wait by setting `skipWaitOnJobTermination` to `true`, but beware that unless you take active steps to ensure that the job `name` parameter changes between instances, the name will conflict and the launch of the new job will fail. One way to do this is with a randomId resource, for example:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi-random/sdk/v4/go/random"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		region := cfg.Require("region")
		bigDataJobSubscriptionId := "projects/myproject/subscriptions/messages"
		if param := cfg.Get("bigDataJobSubscriptionId"); param != "" {
			bigDataJobSubscriptionId = param
		}
		_, err := random.NewRandomId(ctx, "bigDataJobNameSuffix", &random.RandomIdArgs{
			ByteLength: pulumi.Int(4),
			Keepers: pulumi.Map{
				"region":          pulumi.String(region),
				"subscription_id": pulumi.String(bigDataJobSubscriptionId),
			},
		})
		if err != nil {
			return err
		}
		_, err = dataflow.NewFlexTemplateJob(ctx, "bigDataJob", &dataflow.FlexTemplateJobArgs{
			Region:                   pulumi.String(region),
			ContainerSpecGcsPath:     pulumi.String("gs://my-bucket/templates/template.json"),
			SkipWaitOnJobTermination: pulumi.Bool(true),
			Parameters: pulumi.Map{
				"inputSubscription": pulumi.String(bigDataJobSubscriptionId),
			},
		}) // an explicit provider instance can be passed with pulumi.Provider(...)
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Dataflow jobs can be imported using the job `id`, e.g.

```sh

$ pulumi import gcp:dataflow/job:Job example 2022-07-31_06_25_42-11926927532632678660

```

func GetJob

func GetJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error)

GetJob gets an existing Job resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
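
A minimal sketch of reading back an existing job's state (the logical name `example` and the job ID mirror the import example above and are placeholders):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing Dataflow Job by its job ID.
		job, err := dataflow.GetJob(ctx, "example",
			pulumi.ID("2022-07-31_06_25_42-11926927532632678660"), nil)
		if err != nil {
			return err
		}
		ctx.Export("jobState", job.State)
		return nil
	})
}
```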

func NewJob

func NewJob(ctx *pulumi.Context,
	name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error)

NewJob registers a new resource with the given unique name, arguments, and options.

func (*Job) ElementType

func (*Job) ElementType() reflect.Type

func (*Job) ToJobOutput

func (i *Job) ToJobOutput() JobOutput

func (*Job) ToJobOutputWithContext

func (i *Job) ToJobOutputWithContext(ctx context.Context) JobOutput

type JobArgs

type JobArgs struct {
	// List of experiments that should be used by the job. An example value is `["enableStackdriverAgentMetrics"]`.
	AdditionalExperiments pulumi.StringArrayInput
	// Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
	EnableStreamingEngine pulumi.BoolPtrInput
	// The configuration for VM IPs.  Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.
	IpConfiguration pulumi.StringPtrInput
	// The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`
	KmsKeyName pulumi.StringPtrInput
	// User labels to be specified for the job. Keys and values should follow the restrictions
	// specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page.
	// **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`.
	// Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapInput
	// The machine type to use for the job.
	MachineType pulumi.StringPtrInput
	// The number of workers permitted to work on the job.  More workers may improve processing speed at additional cost.
	MaxWorkers pulumi.IntPtrInput
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringPtrInput
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrInput
	// One of "drain" or "cancel".  Specifies behavior of deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrInput
	// Key/Value pairs to be passed to the Dataflow job (as used in the template).
	Parameters pulumi.MapInput
	// The project in which the resource belongs. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the created job should run.
	Region pulumi.StringPtrInput
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringPtrInput
	// If set to `true`, Pulumi will treat `DRAINING` and `CANCELLING` as terminal states when deleting the resource, and will remove the resource from Pulumi state and move on.  See above note.
	SkipWaitOnJobTermination pulumi.BoolPtrInput
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
	Subnetwork pulumi.StringPtrInput
	// A writeable location on GCS for the Dataflow job to dump its temporary data.
	TempGcsLocation pulumi.StringInput
	// The GCS path to the Dataflow job template.
	TemplateGcsPath pulumi.StringInput
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
	TransformNameMapping pulumi.MapInput
	// The zone in which the created job should run. If it is not provided, the provider zone is used.
	Zone pulumi.StringPtrInput
}

The set of arguments for constructing a Job resource.

func (JobArgs) ElementType

func (JobArgs) ElementType() reflect.Type

type JobArray

type JobArray []JobInput

func (JobArray) ElementType

func (JobArray) ElementType() reflect.Type

func (JobArray) ToJobArrayOutput

func (i JobArray) ToJobArrayOutput() JobArrayOutput

func (JobArray) ToJobArrayOutputWithContext

func (i JobArray) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobArrayInput

type JobArrayInput interface {
	pulumi.Input

	ToJobArrayOutput() JobArrayOutput
	ToJobArrayOutputWithContext(context.Context) JobArrayOutput
}

JobArrayInput is an input type that accepts JobArray and JobArrayOutput values. You can construct a concrete instance of `JobArrayInput` via:

JobArray{ JobArgs{...} }

type JobArrayOutput

type JobArrayOutput struct{ *pulumi.OutputState }

func (JobArrayOutput) ElementType

func (JobArrayOutput) ElementType() reflect.Type

func (JobArrayOutput) Index

func (o JobArrayOutput) Index(i pulumi.IntInput) JobOutput

func (JobArrayOutput) ToJobArrayOutput

func (o JobArrayOutput) ToJobArrayOutput() JobArrayOutput

func (JobArrayOutput) ToJobArrayOutputWithContext

func (o JobArrayOutput) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobInput

type JobInput interface {
	pulumi.Input

	ToJobOutput() JobOutput
	ToJobOutputWithContext(ctx context.Context) JobOutput
}

type JobMap

type JobMap map[string]JobInput

func (JobMap) ElementType

func (JobMap) ElementType() reflect.Type

func (JobMap) ToJobMapOutput

func (i JobMap) ToJobMapOutput() JobMapOutput

func (JobMap) ToJobMapOutputWithContext

func (i JobMap) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobMapInput

type JobMapInput interface {
	pulumi.Input

	ToJobMapOutput() JobMapOutput
	ToJobMapOutputWithContext(context.Context) JobMapOutput
}

JobMapInput is an input type that accepts JobMap and JobMapOutput values. You can construct a concrete instance of `JobMapInput` via:

JobMap{ "key": JobArgs{...} }

type JobMapOutput

type JobMapOutput struct{ *pulumi.OutputState }

func (JobMapOutput) ElementType

func (JobMapOutput) ElementType() reflect.Type

func (JobMapOutput) MapIndex

func (o JobMapOutput) MapIndex(k pulumi.StringInput) JobOutput

func (JobMapOutput) ToJobMapOutput

func (o JobMapOutput) ToJobMapOutput() JobMapOutput

func (JobMapOutput) ToJobMapOutputWithContext

func (o JobMapOutput) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobOutput

type JobOutput struct{ *pulumi.OutputState }

func (JobOutput) AdditionalExperiments added in v6.23.0

func (o JobOutput) AdditionalExperiments() pulumi.StringArrayOutput

List of experiments that should be used by the job. An example value is `["enableStackdriverAgentMetrics"]`.

func (JobOutput) ElementType

func (JobOutput) ElementType() reflect.Type

func (JobOutput) EnableStreamingEngine added in v6.23.0

func (o JobOutput) EnableStreamingEngine() pulumi.BoolPtrOutput

Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.

func (JobOutput) IpConfiguration added in v6.23.0

func (o JobOutput) IpConfiguration() pulumi.StringPtrOutput

The configuration for VM IPs. Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.

func (JobOutput) JobId added in v6.23.0

func (o JobOutput) JobId() pulumi.StringOutput

The unique ID of this job.

func (JobOutput) KmsKeyName added in v6.23.0

func (o JobOutput) KmsKeyName() pulumi.StringPtrOutput

The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`

func (JobOutput) Labels added in v6.23.0

func (o JobOutput) Labels() pulumi.MapOutput

User labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.

func (JobOutput) MachineType added in v6.23.0

func (o JobOutput) MachineType() pulumi.StringPtrOutput

The machine type to use for the job.

func (JobOutput) MaxWorkers added in v6.23.0

func (o JobOutput) MaxWorkers() pulumi.IntPtrOutput

The number of workers permitted to work on the job. More workers may improve processing speed at additional cost.

func (JobOutput) Name added in v6.23.0

func (o JobOutput) Name() pulumi.StringOutput

A unique name for the resource, required by Dataflow.

func (JobOutput) Network added in v6.23.0

func (o JobOutput) Network() pulumi.StringPtrOutput

The network to which VMs will be assigned. If it is not provided, "default" will be used.

func (JobOutput) OnDelete added in v6.23.0

func (o JobOutput) OnDelete() pulumi.StringPtrOutput

One of "drain" or "cancel". Specifies behavior of deletion during `pulumi destroy`. See above note.

func (JobOutput) Parameters added in v6.23.0

func (o JobOutput) Parameters() pulumi.MapOutput

Key/Value pairs to be passed to the Dataflow job (as used in the template).

func (JobOutput) Project added in v6.23.0

func (o JobOutput) Project() pulumi.StringOutput

The project in which the resource belongs. If it is not provided, the provider project is used.

func (JobOutput) Region added in v6.23.0

func (o JobOutput) Region() pulumi.StringPtrOutput

The region in which the created job should run.

func (JobOutput) ServiceAccountEmail added in v6.23.0

func (o JobOutput) ServiceAccountEmail() pulumi.StringPtrOutput

The Service Account email used to create the job.

func (JobOutput) SkipWaitOnJobTermination added in v6.23.0

func (o JobOutput) SkipWaitOnJobTermination() pulumi.BoolPtrOutput

If set to `true`, Pulumi will treat `DRAINING` and `CANCELLING` as terminal states when deleting the resource, and will remove the resource from Pulumi state and move on. See above note.

func (JobOutput) State added in v6.23.0

func (o JobOutput) State() pulumi.StringOutput

The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)

func (JobOutput) Subnetwork added in v6.23.0

func (o JobOutput) Subnetwork() pulumi.StringPtrOutput

The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
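
When constructing the resource, this is set through `JobArgs.Subnetwork`. A hedged sketch (the host project, region, subnet, bucket, and template paths are placeholders) of passing a Shared VPC subnetwork as a complete URL:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/dataflow"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// A subnetwork in a Shared VPC host project must be referenced by its complete URL.
		_, err := dataflow.NewJob(ctx, "sharedVpcJob", &dataflow.JobArgs{
			TemplateGcsPath: pulumi.String("gs://my-bucket/templates/template_file"),
			TempGcsLocation: pulumi.String("gs://my-bucket/tmp_dir"),
			Subnetwork:      pulumi.String("googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/us-central1/subnetworks/SUBNET_NAME"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```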

func (JobOutput) TempGcsLocation added in v6.23.0

func (o JobOutput) TempGcsLocation() pulumi.StringOutput

A writeable location on GCS for the Dataflow job to dump its temporary data.

func (JobOutput) TemplateGcsPath added in v6.23.0

func (o JobOutput) TemplateGcsPath() pulumi.StringOutput

The GCS path to the Dataflow job template.

func (JobOutput) ToJobOutput

func (o JobOutput) ToJobOutput() JobOutput

func (JobOutput) ToJobOutputWithContext

func (o JobOutput) ToJobOutputWithContext(ctx context.Context) JobOutput

func (JobOutput) TransformNameMapping added in v6.23.0

func (o JobOutput) TransformNameMapping() pulumi.MapOutput

Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.

func (JobOutput) Type added in v6.23.0

func (o JobOutput) Type() pulumi.StringOutput

The type of this job, selected from the [JobType enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobType)

func (JobOutput) Zone added in v6.23.0

func (o JobOutput) Zone() pulumi.StringPtrOutput

The zone in which the created job should run. If it is not provided, the provider zone is used.

type JobState

type JobState struct {
	// List of experiments that should be used by the job. An example value is `["enableStackdriverAgentMetrics"]`.
	AdditionalExperiments pulumi.StringArrayInput
	// Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3.
	EnableStreamingEngine pulumi.BoolPtrInput
	// The configuration for VM IPs.  Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`.
	IpConfiguration pulumi.StringPtrInput
	// The unique ID of this job.
	JobId pulumi.StringPtrInput
	// The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`
	KmsKeyName pulumi.StringPtrInput
	// User labels to be specified for the job. Keys and values should follow the restrictions
	// specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page.
	// **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`.
	// Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.
	Labels pulumi.MapInput
	// The machine type to use for the job.
	MachineType pulumi.StringPtrInput
	// The number of workers permitted to work on the job.  More workers may improve processing speed at additional cost.
	MaxWorkers pulumi.IntPtrInput
	// A unique name for the resource, required by Dataflow.
	Name pulumi.StringPtrInput
	// The network to which VMs will be assigned. If it is not provided, "default" will be used.
	Network pulumi.StringPtrInput
	// One of "drain" or "cancel".  Specifies behavior of deletion during `pulumi destroy`.  See above note.
	OnDelete pulumi.StringPtrInput
	// Key/Value pairs to be passed to the Dataflow job (as used in the template).
	Parameters pulumi.MapInput
	// The project in which the resource belongs. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the created job should run.
	Region pulumi.StringPtrInput
	// The Service Account email used to create the job.
	ServiceAccountEmail pulumi.StringPtrInput
	// If set to `true`, Pulumi will treat `DRAINING` and `CANCELLING` as terminal states when deleting the resource, and will remove the resource from Pulumi state and move on.  See above note.
	SkipWaitOnJobTermination pulumi.BoolPtrInput
	// The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState)
	State pulumi.StringPtrInput
	// The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"`
	Subnetwork pulumi.StringPtrInput
	// A writeable location on GCS for the Dataflow job to dump its temporary data.
	TempGcsLocation pulumi.StringPtrInput
	// The GCS path to the Dataflow job template.
	TemplateGcsPath pulumi.StringPtrInput
	// Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update.
	TransformNameMapping pulumi.MapInput
	// The type of this job, selected from the [JobType enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobType)
	Type pulumi.StringPtrInput
	// The zone in which the created job should run. If it is not provided, the provider zone is used.
	Zone pulumi.StringPtrInput
}

func (JobState) ElementType

func (JobState) ElementType() reflect.Type
