jobs

package
v0.4.1
Published: Mar 9, 2023 License: Apache-2.0 Imports: 10 Imported by: 18

Documentation

Overview

The Jobs API allows you to create, edit, and delete jobs.

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type BaseJob added in v0.3.0

type BaseJob struct {
	// The time at which this job was created in epoch milliseconds
	// (milliseconds since 1/1/1970 UTC).
	CreatedTime int64 `json:"created_time,omitempty"`
	// The creator user name. This field won’t be included in the response if
	// the user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// The canonical identifier for this job.
	JobId int64 `json:"job_id,omitempty"`
	// Settings for this job and all of its runs. These settings can be updated
	// using the `resetJob` method.
	Settings *JobSettings `json:"settings,omitempty"`
}

type BaseRun added in v0.3.0

type BaseRun struct {
	// The sequence number of this run attempt for a triggered job run. The
	// initial attempt of a run has an attempt_number of 0. If the initial run
	// attempt fails, and the job has a retry policy (`max_retries` > 0),
	// subsequent runs are created with an `original_attempt_run_id` of the
	// original attempt’s ID and an incrementing `attempt_number`. Runs are
	// retried only until they succeed, and the maximum `attempt_number` is the
	// same as the `max_retries` value for the job.
	AttemptNumber int `json:"attempt_number,omitempty"`
	// The time in milliseconds it took to terminate the cluster and clean up
	// any associated artifacts. The duration of a task run is the sum of the
	// `setup_duration`, `execution_duration`, and the `cleanup_duration`. The
	// `cleanup_duration` field is set to 0 for multitask job runs. The total
	// duration of a multitask job run is the value of the `run_duration` field.
	CleanupDuration int64 `json:"cleanup_duration,omitempty"`
	// The cluster used for this run. If the run is specified to use a new
	// cluster, this field is set once the Jobs service has requested a cluster
	// for the run.
	ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"`
	// A snapshot of the job’s cluster specification when this run was
	// created.
	ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"`
	// The continuous trigger that triggered this run.
	Continuous *Continuous `json:"continuous,omitempty"`
	// The creator user name. This field won’t be included in the response if
	// the user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// The time at which this run ended in epoch milliseconds (milliseconds
	// since 1/1/1970 UTC). This field is set to 0 if the job is still running.
	EndTime int64 `json:"end_time,omitempty"`
	// The time in milliseconds it took to execute the commands in the JAR or
	// notebook until they completed, failed, timed out, were cancelled, or
	// encountered an unexpected error. The duration of a task run is the sum of
	// the `setup_duration`, `execution_duration`, and the `cleanup_duration`.
	// The `execution_duration` field is set to 0 for multitask job runs. The
	// total duration of a multitask job run is the value of the `run_duration`
	// field.
	ExecutionDuration int64 `json:"execution_duration,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// A list of job cluster specifications that can be shared and reused by
	// tasks of this job. Libraries cannot be declared in a shared job cluster.
	// You must declare dependent libraries in task settings.
	JobClusters []JobCluster `json:"job_clusters,omitempty"`
	// The canonical identifier of the job that contains this run.
	JobId int64 `json:"job_id,omitempty"`
	// A unique identifier for this job run. This is set to the same value as
	// `run_id`.
	NumberInJob int64 `json:"number_in_job,omitempty"`
	// If this run is a retry of a prior run attempt, this field contains the
	// run_id of the original attempt; otherwise, it is the same as the run_id.
	OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"`
	// The parameters used for this run.
	OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"`
	// The time in milliseconds it took the job run and all of its repairs to
	// finish.
	RunDuration int `json:"run_duration,omitempty"`
	// The canonical identifier of the run. This ID is unique across all runs of
	// all jobs.
	RunId int64 `json:"run_id,omitempty"`
	// An optional name for the run. The maximum allowed length is 4096 bytes in
	// UTF-8 encoding.
	RunName string `json:"run_name,omitempty"`
	// The URL to the detail page of the run.
	RunPageUrl string `json:"run_page_url,omitempty"`
	// The type of the run.
	RunType RunType `json:"run_type,omitempty"`
	// The cron schedule that triggered this run if it was triggered by the
	// periodic scheduler.
	Schedule *CronSchedule `json:"schedule,omitempty"`
	// The time in milliseconds it took to set up the cluster. For runs that run
	// on new clusters this is the cluster creation time, for runs that run on
	// existing clusters this time should be very short. The duration of a task
	// run is the sum of the `setup_duration`, `execution_duration`, and the
	// `cleanup_duration`. The `setup_duration` field is set to 0 for multitask
	// job runs. The total duration of a multitask job run is the value of the
	// `run_duration` field.
	SetupDuration int64 `json:"setup_duration,omitempty"`
	// The time at which this run was started in epoch milliseconds
	// (milliseconds since 1/1/1970 UTC). This may not be the time when the job
	// task starts executing, for example, if the job is scheduled to run on a
	// new cluster, this is the time the cluster creation call is issued.
	StartTime int64 `json:"start_time,omitempty"`
	// The result and lifecycle states of the run.
	State *RunState `json:"state,omitempty"`
	// The list of tasks performed by the run. Each task has its own `run_id`
	// which you can use to call `JobsGetOutput` to retrieve the run results.
	Tasks []RunTask `json:"tasks,omitempty"`
	// The type of trigger that fired this run.
	Trigger TriggerType `json:"trigger,omitempty"`
}

type CancelAllRuns

type CancelAllRuns struct {
	// The canonical identifier of the job to cancel all runs of. This field is
	// required.
	JobId int64 `json:"job_id"`
}

type CancelRun

type CancelRun struct {
	// This field is required.
	RunId int64 `json:"run_id"`
}

type ClusterInstance

type ClusterInstance struct {
	// The canonical identifier for the cluster used by a run. This field is
	// always available for runs on existing clusters. For runs on new clusters,
	// it becomes available once the cluster is created. This value can be used
	// to view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`.
	// The logs continue to be available after the run completes.
	//
	// The response won’t include this field if the identifier is not
	// available yet.
	ClusterId string `json:"cluster_id,omitempty"`
	// The canonical identifier for the Spark context used by a run. This field
	// is filled in once the run begins execution. This value can be used to
	// view the Spark UI by browsing to
	// `/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues
	// to be available after the run has completed.
	//
	// The response won’t include this field if the identifier is not
	// available yet.
	SparkContextId string `json:"spark_context_id,omitempty"`
}

type ClusterSpec

type ClusterSpec struct {
	// If existing_cluster_id, the ID of an existing cluster that is used for
	// all runs of this job. When running jobs on an existing cluster, you may
	// need to manually restart the cluster if it stops responding. We suggest
	// running jobs on new clusters for greater reliability.
	ExistingClusterId string `json:"existing_cluster_id,omitempty"`
	// An optional list of libraries to be installed on the cluster that
	// executes the job. The default value is an empty list.
	Libraries []libraries.Library `json:"libraries,omitempty"`
	// If new_cluster, a description of a cluster that is created for each run.
	NewCluster *clusters.BaseClusterInfo `json:"new_cluster,omitempty"`
}

type Continuous added in v0.4.0

type Continuous struct {
	// Indicate whether the continuous execution of the job is paused or not.
	// Defaults to UNPAUSED.
	PauseStatus ContinuousPauseStatus `json:"pause_status,omitempty"`
}

type ContinuousPauseStatus added in v0.4.0

type ContinuousPauseStatus string

Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED.

const ContinuousPauseStatusPaused ContinuousPauseStatus = `PAUSED`
const ContinuousPauseStatusUnpaused ContinuousPauseStatus = `UNPAUSED`

func (*ContinuousPauseStatus) Set added in v0.4.0

func (cps *ContinuousPauseStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*ContinuousPauseStatus) String added in v0.4.0

func (cps *ContinuousPauseStatus) String() string

String representation for fmt.Print

func (*ContinuousPauseStatus) Type added in v0.4.0

func (cps *ContinuousPauseStatus) Type() string

Type always returns ContinuousPauseStatus to satisfy [pflag.Value] interface
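
All of the enum string types in this package implement the same pflag.Value trio shown here. A minimal sketch of using Set to validate a raw value, assuming the standard fmt package is imported:

var pause jobs.ContinuousPauseStatus
if err := pause.Set("PAUSED"); err != nil {
	// Set returns an error for values other than PAUSED or UNPAUSED.
	panic(err)
}
fmt.Println(pause.String(), pause.Type()) // prints: PAUSED ContinuousPauseStatus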

type CreateJob

type CreateJob struct {
	// List of permissions to set on the job.
	AccessControlList []permissions.AccessControlRequest `json:"access_control_list,omitempty"`
	// An optional continuous property for this job. The continuous property
	// will ensure that there is always one run executing. Only one of
	// `schedule` and `continuous` can be used.
	Continuous *Continuous `json:"continuous,omitempty"`
	// An optional set of email addresses that is notified when runs of this job
	// begin or complete as well as when this job is deleted. The default
	// behavior is to not send any emails.
	EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
	// The format of the job. This field is ignored in Create/Update/Reset
	// calls. When using the Jobs API 2.1, this value is always set to
	// `"MULTI_TASK"`.
	Format CreateJobFormat `json:"format,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// A list of job cluster specifications that can be shared and reused by
	// tasks of this job. Libraries cannot be declared in a shared job cluster.
	// You must declare dependent libraries in task settings.
	JobClusters []JobCluster `json:"job_clusters,omitempty"`
	// An optional maximum allowed number of concurrent runs of the job.
	//
	// Set this value if you want to be able to execute multiple runs of the
	// same job concurrently. This is useful for example if you trigger your job
	// on a frequent schedule and want to allow consecutive runs to overlap with
	// each other, or if you want to trigger multiple runs which differ by their
	// input parameters.
	//
	// This setting affects only new runs. For example, suppose the job’s
	// concurrency is 4 and there are 4 concurrent active runs. Then setting the
	// concurrency to 3 won’t kill any of the active runs. However, from then
	// on, new runs are skipped unless there are fewer than 3 active runs.
	//
	// This value cannot exceed 1000. Setting this value to 0 causes all new
	// runs to be skipped. The default behavior is to allow only 1 concurrent
	// run.
	MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"`
	// An optional name for the job.
	Name string `json:"name,omitempty"`
	// An optional periodic schedule for this job. The default behavior is that
	// the job only runs when triggered by clicking “Run Now” in the Jobs UI
	// or sending an API request to `runNow`.
	Schedule *CronSchedule `json:"schedule,omitempty"`
	// A map of tags associated with the job. These are forwarded to the cluster
	// as cluster tags for jobs clusters, and are subject to the same
	// limitations as cluster tags. A maximum of 25 tags can be added to the
	// job.
	Tags map[string]string `json:"tags,omitempty"`
	// A list of task specifications to be executed by this job.
	Tasks []JobTaskSettings `json:"tasks,omitempty"`
	// An optional timeout applied to each run of this job. The default behavior
	// is to have no timeout.
	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
	// Trigger settings for the job. Can be used to trigger a run when new files
	// arrive in an external location. The default behavior is that the job runs
	// only when triggered by clicking “Run Now” in the Jobs UI or sending
	// an API request to `runNow`.
	Trigger *TriggerSettings `json:"trigger,omitempty"`
	// A collection of system notification IDs to notify when the run begins or
	// completes. The default behavior is to not send any system notifications.
	WebhookNotifications *JobWebhookNotifications `json:"webhook_notifications,omitempty"`
}
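
A minimal sketch of a CreateJob request with a single notebook task pinned to an existing cluster; the job name, cluster ID, notebook path, and email address are placeholders. The request would then be passed to JobsAPI.Create (see below).

req := jobs.CreateJob{
	Name:              "nightly-report",
	MaxConcurrentRuns: 1,
	Tasks: []jobs.JobTaskSettings{{
		TaskKey:           "main",
		ExistingClusterId: "1234-567890-abcdef12", // placeholder cluster ID
		NotebookTask: &jobs.NotebookTask{
			NotebookPath: "/Users/someone@example.com/nightly_report",
		},
		TimeoutSeconds: 3600,
	}},
	EmailNotifications: &jobs.JobEmailNotifications{
		OnFailure: []string{"someone@example.com"},
	},
}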

type CreateJobFormat

type CreateJobFormat string

The format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1, this value is always set to `"MULTI_TASK"`.

const CreateJobFormatMultiTask CreateJobFormat = `MULTI_TASK`
const CreateJobFormatSingleTask CreateJobFormat = `SINGLE_TASK`

func (*CreateJobFormat) Set added in v0.2.0

func (cjf *CreateJobFormat) Set(v string) error

Set raw string value and validate it against allowed values

func (*CreateJobFormat) String added in v0.2.0

func (cjf *CreateJobFormat) String() string

String representation for fmt.Print

func (*CreateJobFormat) Type added in v0.2.0

func (cjf *CreateJobFormat) Type() string

Type always returns CreateJobFormat to satisfy [pflag.Value] interface

type CreateResponse

type CreateResponse struct {
	// The canonical identifier for the newly created job.
	JobId int64 `json:"job_id,omitempty"`
}

type CronSchedule

type CronSchedule struct {
	// Indicate whether this schedule is paused or not.
	PauseStatus CronSchedulePauseStatus `json:"pause_status,omitempty"`
	// A Cron expression using Quartz syntax that describes the schedule for a
	// job. See [Cron Trigger] for details. This field is required."
	//
	// [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html
	QuartzCronExpression string `json:"quartz_cron_expression"`
	// A Java timezone ID. The schedule for a job is resolved with respect to
	// this timezone. See [Java TimeZone] for details. This field is required.
	//
	// [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html
	TimezoneId string `json:"timezone_id"`
}
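
For example, a schedule that fires at 06:00 UTC every day could look like the sketch below; the Quartz expression and timezone are illustrative.

schedule := &jobs.CronSchedule{
	// Quartz fields: seconds minutes hours day-of-month month day-of-week
	QuartzCronExpression: "0 0 6 * * ?",
	TimezoneId:           "UTC",
	PauseStatus:          jobs.CronSchedulePauseStatusUnpaused,
}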

type CronSchedulePauseStatus

type CronSchedulePauseStatus string

Indicate whether this schedule is paused or not.

const CronSchedulePauseStatusPaused CronSchedulePauseStatus = `PAUSED`
const CronSchedulePauseStatusUnpaused CronSchedulePauseStatus = `UNPAUSED`

func (*CronSchedulePauseStatus) Set added in v0.2.0

func (csps *CronSchedulePauseStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*CronSchedulePauseStatus) String added in v0.2.0

func (csps *CronSchedulePauseStatus) String() string

String representation for fmt.Print

func (*CronSchedulePauseStatus) Type added in v0.2.0

func (csps *CronSchedulePauseStatus) Type() string

Type always returns CronSchedulePauseStatus to satisfy [pflag.Value] interface

type DbtOutput

type DbtOutput struct {
	// An optional map of headers to send when retrieving the artifact from the
	// `artifacts_link`.
	ArtifactsHeaders map[string]string `json:"artifacts_headers,omitempty"`
	// A pre-signed URL to download the (compressed) dbt artifacts. This link is
	// valid for a limited time (30 minutes). This information is only available
	// after the run has finished.
	ArtifactsLink string `json:"artifacts_link,omitempty"`
}

type DbtTask

type DbtTask struct {
	// Optional name of the catalog to use. The value is the top level in the
	// 3-level namespace of Unity Catalog (catalog / schema / relation). The
	// catalog value can only be specified if a warehouse_id is specified.
	// Requires dbt-databricks >= 1.1.1.
	Catalog string `json:"catalog,omitempty"`
	// A list of dbt commands to execute. All commands must start with `dbt`.
	// This parameter must not be empty. A maximum of 10 commands can be
	// provided.
	Commands []string `json:"commands"`
	// Optional (relative) path to the profiles directory. Can only be specified
	// if no warehouse_id is specified. If no warehouse_id is specified and this
	// folder is unset, the root directory is used.
	ProfilesDirectory string `json:"profiles_directory,omitempty"`
	// Optional (relative) path to the project directory, if no value is
	// provided, the root of the git repository is used.
	ProjectDirectory string `json:"project_directory,omitempty"`
	// Optional schema to write to. This parameter is only used when a
	// warehouse_id is also provided. If not provided, the `default` schema is
	// used.
	Schema string `json:"schema,omitempty"`
	// ID of the SQL warehouse to connect to. If provided, we automatically
	// generate and provide the profile and connection details to dbt. It can be
	// overridden on a per-command basis by using the `--profiles-dir` command
	// line argument.
	WarehouseId string `json:"warehouse_id,omitempty"`
}

type DeleteJob

type DeleteJob struct {
	// The canonical identifier of the job to delete. This field is required.
	JobId int64 `json:"job_id"`
}

type DeleteRun

type DeleteRun struct {
	// The canonical identifier of the run for which to retrieve the metadata.
	RunId int64 `json:"run_id"`
}

type ExportRun

type ExportRun struct {
	// The canonical identifier for the run. This field is required.
	RunId int64 `json:"-" url:"run_id"`
	// Which views to export (CODE, DASHBOARDS, or ALL). Defaults to CODE.
	ViewsToExport ViewsToExport `json:"-" url:"views_to_export,omitempty"`
}

Export and retrieve a job run

type ExportRunOutput

type ExportRunOutput struct {
	// The exported content in HTML format (one for every view item).
	Views []ViewItem `json:"views,omitempty"`
}

type FileArrivalTriggerSettings added in v0.4.0

type FileArrivalTriggerSettings struct {
	// If set, the trigger starts a run only after the specified amount of time
	// passed since the last time the trigger fired. The minimum allowed value
	// is 60 seconds
	MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"`
	// URL to be monitored for file arrivals. The path must point to the root or
	// a subpath of the external location.
	Url string `json:"url,omitempty"`
	// If set, the trigger starts a run only after no file activity has occurred
	// for the specified amount of time. This makes it possible to wait for a
	// batch of incoming files to arrive before triggering a run. The minimum
	// allowed value is 60 seconds.
	WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
}
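
A sketch of file-arrival trigger settings that waits for incoming files to settle before starting a run; the URL is a placeholder for an external location path.

fileTrigger := &jobs.FileArrivalTriggerSettings{
	Url:                          "s3://example-bucket/landing/",
	MinTimeBetweenTriggerSeconds: 60,  // at most one triggered run per minute
	WaitAfterLastChangeSeconds:   120, // let a batch of files finish arriving first
}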

type Get

type Get struct {
	// The canonical identifier of the job to retrieve information about. This
	// field is required.
	JobId int64 `json:"-" url:"job_id"`
}

Get a single job

type GetRun

type GetRun struct {
	// Whether to include the repair history in the response.
	IncludeHistory bool `json:"-" url:"include_history,omitempty"`
	// The canonical identifier of the run for which to retrieve the metadata.
	// This field is required.
	RunId int64 `json:"-" url:"run_id"`
}

Get a single job run

type GetRunOutput

type GetRunOutput struct {
	// The canonical identifier for the run. This field is required.
	RunId int64 `json:"-" url:"run_id"`
}

Get the output for a single run

type GitSnapshot

type GitSnapshot struct {
	// Commit that was used to execute the run. If git_branch was specified,
	// this points to the HEAD of the branch at the time of the run; if git_tag
	// was specified, this points to the commit the tag points to.
	UsedCommit string `json:"used_commit,omitempty"`
}

Read-only state of the remote repository at the time the job was run. This field is only included on job runs.

type GitSource

type GitSource struct {
	// Name of the branch to be checked out and used by this job. This field
	// cannot be specified in conjunction with git_tag or git_commit.
	//
	// The maximum length is 255 characters.
	GitBranch string `json:"git_branch,omitempty"`
	// Commit to be checked out and used by this job. This field cannot be
	// specified in conjunction with git_branch or git_tag. The maximum length
	// is 64 characters.
	GitCommit string `json:"git_commit,omitempty"`
	// Unique identifier of the service used to host the Git repository. The
	// value is case insensitive.
	GitProvider GitSourceGitProvider `json:"git_provider"`
	// Read-only state of the remote repository at the time the job was run.
	// This field is only included on job runs.
	GitSnapshot *GitSnapshot `json:"git_snapshot,omitempty"`
	// Name of the tag to be checked out and used by this job. This field cannot
	// be specified in conjunction with git_branch or git_commit.
	//
	// The maximum length is 255 characters.
	GitTag string `json:"git_tag,omitempty"`
	// URL of the repository to be cloned by this job. The maximum length is 300
	// characters.
	GitUrl string `json:"git_url"`
}

An optional specification for a remote repository containing the notebooks used by this job's notebook tasks.
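
A sketch of a GitSource that pins notebook tasks to a branch of a GitHub repository; the URL and branch name are placeholders, and only one of git_branch, git_tag, or git_commit may be set.

src := &jobs.GitSource{
	GitUrl:      "https://github.com/example-org/example-repo",
	GitProvider: jobs.GitSourceGitProviderGithub,
	GitBranch:   "main",
}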

type GitSourceGitProvider

type GitSourceGitProvider string

Unique identifier of the service used to host the Git repository. The value is case insensitive.

const GitSourceGitProviderAwscodecommit GitSourceGitProvider = `awsCodeCommit`
const GitSourceGitProviderAzuredevopsservices GitSourceGitProvider = `azureDevOpsServices`
const GitSourceGitProviderBitbucketcloud GitSourceGitProvider = `bitbucketCloud`
const GitSourceGitProviderBitbucketserver GitSourceGitProvider = `bitbucketServer`
const GitSourceGitProviderGithub GitSourceGitProvider = `gitHub`
const GitSourceGitProviderGithubenterprise GitSourceGitProvider = `gitHubEnterprise`
const GitSourceGitProviderGitlab GitSourceGitProvider = `gitLab`
const GitSourceGitProviderGitlabenterpriseedition GitSourceGitProvider = `gitLabEnterpriseEdition`

func (*GitSourceGitProvider) Set added in v0.2.0

func (gsgp *GitSourceGitProvider) Set(v string) error

Set raw string value and validate it against allowed values

func (*GitSourceGitProvider) String added in v0.2.0

func (gsgp *GitSourceGitProvider) String() string

String representation for fmt.Print

func (*GitSourceGitProvider) Type added in v0.2.0

func (gsgp *GitSourceGitProvider) Type() string

Type always returns GitSourceGitProvider to satisfy [pflag.Value] interface

type Job

type Job struct {
	// The time at which this job was created in epoch milliseconds
	// (milliseconds since 1/1/1970 UTC).
	CreatedTime int64 `json:"created_time,omitempty"`
	// The creator user name. This field won’t be included in the response if
	// the user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// The canonical identifier for this job.
	JobId int64 `json:"job_id,omitempty"`
	// The user name that the job runs as. `run_as_user_name` is based on the
	// current job settings, and is set to the creator of the job if job access
	// control is disabled, or the `is_owner` permission if job access control
	// is enabled.
	RunAsUserName string `json:"run_as_user_name,omitempty"`
	// Settings for this job and all of its runs. These settings can be updated
	// using the `resetJob` method.
	Settings *JobSettings `json:"settings,omitempty"`
	// History of the file arrival trigger associated with the job.
	TriggerHistory *TriggerHistory `json:"trigger_history,omitempty"`
}

type JobCluster

type JobCluster struct {
	// A unique name for the job cluster. This field is required and must be
	// unique within the job. `JobTaskSettings` may refer to this field to
	// determine which cluster to launch for the task execution.
	JobClusterKey string `json:"job_cluster_key"`
	// If new_cluster, a description of a cluster that is created for each task.
	NewCluster *clusters.BaseClusterInfo `json:"new_cluster,omitempty"`
}

type JobEmailNotifications

type JobEmailNotifications struct {
	// If true, do not send email to recipients specified in `on_failure` if the
	// run is skipped.
	NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
	// A list of email addresses to be notified when a run unsuccessfully
	// completes. A run is considered to have completed unsuccessfully if it
	// ends with an `INTERNAL_ERROR` `life_cycle_state` or a `SKIPPED`,
	// `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job
	// creation, reset, or update the list is empty, and notifications are not
	// sent.
	OnFailure []string `json:"on_failure,omitempty"`
	// A list of email addresses to be notified when a run begins. If not
	// specified on job creation, reset, or update, the list is empty, and
	// notifications are not sent.
	OnStart []string `json:"on_start,omitempty"`
	// A list of email addresses to be notified when a run successfully
	// completes. A run is considered to have completed successfully if it ends
	// with a `TERMINATED` `life_cycle_state` and a `SUCCESSFUL` result_state.
	// If not specified on job creation, reset, or update, the list is empty,
	// and notifications are not sent.
	OnSuccess []string `json:"on_success,omitempty"`
}

type JobSettings

type JobSettings struct {
	// An optional continuous property for this job. The continuous property
	// will ensure that there is always one run executing. Only one of
	// `schedule` and `continuous` can be used.
	Continuous *Continuous `json:"continuous,omitempty"`
	// An optional set of email addresses that is notified when runs of this job
	// begin or complete as well as when this job is deleted. The default
	// behavior is to not send any emails.
	EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
	// The format of the job. This field is ignored in Create/Update/Reset
	// calls. When using the Jobs API 2.1, this value is always set to
	// `"MULTI_TASK"`.
	Format JobSettingsFormat `json:"format,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// A list of job cluster specifications that can be shared and reused by
	// tasks of this job. Libraries cannot be declared in a shared job cluster.
	// You must declare dependent libraries in task settings.
	JobClusters []JobCluster `json:"job_clusters,omitempty"`
	// An optional maximum allowed number of concurrent runs of the job.
	//
	// Set this value if you want to be able to execute multiple runs of the
	// same job concurrently. This is useful for example if you trigger your job
	// on a frequent schedule and want to allow consecutive runs to overlap with
	// each other, or if you want to trigger multiple runs which differ by their
	// input parameters.
	//
	// This setting affects only new runs. For example, suppose the job’s
	// concurrency is 4 and there are 4 concurrent active runs. Then setting the
	// concurrency to 3 won’t kill any of the active runs. However, from then
	// on, new runs are skipped unless there are fewer than 3 active runs.
	//
	// This value cannot exceed 1000. Setting this value to 0 causes all new
	// runs to be skipped. The default behavior is to allow only 1 concurrent
	// run.
	MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"`
	// An optional name for the job.
	Name string `json:"name,omitempty"`
	// An optional periodic schedule for this job. The default behavior is that
	// the job only runs when triggered by clicking “Run Now” in the Jobs UI
	// or sending an API request to `runNow`.
	Schedule *CronSchedule `json:"schedule,omitempty"`
	// A map of tags associated with the job. These are forwarded to the cluster
	// as cluster tags for jobs clusters, and are subject to the same
	// limitations as cluster tags. A maximum of 25 tags can be added to the
	// job.
	Tags map[string]string `json:"tags,omitempty"`
	// A list of task specifications to be executed by this job.
	Tasks []JobTaskSettings `json:"tasks,omitempty"`
	// An optional timeout applied to each run of this job. The default behavior
	// is to have no timeout.
	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
	// Trigger settings for the job. Can be used to trigger a run when new files
	// arrive in an external location. The default behavior is that the job runs
	// only when triggered by clicking “Run Now” in the Jobs UI or sending
	// an API request to `runNow`.
	Trigger *TriggerSettings `json:"trigger,omitempty"`
	// A collection of system notification IDs to notify when the run begins or
	// completes. The default behavior is to not send any system notifications.
	WebhookNotifications *JobWebhookNotifications `json:"webhook_notifications,omitempty"`
}

type JobSettingsFormat

type JobSettingsFormat string

The format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1, this value is always set to `"MULTI_TASK"`.

const JobSettingsFormatMultiTask JobSettingsFormat = `MULTI_TASK`
const JobSettingsFormatSingleTask JobSettingsFormat = `SINGLE_TASK`

func (*JobSettingsFormat) Set added in v0.2.0

func (jsf *JobSettingsFormat) Set(v string) error

Set raw string value and validate it against allowed values

func (*JobSettingsFormat) String added in v0.2.0

func (jsf *JobSettingsFormat) String() string

String representation for fmt.Print

func (*JobSettingsFormat) Type added in v0.2.0

func (jsf *JobSettingsFormat) Type() string

Type always returns JobSettingsFormat to satisfy [pflag.Value] interface

type JobTaskSettings

type JobTaskSettings struct {
	// If dbt_task, indicates that this must execute a dbt task. It requires
	// both Databricks SQL and the ability to use a serverless or a pro SQL
	// warehouse.
	DbtTask *DbtTask `json:"dbt_task,omitempty"`
	// An optional array of objects specifying the dependency graph of the task.
	// All tasks specified in this field must complete successfully before
	// executing this task. The key is `task_key`, and the value is the name
	// assigned to the dependent task. This field is required when a job
	// consists of more than one task.
	DependsOn []TaskDependenciesItem `json:"depends_on,omitempty"`
	// An optional description for this task. The maximum length is 4096 bytes.
	Description string `json:"description,omitempty"`
	// An optional set of email addresses that is notified when runs of this
	// task begin or complete as well as when this task is deleted. The default
	// behavior is to not send any emails.
	EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
	// If existing_cluster_id, the ID of an existing cluster that is used for
	// all runs of this task. When running tasks on an existing cluster, you may
	// need to manually restart the cluster if it stops responding. We suggest
	// running jobs on new clusters for greater reliability.
	ExistingClusterId string `json:"existing_cluster_id,omitempty"`
	// If job_cluster_key, this task is executed reusing the cluster specified
	// in `job.settings.job_clusters`.
	JobClusterKey string `json:"job_cluster_key,omitempty"`
	// An optional list of libraries to be installed on the cluster that
	// executes the task. The default value is an empty list.
	Libraries []libraries.Library `json:"libraries,omitempty"`
	// An optional maximum number of times to retry an unsuccessful run. A run
	// is considered to be unsuccessful if it completes with the `FAILED`
	// result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means
	// to retry indefinitely and the value 0 means to never retry. The default
	// behavior is to never retry.
	MaxRetries int `json:"max_retries,omitempty"`
	// An optional minimal interval in milliseconds between the start of the
	// failed run and the subsequent retry run. The default behavior is that
	// unsuccessful runs are immediately retried.
	MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
	// If new_cluster, a description of a cluster that is created only for this
	// task.
	NewCluster *clusters.BaseClusterInfo `json:"new_cluster,omitempty"`
	// If notebook_task, indicates that this task must run a notebook. This
	// field may not be specified in conjunction with spark_jar_task.
	NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
	// If pipeline_task, indicates that this task must execute a Pipeline.
	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
	// If python_wheel_task, indicates that this job must execute a PythonWheel.
	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
	// An optional policy to specify whether to retry a task when it times out.
	// The default behavior is to not retry on timeout.
	RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
	// If spark_jar_task, indicates that this task must run a JAR.
	SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
	// If spark_python_task, indicates that this task must run a Python file.
	SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
	// If spark_submit_task, indicates that this task must be launched by the
	// spark submit script. This task can run only on new clusters.
	SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"`
	// If sql_task, indicates that this job must execute a SQL task.
	SqlTask *SqlTask `json:"sql_task,omitempty"`
	// A unique name for the task. This field is used to refer to this task from
	// other tasks. This field is required and must be unique within its parent
	// job. On Update or Reset, this field is used to reference the tasks to be
	// updated or reset. The maximum length is 100 characters.
	TaskKey string `json:"task_key"`
	// An optional timeout applied to each run of this job task. The default
	// behavior is to have no timeout.
	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
}

type JobWebhookNotifications added in v0.2.0

type JobWebhookNotifications struct {
	// An optional list of system notification IDs to call when the run fails. A
	// maximum of 3 destinations can be specified for the `on_failure` property.
	OnFailure []JobWebhookNotificationsOnFailureItem `json:"on_failure,omitempty"`
	// An optional list of system notification IDs to call when the run starts.
	// A maximum of 3 destinations can be specified for the `on_start` property.
	OnStart []JobWebhookNotificationsOnStartItem `json:"on_start,omitempty"`
	// An optional list of system notification IDs to call when the run
	// completes successfully. A maximum of 3 destinations can be specified for
	// the `on_success` property.
	OnSuccess []JobWebhookNotificationsOnSuccessItem `json:"on_success,omitempty"`
}

type JobWebhookNotificationsOnFailureItem added in v0.2.0

type JobWebhookNotificationsOnFailureItem struct {
	Id string `json:"id,omitempty"`
}

type JobWebhookNotificationsOnStartItem added in v0.2.0

type JobWebhookNotificationsOnStartItem struct {
	Id string `json:"id,omitempty"`
}

type JobWebhookNotificationsOnSuccessItem added in v0.2.0

type JobWebhookNotificationsOnSuccessItem struct {
	Id string `json:"id,omitempty"`
}

type JobsAPI

type JobsAPI struct {
	// contains filtered or unexported fields
}

The Jobs API allows you to create, edit, and delete jobs.

You can use a Databricks job to run a data processing or data analysis task in a Databricks cluster with scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use scheduling system. You can implement job tasks using notebooks, JARs, Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java applications.

You should never hard code secrets or store them in plain text. Use the :service:secrets to manage secrets in the Databricks CLI. Use the Secrets utility to reference secrets in notebooks and jobs.

func NewJobs

func NewJobs(client *client.DatabricksClient) *JobsAPI
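
A minimal sketch of wiring the API together and listing jobs, assuming a configured *client.DatabricksClient named c and the standard context, fmt, and log imports; error handling is abbreviated.

ctx := context.Background()
api := jobs.NewJobs(c)

all, err := api.ListAll(ctx, jobs.List{Limit: 25})
if err != nil {
	log.Fatal(err)
}
for _, j := range all {
	if j.Settings != nil {
		fmt.Printf("%d\t%s\n", j.JobId, j.Settings.Name)
	}
}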

func (*JobsAPI) BaseJobSettingsNameToJobIdMap added in v0.3.0

func (a *JobsAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request List) (map[string]int64, error)

BaseJobSettingsNameToJobIdMap calls JobsAPI.ListAll and creates a map of results with BaseJob.Settings.Name as key and BaseJob.JobId as value.

Returns an error if there's more than one BaseJob with the same .Settings.Name.

Note: All BaseJob instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*JobsAPI) CancelAllRuns

func (a *JobsAPI) CancelAllRuns(ctx context.Context, request CancelAllRuns) error

Cancel all runs of a job.

Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs from being started.

func (*JobsAPI) CancelAllRunsByJobId

func (a *JobsAPI) CancelAllRunsByJobId(ctx context.Context, jobId int64) error

Cancel all runs of a job.

Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs from being started.

func (*JobsAPI) CancelRun

func (a *JobsAPI) CancelRun(ctx context.Context, request CancelRun) error

Cancel a job run.

Cancels a job run. The run is canceled asynchronously, so it may still be running when this request completes.

func (*JobsAPI) CancelRunAndWait

func (a *JobsAPI) CancelRunAndWait(ctx context.Context, cancelRun CancelRun, options ...retries.Option[Run]) (*Run, error)

Calls JobsAPI.CancelRun and waits to reach TERMINATED or SKIPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
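
For example, to allow up to an hour for the run to reach a terminal state, reusing api and ctx from the sketch under NewJobs and assuming the retries and time packages are imported; the run ID is a placeholder.

_, err := api.CancelRunAndWait(ctx, jobs.CancelRun{RunId: 123},
	retries.Timeout[jobs.Run](60*time.Minute))
if err != nil {
	log.Fatal(err)
}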

func (*JobsAPI) CancelRunByRunId

func (a *JobsAPI) CancelRunByRunId(ctx context.Context, runId int64) error

Cancel a job run.

Cancels a job run. The run is canceled asynchronously, so it may still be running when this request completes.

func (*JobsAPI) CancelRunByRunIdAndWait

func (a *JobsAPI) CancelRunByRunIdAndWait(ctx context.Context, runId int64, options ...retries.Option[Run]) (*Run, error)

func (*JobsAPI) Create

func (a *JobsAPI) Create(ctx context.Context, request CreateJob) (*CreateResponse, error)

Create a new job.

Create a new job.

func (*JobsAPI) Delete

func (a *JobsAPI) Delete(ctx context.Context, request DeleteJob) error

Delete a job.

Deletes a job.

func (*JobsAPI) DeleteByJobId

func (a *JobsAPI) DeleteByJobId(ctx context.Context, jobId int64) error

Delete a job.

Deletes a job.

func (*JobsAPI) DeleteRun

func (a *JobsAPI) DeleteRun(ctx context.Context, request DeleteRun) error

Delete a job run.

Deletes a non-active run. Returns an error if the run is active.

func (*JobsAPI) DeleteRunByRunId

func (a *JobsAPI) DeleteRunByRunId(ctx context.Context, runId int64) error

Delete a job run.

Deletes a non-active run. Returns an error if the run is active.

func (*JobsAPI) ExportRun

func (a *JobsAPI) ExportRun(ctx context.Context, request ExportRun) (*ExportRunOutput, error)

Export and retrieve a job run.

Export and retrieve the job run task.

func (*JobsAPI) Get

func (a *JobsAPI) Get(ctx context.Context, request Get) (*Job, error)

Get a single job.

Retrieves the details for a single job.

func (*JobsAPI) GetByJobId

func (a *JobsAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error)

Get a single job.

Retrieves the details for a single job.

func (*JobsAPI) GetBySettingsName

func (a *JobsAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, error)

GetBySettingsName calls JobsAPI.BaseJobSettingsNameToJobIdMap and returns a single BaseJob.

Returns an error if there's more than one BaseJob with the same .Settings.Name.

Note: All BaseJob instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*JobsAPI) GetRun

func (a *JobsAPI) GetRun(ctx context.Context, request GetRun) (*Run, error)

Get a single job run.

Retrieve the metadata of a run.

func (*JobsAPI) GetRunAndWait

func (a *JobsAPI) GetRunAndWait(ctx context.Context, getRun GetRun, options ...retries.Option[Run]) (*Run, error)

Calls JobsAPI.GetRun and waits to reach TERMINATED or SKIPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.

func (*JobsAPI) GetRunOutput

func (a *JobsAPI) GetRunOutput(ctx context.Context, request GetRunOutput) (*RunOutput, error)

Get the output for a single run.

Retrieve the output and metadata of a single task run. When a notebook task returns a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks restricts this API to returning the first 5 MB of the output. To return a larger result, you can store job results in a cloud storage service.

This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after 60 days. If you want to reference them beyond 60 days, you must save old run results before they expire.
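
A sketch of retrieving a notebook task's exit value, reusing api and ctx from the sketch under NewJobs; the run ID is a placeholder, and the NotebookOutput field on the returned RunOutput is an assumption mirroring the NotebookOutput type in this package.

out, err := api.GetRunOutput(ctx, jobs.GetRunOutput{RunId: 456}) // placeholder task run ID
if err != nil {
	log.Fatal(err)
}
if out.NotebookOutput != nil {
	fmt.Println(out.NotebookOutput.Result) // value passed to dbutils.notebook.exit()
}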

func (*JobsAPI) GetRunOutputByRunId

func (a *JobsAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error)

Get the output for a single run.

Retrieve the output and metadata of a single task run. When a notebook task returns a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks restricts this API to returning the first 5 MB of the output. To return a larger result, you can store job results in a cloud storage service.

This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after 60 days. If you want to reference them beyond 60 days, you must save old run results before they expire.

func (*JobsAPI) Impl

func (a *JobsAPI) Impl() JobsService

Impl returns low-level Jobs API implementation

func (*JobsAPI) ListAll

func (a *JobsAPI) ListAll(ctx context.Context, request List) ([]BaseJob, error)

List all jobs.

Retrieves a list of jobs.

This method is generated by Databricks SDK Code Generator.

func (*JobsAPI) ListRunsAll

func (a *JobsAPI) ListRunsAll(ctx context.Context, request ListRuns) ([]BaseRun, error)

List runs for a job.

List runs in descending order by start time.

This method is generated by Databricks SDK Code Generator.

func (*JobsAPI) RepairRun

func (a *JobsAPI) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error)

Repair a job run.

Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job and task settings, and can be viewed in the history for the original job run.

func (*JobsAPI) RepairRunAndWait

func (a *JobsAPI) RepairRunAndWait(ctx context.Context, repairRun RepairRun, options ...retries.Option[Run]) (*Run, error)

Calls JobsAPI.RepairRun and waits to reach TERMINATED or SKIPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.

func (*JobsAPI) Reset

func (a *JobsAPI) Reset(ctx context.Context, request ResetJob) error

Overwrites all settings for a job.

Overwrites all the settings for a specific job. Use the Update endpoint to update job settings partially.

func (*JobsAPI) RunNow

func (a *JobsAPI) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)

Trigger a new job run.

Run a job and return the `run_id` of the triggered run.

func (*JobsAPI) RunNowAndWait

func (a *JobsAPI) RunNowAndWait(ctx context.Context, runNow RunNow, options ...retries.Option[Run]) (*Run, error)

Calls JobsAPI.RunNow and waits to reach TERMINATED or SKIPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
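
For instance, triggering a run and blocking until it terminates, reusing api and ctx from the sketch under NewJobs; the job ID is a placeholder, and the JobId field on RunNow and the RunPageUrl field on Run are assumptions mirroring the other request and response types in this package.

run, err := api.RunNowAndWait(ctx, jobs.RunNow{JobId: 123456}) // placeholder job ID
if err != nil {
	log.Fatal(err)
}
fmt.Println(run.RunPageUrl)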

func (*JobsAPI) Submit

func (a *JobsAPI) Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error)

Create and trigger a one-time run.

Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job. Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the run state after the job is submitted.

func (*JobsAPI) SubmitAndWait

func (a *JobsAPI) SubmitAndWait(ctx context.Context, submitRun SubmitRun, options ...retries.Option[Run]) (*Run, error)

Calls JobsAPI.Submit and waits to reach TERMINATED or SKIPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.

func (*JobsAPI) Update

func (a *JobsAPI) Update(ctx context.Context, request UpdateJob) error

Partially updates a job.

Add, update, or remove specific settings of an existing job. Use ResetJob to overwrite all job settings.

func (*JobsAPI) WithImpl

func (a *JobsAPI) WithImpl(impl JobsService) *JobsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type JobsService

type JobsService interface {

	// Cancel all runs of a job.
	//
	// Cancels all active runs of a job. The runs are canceled asynchronously,
	// so it doesn't prevent new runs from being started.
	CancelAllRuns(ctx context.Context, request CancelAllRuns) error

	// Cancel a job run.
	//
	// Cancels a job run. The run is canceled asynchronously, so it may still be
	// running when this request completes.
	CancelRun(ctx context.Context, request CancelRun) error

	// Create a new job.
	//
	// Create a new job.
	Create(ctx context.Context, request CreateJob) (*CreateResponse, error)

	// Delete a job.
	//
	// Deletes a job.
	Delete(ctx context.Context, request DeleteJob) error

	// Delete a job run.
	//
	// Deletes a non-active run. Returns an error if the run is active.
	DeleteRun(ctx context.Context, request DeleteRun) error

	// Export and retrieve a job run.
	//
	// Export and retrieve the job run task.
	ExportRun(ctx context.Context, request ExportRun) (*ExportRunOutput, error)

	// Get a single job.
	//
	// Retrieves the details for a single job.
	Get(ctx context.Context, request Get) (*Job, error)

	// Get a single job run.
	//
	// Retrieve the metadata of a run.
	GetRun(ctx context.Context, request GetRun) (*Run, error)

	// Get the output for a single run.
	//
	// Retrieve the output and metadata of a single task run. When a notebook
	// task returns a value through the `dbutils.notebook.exit()` call, you can
	// use this endpoint to retrieve that value. Databricks restricts this API
	// to returning the first 5 MB of the output. To return a larger result, you
	// can store job results in a cloud storage service.
	//
	// This endpoint validates that the __run_id__ parameter is valid and
	// returns an HTTP status code 400 if the __run_id__ parameter is invalid.
	// Runs are automatically removed after 60 days. If you want to reference
	// them beyond 60 days, you must save old run results before they expire.
	GetRunOutput(ctx context.Context, request GetRunOutput) (*RunOutput, error)

	// List all jobs.
	//
	// Retrieves a list of jobs.
	//
	// Use ListAll() to get all BaseJob instances, which will iterate over every result page.
	List(ctx context.Context, request List) (*ListJobsResponse, error)

	// List runs for a job.
	//
	// List runs in descending order by start time.
	//
	// Use ListRunsAll() to get all BaseRun instances, which will iterate over every result page.
	ListRuns(ctx context.Context, request ListRuns) (*ListRunsResponse, error)

	// Repair a job run.
	//
	// Re-run one or more tasks. Tasks are re-run as part of the original job
	// run. They use the current job and task settings, and can be viewed in the
	// history for the original job run.
	RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error)

	// Overwrites all settings for a job.
	//
	// Overwrites all the settings for a specific job. Use the Update endpoint
	// to update job settings partially.
	Reset(ctx context.Context, request ResetJob) error

	// Trigger a new job run.
	//
	// Run a job and return the `run_id` of the triggered run.
	RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error)

	// Create and trigger a one-time run.
	//
	// Submit a one-time run. This endpoint allows you to submit a workload
	// directly without creating a job. Runs submitted using this endpoint
	// don’t display in the UI. Use the `jobs/runs/get` API to check the run
	// state after the job is submitted.
	Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error)

	// Partially updates a job.
	//
	// Add, update, or remove specific settings of an existing job. Use
	// ResetJob to overwrite all job settings.
	Update(ctx context.Context, request UpdateJob) error
}

The Jobs API allows you to create, edit, and delete jobs.

You can use a Databricks job to run a data processing or data analysis task in a Databricks cluster with scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use scheduling system. You can implement job tasks using notebooks, JARs, Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java applications.

You should never hard code secrets or store them in plain text. Use the :service:secrets to manage secrets in the Databricks CLI. Use the Secrets utility to reference secrets in notebooks and jobs.

type List

type List struct {
	// Whether to include task and cluster details in the response.
	ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"`
	// The number of jobs to return. This value must be greater than 0 and less
	// than or equal to 25. The default value is 20.
	Limit int `json:"-" url:"limit,omitempty"`
	// A filter on the list based on the exact (case insensitive) job name.
	Name string `json:"-" url:"name,omitempty"`
	// The offset of the first job to return, relative to the most recently
	// created job.
	Offset int `json:"-" url:"offset,omitempty"`
}

List all jobs

type ListJobsResponse

type ListJobsResponse struct {
	HasMore bool `json:"has_more,omitempty"`
	// The list of jobs.
	Jobs []BaseJob `json:"jobs,omitempty"`
}
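
JobsAPI.ListAll already walks every page for you. When page-by-page control is needed, the sketch below uses the low-level service returned by Impl, advancing Offset until HasMore is false (api and ctx as in the sketch under NewJobs).

svc := api.Impl()
var collected []jobs.BaseJob
for offset := 0; ; {
	page, err := svc.List(ctx, jobs.List{Limit: 25, Offset: offset})
	if err != nil {
		log.Fatal(err)
	}
	collected = append(collected, page.Jobs...)
	if !page.HasMore {
		break
	}
	offset += len(page.Jobs)
}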

type ListRuns

type ListRuns struct {
	// If active_only is `true`, only active runs are included in the results;
	// otherwise, lists both active and completed runs. An active run is a run
	// in the `PENDING`, `RUNNING`, or `TERMINATING` state. This field cannot be
	// `true` when completed_only is `true`.
	ActiveOnly bool `json:"-" url:"active_only,omitempty"`
	// If completed_only is `true`, only completed runs are included in the
	// results; otherwise, lists both active and completed runs. This field
	// cannot be `true` when active_only is `true`.
	CompletedOnly bool `json:"-" url:"completed_only,omitempty"`
	// Whether to include task and cluster details in the response.
	ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"`
	// The job for which to list runs. If omitted, the Jobs service lists runs
	// from all jobs.
	JobId int64 `json:"-" url:"job_id,omitempty"`
	// The number of runs to return. This value must be greater than 0 and less
	// than 25. The default value is 25. If a request specifies a limit of 0,
	// the service instead uses the maximum limit.
	Limit int `json:"-" url:"limit,omitempty"`
	// The offset of the first run to return, relative to the most recent run.
	Offset int `json:"-" url:"offset,omitempty"`
	// The type of runs to return. For a description of run types, see
	// :method:jobs/getRun.
	RunType ListRunsRunType `json:"-" url:"run_type,omitempty"`
	// Show runs that started _at or after_ this value. The value must be a UTC
	// timestamp in milliseconds. Can be combined with _start_time_to_ to filter
	// by a time range.
	StartTimeFrom int `json:"-" url:"start_time_from,omitempty"`
	// Show runs that started _at or before_ this value. The value must be a UTC
	// timestamp in milliseconds. Can be combined with _start_time_from_ to
	// filter by a time range.
	StartTimeTo int `json:"-" url:"start_time_to,omitempty"`
}

List runs for a job

type ListRunsResponse

type ListRunsResponse struct {
	// If true, additional runs matching the provided filter are available for
	// listing.
	HasMore bool `json:"has_more,omitempty"`
	// A list of runs, from most recently started to least.
	Runs []BaseRun `json:"runs,omitempty"`
}

type ListRunsRunType

type ListRunsRunType string

The type of a run: `JOB_RUN`, `SUBMIT_RUN`, or `WORKFLOW_RUN`.

const ListRunsRunTypeJobRun ListRunsRunType = `JOB_RUN`

Normal job run. A run created with :method:jobs/runNow.

const ListRunsRunTypeSubmitRun ListRunsRunType = `SUBMIT_RUN`

Submit run. A run created with :method:jobs/submit.

const ListRunsRunTypeWorkflowRun ListRunsRunType = `WORKFLOW_RUN`

Workflow run. A run created with [dbutils.notebook.run](/dev-tools/databricks-utils.html#dbutils-workflow).

func (*ListRunsRunType) Set added in v0.2.0

func (lrrt *ListRunsRunType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListRunsRunType) String added in v0.2.0

func (lrrt *ListRunsRunType) String() string

String representation for fmt.Print

func (*ListRunsRunType) Type added in v0.2.0

func (lrrt *ListRunsRunType) Type() string

Type always returns ListRunsRunType to satisfy [pflag.Value] interface

type NotebookOutput

type NotebookOutput struct {
	// The value passed to
	// [dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit).
	// Databricks restricts this API to return the first 5 MB of the value. For
	// a larger result, your job can store the results in a cloud storage
	// service. This field is absent if `dbutils.notebook.exit()` was never
	// called.
	Result string `json:"result,omitempty"`
	// Whether or not the result was truncated.
	Truncated bool `json:"truncated,omitempty"`
}

type NotebookTask

type NotebookTask struct {
	// Base parameters to be used for each run of this job. If the run is
	// initiated by a call to :method:jobs/runNow with parameters specified, the
	// two parameters maps are merged. If the same key is specified in
	// `base_parameters` and in `run-now`, the value from `run-now` is used.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// If the notebook takes a parameter that is not specified in the job’s
	// `base_parameters` or the `run-now` override parameters, the default value
	// from the notebook is used.
	//
	// Retrieve these parameters in a notebook using [dbutils.widgets.get].
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	// [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets
	BaseParameters map[string]string `json:"base_parameters,omitempty"`
	// The path of the notebook to be run in the Databricks workspace or remote
	// repository. For notebooks stored in the Databricks workspace, the path
	// must be absolute and begin with a slash. For notebooks stored in a remote
	// repository, the path must be relative. This field is required.
	NotebookPath string `json:"notebook_path"`
	// Location of the notebook: the Databricks workspace (`WORKSPACE`) or a
	// remote Git repository (`GIT`).
	Source NotebookTaskSource `json:"source,omitempty"`
}

type NotebookTaskSource

type NotebookTaskSource string

Location of the notebook: the Databricks workspace (`WORKSPACE`) or a remote Git repository (`GIT`).

const NotebookTaskSourceGit NotebookTaskSource = `GIT`

Notebook is located in cloud Git provider.

const NotebookTaskSourceWorkspace NotebookTaskSource = `WORKSPACE`

Notebook is located in Databricks workspace.

func (*NotebookTaskSource) Set added in v0.2.0

func (nts *NotebookTaskSource) Set(v string) error

Set raw string value and validate it against allowed values

func (*NotebookTaskSource) String added in v0.2.0

func (nts *NotebookTaskSource) String() string

String representation for fmt.Print

func (*NotebookTaskSource) Type added in v0.2.0

func (nts *NotebookTaskSource) Type() string

Type always returns NotebookTaskSource to satisfy [pflag.Value] interface

type PipelineParams

type PipelineParams struct {
	// If true, triggers a full refresh on the delta live table.
	FullRefresh bool `json:"full_refresh,omitempty"`
}

type PipelineTask

type PipelineTask struct {
	// If true, a full refresh will be triggered on the delta live table.
	FullRefresh bool `json:"full_refresh,omitempty"`
	// The full name of the pipeline task to execute.
	PipelineId string `json:"pipeline_id,omitempty"`
}

type PythonWheelTask

type PythonWheelTask struct {
	// Named entry point to use. If it does not exist in the metadata of the
	// package, the function from the package is executed directly using
	// `$packageName.$entryPoint()`.
	EntryPoint string `json:"entry_point,omitempty"`
	// Command-line parameters passed to Python wheel task in the form of
	// `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if
	// `parameters` is not null.
	NamedParameters map[string]string `json:"named_parameters,omitempty"`
	// Name of the package to execute
	PackageName string `json:"package_name,omitempty"`
	// Command-line parameters passed to Python wheel task. Leave it empty if
	// `named_parameters` is not null.
	Parameters []string `json:"parameters,omitempty"`
}
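
A short, illustrative sketch of a PythonWheelTask; per the field documentation, populate either NamedParameters or Parameters, never both. The package name, entry point, and parameter values are placeholders:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		task := jobs.PythonWheelTask{
			PackageName: "my_wheel", // placeholder package name
			EntryPoint:  "main",     // entry point from the wheel metadata
			// Populate either NamedParameters or Parameters, not both.
			NamedParameters: map[string]string{
				"name": "task",
				"data": "dbfs:/path/to/data.json",
			},
		}

		b, _ := json.MarshalIndent(task, "", "  ")
		fmt.Println(string(b))
	}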

type RepairHistoryItem

type RepairHistoryItem struct {
	// The end time of the (repaired) run.
	EndTime int64 `json:"end_time,omitempty"`
	// The ID of the repair. Only returned for the items that represent a repair
	// in `repair_history`.
	Id int64 `json:"id,omitempty"`
	// The start time of the (repaired) run.
	StartTime int64 `json:"start_time,omitempty"`
	// The result and lifecycle state of the run.
	State *RunState `json:"state,omitempty"`
	// The run IDs of the task runs that ran as part of this repair history
	// item.
	TaskRunIds []int64 `json:"task_run_ids,omitempty"`
	// The repair history item type. Indicates whether a run is the original run
	// or a repair run.
	Type RepairHistoryItemType `json:"type,omitempty"`
}

type RepairHistoryItemType

type RepairHistoryItemType string

The repair history item type. Indicates whether a run is the original run or a repair run.

const RepairHistoryItemTypeOriginal RepairHistoryItemType = `ORIGINAL`
const RepairHistoryItemTypeRepair RepairHistoryItemType = `REPAIR`

func (*RepairHistoryItemType) Set added in v0.2.0

func (rhit *RepairHistoryItemType) Set(v string) error

Set raw string value and validate it against allowed values

func (*RepairHistoryItemType) String added in v0.2.0

func (rhit *RepairHistoryItemType) String() string

String representation for fmt.Print

func (*RepairHistoryItemType) Type added in v0.2.0

func (rhit *RepairHistoryItemType) Type() string

Type always returns RepairHistoryItemType to satisfy [pflag.Value] interface

type RepairRun

type RepairRun struct {
	// An array of commands to execute for jobs with the dbt task, for example
	// `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]`
	DbtCommands []string `json:"dbt_commands,omitempty"`
	// A list of parameters for jobs with Spark JAR tasks, for example
	// `\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to
	// invoke the main function of the main class specified in the Spark JAR
	// task. If not specified upon `run-now`, it defaults to an empty list.
	// jar_params cannot be specified in conjunction with notebook_params. The
	// JSON representation of this field (for example `{\"jar_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables](/jobs.html#parameter-variables) to set
	// parameters containing information about job runs.
	JarParams []string `json:"jar_params,omitempty"`
	// The ID of the latest repair. This parameter is not required when
	// repairing a run for the first time, but must be provided on subsequent
	// requests to repair the same run.
	LatestRepairId int64 `json:"latest_repair_id,omitempty"`
	// A map from keys to values for jobs with notebook task, for example
	// `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The map
	// is passed to the notebook and is accessible through the
	// [dbutils.widgets.get] function.
	//
	// If not specified upon `run-now`, the triggered run uses the job’s base
	// parameters.
	//
	// notebook_params cannot be specified in conjunction with jar_params.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// The JSON representation of this field (for example
	// `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot
	// exceed 10,000 bytes.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	// [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
	NotebookParams map[string]string `json:"notebook_params,omitempty"`

	PipelineParams *PipelineParams `json:"pipeline_params,omitempty"`
	// A map from keys to values for jobs with Python wheel task, for example
	// `"python_named_params": {"name": "task", "data":
	// "dbfs:/path/to/data.json"}`.
	PythonNamedParams map[string]string `json:"python_named_params,omitempty"`
	// A list of parameters for jobs with Python tasks, for example
	// `\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to
	// Python file as command-line parameters. If specified upon `run-now`, it
	// would overwrite the parameters specified in job setting. The JSON
	// representation of this field (for example `{\"python_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	PythonParams []string `json:"python_params,omitempty"`
	// If true, repair all failed tasks. Only one of rerun_tasks or
	// rerun_all_failed_tasks can be used.
	RerunAllFailedTasks bool `json:"rerun_all_failed_tasks,omitempty"`
	// The task keys of the task runs to repair.
	RerunTasks []string `json:"rerun_tasks,omitempty"`
	// The job run ID of the run to repair. The run must not be in progress.
	RunId int64 `json:"run_id"`
	// A list of parameters for jobs with spark submit task, for example
	// `\"spark_submit_params\": [\"--class\",
	// \"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to
	// spark-submit script as command-line parameters. If specified upon
	// `run-now`, it would overwrite the parameters specified in job setting.
	// The JSON representation of this field (for example
	// `{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
	// A map from keys to values for jobs with SQL task, for example
	// `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task
	// does not support custom parameters.
	SqlParams map[string]string `json:"sql_params,omitempty"`
}
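
For illustration, a sketch of a RepairRun request that re-runs two failed tasks of an existing run; the run ID and task keys are placeholders, and LatestRepairId would only be set when repairing the same run a second time:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		req := jobs.RepairRun{
			// Placeholder: the run to repair; it must not be in progress.
			RunId: 1234567890,
			// Only one of RerunTasks or RerunAllFailedTasks may be used.
			RerunTasks: []string{"ingest", "transform"},
			// On a second repair of the same run, pass the ID returned by the
			// previous repair:
			// LatestRepairId: 42,
		}

		b, _ := json.MarshalIndent(req, "", "  ")
		fmt.Println(string(b))
	}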

type RepairRunResponse

type RepairRunResponse struct {
	// The ID of the repair.
	RepairId int64 `json:"repair_id,omitempty"`
}

type ResetJob

type ResetJob struct {
	// The canonical identifier of the job to reset. This field is required.
	JobId int64 `json:"job_id"`
	// The new settings of the job. These settings completely replace the old
	// settings.
	//
	// Changes to the field `JobSettings.timeout_seconds` are applied to active
	// runs. Changes to other fields are applied to future runs only.
	NewSettings JobSettings `json:"new_settings"`
}

type Run

type Run struct {
	// The sequence number of this run attempt for a triggered job run. The
	// initial attempt of a run has an attempt_number of 0\. If the initial run
	// attempt fails, and the job has a retry policy (`max_retries` \> 0),
	// subsequent runs are created with an `original_attempt_run_id` of the
	// original attempt’s ID and an incrementing `attempt_number`. Runs are
	// retried only until they succeed, and the maximum `attempt_number` is the
	// same as the `max_retries` value for the job.
	AttemptNumber int `json:"attempt_number,omitempty"`
	// The time in milliseconds it took to terminate the cluster and clean up
	// any associated artifacts. The duration of a task run is the sum of the
	// `setup_duration`, `execution_duration`, and the `cleanup_duration`. The
	// `cleanup_duration` field is set to 0 for multitask job runs. The total
	// duration of a multitask job run is the value of the `run_duration` field.
	CleanupDuration int64 `json:"cleanup_duration,omitempty"`
	// The cluster used for this run. If the run is specified to use a new
	// cluster, this field is set once the Jobs service has requested a cluster
	// for the run.
	ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"`
	// A snapshot of the job’s cluster specification when this run was
	// created.
	ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"`
	// The continuous trigger that triggered this run.
	Continuous *Continuous `json:"continuous,omitempty"`
	// The creator user name. This field won’t be included in the response if
	// the user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// The time at which this run ended in epoch milliseconds (milliseconds
	// since 1/1/1970 UTC). This field is set to 0 if the job is still running.
	EndTime int64 `json:"end_time,omitempty"`
	// The time in milliseconds it took to execute the commands in the JAR or
	// notebook until they completed, failed, timed out, were cancelled, or
	// encountered an unexpected error. The duration of a task run is the sum of
	// the `setup_duration`, `execution_duration`, and the `cleanup_duration`.
	// The `execution_duration` field is set to 0 for multitask job runs. The
	// total duration of a multitask job run is the value of the `run_duration`
	// field.
	ExecutionDuration int64 `json:"execution_duration,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// A list of job cluster specifications that can be shared and reused by
	// tasks of this job. Libraries cannot be declared in a shared job cluster.
	// You must declare dependent libraries in task settings.
	JobClusters []JobCluster `json:"job_clusters,omitempty"`
	// The canonical identifier of the job that contains this run.
	JobId int64 `json:"job_id,omitempty"`
	// A unique identifier for this job run. This is set to the same value as
	// `run_id`.
	NumberInJob int64 `json:"number_in_job,omitempty"`
	// If this run is a retry of a prior run attempt, this field contains the
	// run_id of the original attempt; otherwise, it is the same as the run_id.
	OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"`
	// The parameters used for this run.
	OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"`
	// The repair history of the run.
	RepairHistory []RepairHistoryItem `json:"repair_history,omitempty"`
	// The time in milliseconds it took the job run and all of its repairs to
	// finish.
	RunDuration int `json:"run_duration,omitempty"`
	// The canonical identifier of the run. This ID is unique across all runs of
	// all jobs.
	RunId int64 `json:"run_id,omitempty"`
	// An optional name for the run. The maximum allowed length is 4096 bytes in
	// UTF-8 encoding.
	RunName string `json:"run_name,omitempty"`
	// The URL to the detail page of the run.
	RunPageUrl string `json:"run_page_url,omitempty"`
	// This describes an enum
	RunType RunType `json:"run_type,omitempty"`
	// The cron schedule that triggered this run if it was triggered by the
	// periodic scheduler.
	Schedule *CronSchedule `json:"schedule,omitempty"`
	// The time in milliseconds it took to set up the cluster. For runs that run
	// on new clusters this is the cluster creation time, for runs that run on
	// existing clusters this time should be very short. The duration of a task
	// run is the sum of the `setup_duration`, `execution_duration`, and the
	// `cleanup_duration`. The `setup_duration` field is set to 0 for multitask
	// job runs. The total duration of a multitask job run is the value of the
	// `run_duration` field.
	SetupDuration int64 `json:"setup_duration,omitempty"`
	// The time at which this run was started in epoch milliseconds
	// (milliseconds since 1/1/1970 UTC). This may not be the time when the job
	// task starts executing, for example, if the job is scheduled to run on a
	// new cluster, this is the time the cluster creation call is issued.
	StartTime int64 `json:"start_time,omitempty"`
	// The result and lifecycle states of the run.
	State *RunState `json:"state,omitempty"`
	// The list of tasks performed by the run. Each task has its own `run_id`
	// which you can use to call `JobsGetOutput` to retrieve the run results.
	Tasks []RunTask `json:"tasks,omitempty"`
	// This describes an enum
	Trigger TriggerType `json:"trigger,omitempty"`
}
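
As the duration fields above note, a multitask job run reports its total in run_duration while a single-task run reports setup, execution, and cleanup separately. A purely illustrative helper that picks the appropriate total:

	package main

	import (
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	// totalDurationMillis follows the field docs above: it prefers
	// run_duration and falls back to summing the per-task components when
	// run_duration is not populated.
	func totalDurationMillis(r jobs.Run) int64 {
		if r.RunDuration > 0 {
			return int64(r.RunDuration)
		}
		return r.SetupDuration + r.ExecutionDuration + r.CleanupDuration
	}

	func main() {
		r := jobs.Run{SetupDuration: 60000, ExecutionDuration: 540000, CleanupDuration: 5000}
		fmt.Println(totalDurationMillis(r)) // 605000
	}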

type RunLifeCycleState

type RunLifeCycleState string

This describes an enum

const RunLifeCycleStateBlocked RunLifeCycleState = `BLOCKED`

The run is blocked on an upstream dependency.

const RunLifeCycleStateInternalError RunLifeCycleState = `INTERNAL_ERROR`

An exceptional state that indicates a failure in the Jobs service, such as network failure over a long period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the Jobs service terminates the cluster as soon as possible. This state is terminal.

const RunLifeCycleStatePending RunLifeCycleState = `PENDING`

The run has been triggered. If there is not already an active run of the same job, the cluster and execution context are being prepared. If there is already an active run of the same job, the run immediately transitions into the `SKIPPED` state without preparing any resources.

const RunLifeCycleStateRunning RunLifeCycleState = `RUNNING`

The task of this run is being executed.

const RunLifeCycleStateSkipped RunLifeCycleState = `SKIPPED`

This run was aborted because a previous run of the same job was already active. This state is terminal.

const RunLifeCycleStateTerminated RunLifeCycleState = `TERMINATED`

The task of this run has completed, and the cluster and execution context have been cleaned up. This state is terminal.

const RunLifeCycleStateTerminating RunLifeCycleState = `TERMINATING`

The task of this run has completed, and the cluster and execution context are being cleaned up.

const RunLifeCycleStateWaitingForRetry RunLifeCycleState = `WAITING_FOR_RETRY`

The run is waiting for a retry.

func (*RunLifeCycleState) Set added in v0.2.0

func (rlcs *RunLifeCycleState) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunLifeCycleState) String added in v0.2.0

func (rlcs *RunLifeCycleState) String() string

String representation for fmt.Print

func (*RunLifeCycleState) Type added in v0.2.0

func (rlcs *RunLifeCycleState) Type() string

Type always returns RunLifeCycleState to satisfy [pflag.Value] interface

type RunNow

type RunNow struct {
	// An array of commands to execute for jobs with the dbt task, for example
	// `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]`
	DbtCommands []string `json:"dbt_commands,omitempty"`
	// An optional token to guarantee the idempotency of job run requests. If a
	// run with the provided token already exists, the request does not create a
	// new run but returns the ID of the existing run instead. If a run with the
	// provided token is deleted, an error is returned.
	//
	// If you specify the idempotency token, upon failure you can retry until
	// the request succeeds. Databricks guarantees that exactly one run is
	// launched with that idempotency token.
	//
	// This token must have at most 64 characters.
	//
	// For more information, see [How to ensure idempotency for jobs].
	//
	// [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html
	IdempotencyToken string `json:"idempotency_token,omitempty"`
	// A list of parameters for jobs with Spark JAR tasks, for example
	// `\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to
	// invoke the main function of the main class specified in the Spark JAR
	// task. If not specified upon `run-now`, it defaults to an empty list.
	// jar_params cannot be specified in conjunction with notebook_params. The
	// JSON representation of this field (for example `{\"jar_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables](/jobs.html#parameter-variables) to set
	// parameters containing information about job runs.
	JarParams []string `json:"jar_params,omitempty"`
	// The ID of the job to be executed
	JobId int64 `json:"job_id"`
	// A map from keys to values for jobs with notebook task, for example
	// `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The map
	// is passed to the notebook and is accessible through the
	// [dbutils.widgets.get] function.
	//
	// If not specified upon `run-now`, the triggered run uses the job’s base
	// parameters.
	//
	// notebook_params cannot be specified in conjunction with jar_params.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// The JSON representation of this field (for example
	// `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot
	// exceed 10,000 bytes.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	// [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
	NotebookParams map[string]string `json:"notebook_params,omitempty"`

	PipelineParams *PipelineParams `json:"pipeline_params,omitempty"`
	// A map from keys to values for jobs with Python wheel task, for example
	// `"python_named_params": {"name": "task", "data":
	// "dbfs:/path/to/data.json"}`.
	PythonNamedParams map[string]string `json:"python_named_params,omitempty"`
	// A list of parameters for jobs with Python tasks, for example
	// `\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to
	// Python file as command-line parameters. If specified upon `run-now`, it
	// would overwrite the parameters specified in job setting. The JSON
	// representation of this field (for example `{\"python_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	PythonParams []string `json:"python_params,omitempty"`
	// A list of parameters for jobs with spark submit task, for example
	// `\"spark_submit_params\": [\"--class\",
	// \"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to
	// spark-submit script as command-line parameters. If specified upon
	// `run-now`, it would overwrite the parameters specified in job setting.
	// The JSON representation of this field (for example
	// `{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
	// A map from keys to values for jobs with SQL task, for example
	// `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task
	// does not support custom parameters.
	SqlParams map[string]string `json:"sql_params,omitempty"`
}
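
An illustrative sketch of a RunNow request for a notebook job; the job ID, parameter values, and idempotency token are placeholders. Remember that notebook_params and jar_params cannot be combined:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		req := jobs.RunNow{
			JobId: 12345, // placeholder job ID
			NotebookParams: map[string]string{
				"name": "john doe",
				"age":  "35",
			},
			// Retrying with the same token returns the existing run instead
			// of launching a second one.
			IdempotencyToken: "8f018174-4792-40d5-bcbc-3e6a527352c8",
		}

		b, _ := json.MarshalIndent(req, "", "  ")
		fmt.Println(string(b))
	}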

type RunNowResponse

type RunNowResponse struct {
	// A unique identifier for this job run. This is set to the same value as
	// `run_id`.
	NumberInJob int64 `json:"number_in_job,omitempty"`
	// The globally unique ID of the newly triggered run.
	RunId int64 `json:"run_id,omitempty"`
}

type RunOutput

type RunOutput struct {
	// The output of a dbt task, if available.
	DbtOutput *DbtOutput `json:"dbt_output,omitempty"`
	// An error message indicating why a task failed or why output is not
	// available. The message is unstructured, and its exact format is subject
	// to change.
	Error string `json:"error,omitempty"`
	// If there was an error executing the run, this field contains any
	// available stack traces.
	ErrorTrace string `json:"error_trace,omitempty"`
	// The output from tasks that write to standard streams (stdout/stderr) such
	// as spark_jar_task, spark_python_task, python_wheel_task.
	//
	// It's not supported for the notebook_task, pipeline_task or
	// spark_submit_task.
	//
	// Databricks restricts this API to return the last 5 MB of these logs.
	Logs string `json:"logs,omitempty"`
	// Whether the logs are truncated.
	LogsTruncated bool `json:"logs_truncated,omitempty"`
	// All details of the run except for its output.
	Metadata *Run `json:"metadata,omitempty"`
	// The output of a notebook task, if available. A notebook task that
	// terminates (either successfully or with a failure) without calling
	// `dbutils.notebook.exit()` is considered to have an empty output. This
	// field is set but its result value is empty. Databricks restricts this
	// API to return the first 5 MB of the output. To return a larger result,
	// use the
	// [ClusterLogConf](/dev-tools/api/latest/clusters.html#clusterlogconf)
	// field to configure log storage for the job cluster.
	NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"`
	// The output of a SQL task, if available.
	SqlOutput *SqlOutput `json:"sql_output,omitempty"`
}

type RunParameters

type RunParameters struct {
	// An array of commands to execute for jobs with the dbt task, for example
	// `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]`
	DbtCommands []string `json:"dbt_commands,omitempty"`
	// A list of parameters for jobs with Spark JAR tasks, for example
	// `\"jar_params\": [\"john doe\", \"35\"]`. The parameters are used to
	// invoke the main function of the main class specified in the Spark JAR
	// task. If not specified upon `run-now`, it defaults to an empty list.
	// jar_params cannot be specified in conjunction with notebook_params. The
	// JSON representation of this field (for example `{\"jar_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables](/jobs.html#parameter-variables) to set
	// parameters containing information about job runs.
	JarParams []string `json:"jar_params,omitempty"`
	// A map from keys to values for jobs with notebook task, for example
	// `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The map
	// is passed to the notebook and is accessible through the
	// [dbutils.widgets.get] function.
	//
	// If not specified upon `run-now`, the triggered run uses the job’s base
	// parameters.
	//
	// notebook_params cannot be specified in conjunction with jar_params.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// The JSON representation of this field (for example
	// `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot
	// exceed 10,000 bytes.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	// [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
	NotebookParams map[string]string `json:"notebook_params,omitempty"`

	PipelineParams *PipelineParams `json:"pipeline_params,omitempty"`
	// A map from keys to values for jobs with Python wheel task, for example
	// `"python_named_params": {"name": "task", "data":
	// "dbfs:/path/to/data.json"}`.
	PythonNamedParams map[string]string `json:"python_named_params,omitempty"`
	// A list of parameters for jobs with Python tasks, for example
	// `\"python_params\": [\"john doe\", \"35\"]`. The parameters are passed to
	// Python file as command-line parameters. If specified upon `run-now`, it
	// would overwrite the parameters specified in job setting. The JSON
	// representation of this field (for example `{\"python_params\":[\"john
	// doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	PythonParams []string `json:"python_params,omitempty"`
	// A list of parameters for jobs with spark submit task, for example
	// `\"spark_submit_params\": [\"--class\",
	// \"org.apache.spark.examples.SparkPi\"]`. The parameters are passed to
	// spark-submit script as command-line parameters. If specified upon
	// `run-now`, it would overwrite the parameters specified in job setting.
	// The JSON representation of this field (for example
	// `{\"python_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs
	//
	// Important
	//
	// These parameters accept only Latin characters (ASCII character set).
	// Using non-ASCII characters returns an error. Examples of invalid,
	// non-ASCII characters are Chinese, Japanese kanjis, and emojis.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	SparkSubmitParams []string `json:"spark_submit_params,omitempty"`
	// A map from keys to values for jobs with SQL task, for example
	// `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task
	// does not support custom parameters.
	SqlParams map[string]string `json:"sql_params,omitempty"`
}

type RunResultState

type RunResultState string

This describes an enum

const RunResultStateCanceled RunResultState = `CANCELED`

The run was canceled at user request.

const RunResultStateFailed RunResultState = `FAILED`

The task completed with an error.

const RunResultStateSuccess RunResultState = `SUCCESS`

The task completed successfully.

const RunResultStateTimedout RunResultState = `TIMEDOUT`

The run was stopped after reaching the timeout.

func (*RunResultState) Set added in v0.2.0

func (rrs *RunResultState) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunResultState) String added in v0.2.0

func (rrs *RunResultState) String() string

String representation for fmt.Print

func (*RunResultState) Type added in v0.2.0

func (rrs *RunResultState) Type() string

Type always returns RunResultState to satisfy [pflag.Value] interface

type RunState

type RunState struct {
	// A description of a run’s current location in the run lifecycle. This
	// field is always available in the response.
	LifeCycleState RunLifeCycleState `json:"life_cycle_state,omitempty"`
	// This describes an enum
	ResultState RunResultState `json:"result_state,omitempty"`
	// A descriptive message for the current state. This field is unstructured,
	// and its exact format is subject to change.
	StateMessage string `json:"state_message,omitempty"`
	// Whether a run was canceled manually by a user or by the scheduler because
	// the run timed out.
	UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout,omitempty"`
}

The result and lifecycle state of the run.
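
Per the RunLifeCycleState descriptions above, TERMINATED, SKIPPED, and INTERNAL_ERROR are terminal states, and the result state is meaningful once a run has finished. A small illustrative helper:

	package main

	import (
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	// isTerminal reports whether the run has reached a terminal lifecycle
	// state, following the RunLifeCycleState descriptions above.
	func isTerminal(s jobs.RunState) bool {
		switch s.LifeCycleState {
		case jobs.RunLifeCycleStateTerminated,
			jobs.RunLifeCycleStateSkipped,
			jobs.RunLifeCycleStateInternalError:
			return true
		}
		return false
	}

	func main() {
		state := jobs.RunState{
			LifeCycleState: jobs.RunLifeCycleStateTerminated,
			ResultState:    jobs.RunResultStateSuccess,
		}
		if isTerminal(state) {
			fmt.Println("finished with result:", state.ResultState)
		}
	}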

type RunSubmitTaskSettings

type RunSubmitTaskSettings struct {
	// An optional array of objects specifying the dependency graph of the task.
	// All tasks specified in this field must complete successfully before
	// executing this task. The key is `task_key`, and the value is the name
	// assigned to the dependent task. This field is required when a job
	// consists of more than one task.
	DependsOn []TaskDependenciesItem `json:"depends_on,omitempty"`
	// If existing_cluster_id, the ID of an existing cluster that is used for
	// all runs of this task. When running tasks on an existing cluster, you may
	// need to manually restart the cluster if it stops responding. We suggest
	// running jobs on new clusters for greater reliability.
	ExistingClusterId string `json:"existing_cluster_id,omitempty"`
	// An optional list of libraries to be installed on the cluster that
	// executes the task. The default value is an empty list.
	Libraries []libraries.Library `json:"libraries,omitempty"`
	// If new_cluster, a description of a cluster that is created for each run.
	NewCluster *clusters.BaseClusterInfo `json:"new_cluster,omitempty"`
	// If notebook_task, indicates that this task must run a notebook. This
	// field may not be specified in conjunction with spark_jar_task.
	NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
	// If pipeline_task, indicates that this task must execute a Pipeline.
	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
	// If python_wheel_task, indicates that this job must execute a PythonWheel.
	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
	// If spark_jar_task, indicates that this task must run a JAR.
	SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
	// If spark_python_task, indicates that this task must run a Python file.
	SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
	// If spark_submit_task, indicates that this task must be launched by the
	// spark submit script. This task can run only on new clusters.
	SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"`
	// A unique name for the task. This field is used to refer to this task from
	// other tasks. This field is required and must be unique within its parent
	// job. On Update or Reset, this field is used to reference the tasks to be
	// updated or reset. The maximum length is 100 characters.
	TaskKey string `json:"task_key"`
	// An optional timeout applied to each run of this job task. The default
	// behavior is to have no timeout.
	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
}
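
A sketch of two one-time run tasks where the second depends on the first; depends_on is required whenever a submission contains more than one task. The task keys, notebook path, cluster ID, and pipeline ID are placeholders:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		tasks := []jobs.RunSubmitTaskSettings{
			{
				TaskKey: "ingest", // placeholder task key
				NotebookTask: &jobs.NotebookTask{
					NotebookPath: "/Users/someone@example.com/ingest", // placeholder
				},
				ExistingClusterId: "1234-567890-abcde123", // placeholder cluster ID
			},
			{
				TaskKey: "transform",
				// Runs only after "ingest" completes successfully.
				DependsOn:    []jobs.TaskDependenciesItem{{TaskKey: "ingest"}},
				PipelineTask: &jobs.PipelineTask{PipelineId: "abcd-1234"}, // placeholder
			},
		}

		b, _ := json.MarshalIndent(tasks, "", "  ")
		fmt.Println(string(b))
	}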

type RunTask

type RunTask struct {
	// The sequence number of this run attempt for a triggered job run. The
	// initial attempt of a run has an attempt_number of 0\. If the initial run
	// attempt fails, and the job has a retry policy (`max_retries` \> 0),
	// subsequent runs are created with an `original_attempt_run_id` of the
	// original attempt’s ID and an incrementing `attempt_number`. Runs are
	// retried only until they succeed, and the maximum `attempt_number` is the
	// same as the `max_retries` value for the job.
	AttemptNumber int `json:"attempt_number,omitempty"`
	// The time in milliseconds it took to terminate the cluster and clean up
	// any associated artifacts. The duration of a task run is the sum of the
	// `setup_duration`, `execution_duration`, and the `cleanup_duration`. The
	// `cleanup_duration` field is set to 0 for multitask job runs. The total
	// duration of a multitask job run is the value of the `run_duration` field.
	CleanupDuration int64 `json:"cleanup_duration,omitempty"`
	// The cluster used for this run. If the run is specified to use a new
	// cluster, this field is set once the Jobs service has requested a cluster
	// for the run.
	ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"`
	// If dbt_task, indicates that this must execute a dbt task. It requires
	// both Databricks SQL and the ability to use a serverless or a pro SQL
	// warehouse.
	DbtTask *DbtTask `json:"dbt_task,omitempty"`
	// An optional array of objects specifying the dependency graph of the task.
	// All tasks specified in this field must complete successfully before
	// executing this task. The key is `task_key`, and the value is the name
	// assigned to the dependent task. This field is required when a job
	// consists of more than one task.
	DependsOn []TaskDependenciesItem `json:"depends_on,omitempty"`
	// An optional description for this task. The maximum length is 4096 bytes.
	Description string `json:"description,omitempty"`
	// The time at which this run ended in epoch milliseconds (milliseconds
	// since 1/1/1970 UTC). This field is set to 0 if the job is still running.
	EndTime int64 `json:"end_time,omitempty"`
	// The time in milliseconds it took to execute the commands in the JAR or
	// notebook until they completed, failed, timed out, were cancelled, or
	// encountered an unexpected error. The duration of a task run is the sum of
	// the `setup_duration`, `execution_duration`, and the `cleanup_duration`.
	// The `execution_duration` field is set to 0 for multitask job runs. The
	// total duration of a multitask job run is the value of the `run_duration`
	// field.
	ExecutionDuration int64 `json:"execution_duration,omitempty"`
	// If existing_cluster_id, the ID of an existing cluster that is used for
	// all runs of this job. When running jobs on an existing cluster, you may
	// need to manually restart the cluster if it stops responding. We suggest
	// running jobs on new clusters for greater reliability.
	ExistingClusterId string `json:"existing_cluster_id,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// An optional list of libraries to be installed on the cluster that
	// executes the job. The default value is an empty list.
	Libraries []libraries.Library `json:"libraries,omitempty"`
	// If new_cluster, a description of a new cluster that is created only for
	// this task.
	NewCluster *clusters.BaseClusterInfo `json:"new_cluster,omitempty"`
	// If notebook_task, indicates that this job must run a notebook. This field
	// may not be specified in conjunction with spark_jar_task.
	NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
	// If pipeline_task, indicates that this job must execute a Pipeline.
	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
	// If python_wheel_task, indicates that this job must execute a PythonWheel.
	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
	// The ID of the task run.
	RunId int64 `json:"run_id,omitempty"`
	// The time in milliseconds it took to set up the cluster. For runs that run
	// on new clusters this is the cluster creation time, for runs that run on
	// existing clusters this time should be very short. The duration of a task
	// run is the sum of the `setup_duration`, `execution_duration`, and the
	// `cleanup_duration`. The `setup_duration` field is set to 0 for multitask
	// job runs. The total duration of a multitask job run is the value of the
	// `run_duration` field.
	SetupDuration int64 `json:"setup_duration,omitempty"`
	// If spark_jar_task, indicates that this job must run a JAR.
	SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
	// If spark_python_task, indicates that this job must run a Python file.
	SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
	// If spark_submit_task, indicates that this task must be launched by the
	// spark submit script. This task can run only on new clusters
	SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"`
	// If sql_task, indicates that this job must execute a SQL task.
	SqlTask *SqlTask `json:"sql_task,omitempty"`
	// The time at which this run was started in epoch milliseconds
	// (milliseconds since 1/1/1970 UTC). This may not be the time when the job
	// task starts executing, for example, if the job is scheduled to run on a
	// new cluster, this is the time the cluster creation call is issued.
	StartTime int64 `json:"start_time,omitempty"`
	// The result and lifecycle states of the run.
	State *RunState `json:"state,omitempty"`
	// A unique name for the task. This field is used to refer to this task from
	// other tasks. This field is required and must be unique within its parent
	// job. On Update or Reset, this field is used to reference the tasks to be
	// updated or reset. The maximum length is 100 characters.
	TaskKey string `json:"task_key,omitempty"`
}

type RunType

type RunType string

This describes an enum

const RunTypeJobRun RunType = `JOB_RUN`

Normal job run. A run created with :method:jobs/runNow.

const RunTypeSubmitRun RunType = `SUBMIT_RUN`

Submit run. A run created with :method:jobs/submit.

const RunTypeWorkflowRun RunType = `WORKFLOW_RUN`

Workflow run. A run created with [dbutils.notebook.run](/dev-tools/databricks-utils.html#dbutils-workflow).

func (*RunType) Set added in v0.2.0

func (rt *RunType) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunType) String added in v0.2.0

func (rt *RunType) String() string

String representation for fmt.Print

func (*RunType) Type added in v0.2.0

func (rt *RunType) Type() string

Type always returns RunType to satisfy [pflag.Value] interface

type SparkJarTask

type SparkJarTask struct {
	// Deprecated since 04/2016. Provide a `jar` through the `libraries` field
	// instead. For an example, see :method:jobs/create.
	JarUri string `json:"jar_uri,omitempty"`
	// The full name of the class containing the main method to be executed.
	// This class must be contained in a JAR provided as a library.
	//
	// The code must use `SparkContext.getOrCreate` to obtain a Spark context;
	// otherwise, runs of the job fail.
	MainClassName string `json:"main_class_name,omitempty"`
	// Parameters passed to the main method.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	Parameters []string `json:"parameters,omitempty"`
}

type SparkPythonTask

type SparkPythonTask struct {
	// Command line parameters passed to the Python file.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	Parameters []string `json:"parameters,omitempty"`

	PythonFile string `json:"python_file"`
}

type SparkSubmitTask

type SparkSubmitTask struct {
	// Command-line parameters passed to spark submit.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	Parameters []string `json:"parameters,omitempty"`
}

type SqlAlertOutput

type SqlAlertOutput struct {
	// The state of the SQL alert.
	//
	// * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not
	// fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled
	// trigger conditions
	AlertState SqlAlertState `json:"alert_state,omitempty"`
	// The link to find the output results.
	OutputLink string `json:"output_link,omitempty"`
	// The text of the SQL query. The Can Run permission on the SQL query
	// associated with the SQL alert is required to view this field.
	QueryText string `json:"query_text,omitempty"`
	// Information about SQL statements executed in the run.
	SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"`
	// The canonical identifier of the SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`
}

type SqlAlertState added in v0.3.1

type SqlAlertState string

The state of the SQL alert.

* UNKNOWN: alert yet to be evaluated
* OK: alert evaluated and did not fulfill trigger conditions
* TRIGGERED: alert evaluated and fulfilled trigger conditions

const SqlAlertStateOk SqlAlertState = `OK`
const SqlAlertStateTriggered SqlAlertState = `TRIGGERED`
const SqlAlertStateUnknown SqlAlertState = `UNKNOWN`

func (*SqlAlertState) Set added in v0.3.1

func (sas *SqlAlertState) Set(v string) error

Set raw string value and validate it against allowed values

func (*SqlAlertState) String added in v0.3.1

func (sas *SqlAlertState) String() string

String representation for fmt.Print

func (*SqlAlertState) Type added in v0.3.1

func (sas *SqlAlertState) Type() string

Type always returns SqlAlertState to satisfy [pflag.Value] interface

type SqlDashboardOutput

type SqlDashboardOutput struct {
	// The canonical identifier of the SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`
	// Widgets executed in the run. Only SQL query based widgets are listed.
	Widgets *SqlDashboardWidgetOutput `json:"widgets,omitempty"`
}

type SqlDashboardWidgetOutput

type SqlDashboardWidgetOutput struct {
	// Time (in epoch milliseconds) when execution of the SQL widget ends.
	EndTime int64 `json:"end_time,omitempty"`
	// The information about the error when execution fails.
	Error *SqlOutputError `json:"error,omitempty"`
	// The link to find the output results.
	OutputLink string `json:"output_link,omitempty"`
	// Time (in epoch milliseconds) when execution of the SQL widget starts.
	StartTime int64 `json:"start_time,omitempty"`
	// The execution status of the SQL widget.
	Status SqlDashboardWidgetOutputStatus `json:"status,omitempty"`
	// The canonical identifier of the SQL widget.
	WidgetId string `json:"widget_id,omitempty"`
	// The title of the SQL widget.
	WidgetTitle string `json:"widget_title,omitempty"`
}

type SqlDashboardWidgetOutputStatus

type SqlDashboardWidgetOutputStatus string

The execution status of the SQL widget.

const SqlDashboardWidgetOutputStatusCancelled SqlDashboardWidgetOutputStatus = `CANCELLED`
const SqlDashboardWidgetOutputStatusFailed SqlDashboardWidgetOutputStatus = `FAILED`
const SqlDashboardWidgetOutputStatusPending SqlDashboardWidgetOutputStatus = `PENDING`
const SqlDashboardWidgetOutputStatusRunning SqlDashboardWidgetOutputStatus = `RUNNING`
const SqlDashboardWidgetOutputStatusSuccess SqlDashboardWidgetOutputStatus = `SUCCESS`

func (*SqlDashboardWidgetOutputStatus) Set added in v0.2.0

func (sdwos *SqlDashboardWidgetOutputStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*SqlDashboardWidgetOutputStatus) String added in v0.2.0

func (sdwos *SqlDashboardWidgetOutputStatus) String() string

String representation for fmt.Print

func (*SqlDashboardWidgetOutputStatus) Type added in v0.2.0

func (sdwos *SqlDashboardWidgetOutputStatus) Type() string

Type always returns SqlDashboardWidgetOutputStatus to satisfy [pflag.Value] interface

type SqlOutput

type SqlOutput struct {
	// The output of a SQL alert task, if available.
	AlertOutput *SqlAlertOutput `json:"alert_output,omitempty"`
	// The output of a SQL dashboard task, if available.
	DashboardOutput *SqlDashboardOutput `json:"dashboard_output,omitempty"`
	// The output of a SQL query task, if available.
	QueryOutput *SqlQueryOutput `json:"query_output,omitempty"`
}

type SqlOutputError

type SqlOutputError struct {
	// The error message when execution fails.
	Message string `json:"message,omitempty"`
}

type SqlQueryOutput

type SqlQueryOutput struct {
	// The link to find the output results.
	OutputLink string `json:"output_link,omitempty"`
	// The text of the SQL query. The Can Run permission on the SQL query is
	// required to view this field.
	QueryText string `json:"query_text,omitempty"`
	// Information about SQL statements executed in the run.
	SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"`
	// The canonical identifier of the SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`
}

type SqlStatementOutput

type SqlStatementOutput struct {
	// A key that can be used to look up query details.
	LookupKey string `json:"lookup_key,omitempty"`
}

type SqlTask

type SqlTask struct {
	// If alert, indicates that this job must refresh a SQL alert.
	Alert *SqlTaskAlert `json:"alert,omitempty"`
	// If dashboard, indicates that this job must refresh a SQL dashboard.
	Dashboard *SqlTaskDashboard `json:"dashboard,omitempty"`
	// Parameters to be used for each run of this job. The SQL alert task does
	// not support custom parameters.
	Parameters map[string]string `json:"parameters,omitempty"`
	// If query, indicates that this job must execute a SQL query.
	Query *SqlTaskQuery `json:"query,omitempty"`
	// The canonical identifier of the SQL warehouse. Only serverless and pro
	// SQL warehouses are supported.
	WarehouseId string `json:"warehouse_id"`
}
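
An illustrative SqlTask that runs a saved query on a pro or serverless SQL warehouse; the warehouse ID, query ID, and parameter values are placeholders:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		task := jobs.SqlTask{
			// Required: only serverless and pro SQL warehouses are supported.
			WarehouseId: "abc123def456", // placeholder warehouse ID
			Query:       &jobs.SqlTaskQuery{QueryId: "12345"}, // placeholder query ID
			Parameters:  map[string]string{"date": "2023-03-09"},
		}

		b, _ := json.MarshalIndent(task, "", "  ")
		fmt.Println(string(b))
	}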

type SqlTaskAlert

type SqlTaskAlert struct {
	// The canonical identifier of the SQL alert.
	AlertId string `json:"alert_id"`
	// If true, the alert notifications are not sent to subscribers.
	PauseSubscriptions bool `json:"pause_subscriptions,omitempty"`
	// If specified, alert notifications are sent to subscribers.
	Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"`
}

type SqlTaskDashboard

type SqlTaskDashboard struct {
	// Subject of the email sent to subscribers of this task.
	CustomSubject string `json:"custom_subject,omitempty"`
	// The canonical identifier of the SQL dashboard.
	DashboardId string `json:"dashboard_id"`
	// If true, the dashboard snapshot is not taken, and emails are not sent to
	// subscribers.
	PauseSubscriptions bool `json:"pause_subscriptions,omitempty"`
	// If specified, dashboard snapshots are sent to subscriptions.
	Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"`
}

type SqlTaskQuery

type SqlTaskQuery struct {
	// The canonical identifier of the SQL query.
	QueryId string `json:"query_id"`
}

type SqlTaskSubscription added in v0.3.1

type SqlTaskSubscription struct {
	// The canonical identifier of the destination to receive email
	// notification.
	DestinationId string `json:"destination_id,omitempty"`
	// The user name to receive the subscription email.
	UserName string `json:"user_name,omitempty"`
}

type SubmitRun

type SubmitRun struct {
	// List of permissions to set on the job.
	AccessControlList []permissions.AccessControlRequest `json:"access_control_list,omitempty"`
	// An optional specification for a remote repository containing the
	// notebooks used by this job's notebook tasks.
	GitSource *GitSource `json:"git_source,omitempty"`
	// An optional token that can be used to guarantee the idempotency of job
	// run requests. If a run with the provided token already exists, the
	// request does not create a new run but returns the ID of the existing run
	// instead. If a run with the provided token is deleted, an error is
	// returned.
	//
	// If you specify the idempotency token, upon failure you can retry until
	// the request succeeds. Databricks guarantees that exactly one run is
	// launched with that idempotency token.
	//
	// This token must have at most 64 characters.
	//
	// For more information, see [How to ensure idempotency for jobs].
	//
	// [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html
	IdempotencyToken string `json:"idempotency_token,omitempty"`
	// An optional name for the run. The default value is `Untitled`.
	RunName string `json:"run_name,omitempty"`

	Tasks []RunSubmitTaskSettings `json:"tasks,omitempty"`
	// An optional timeout applied to each run of this job. The default behavior
	// is to have no timeout.
	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
	// A collection of system notification IDs to notify when the run begins or
	// completes. The default behavior is to not send any system notifications.
	WebhookNotifications *JobWebhookNotifications `json:"webhook_notifications,omitempty"`
}
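
Putting the pieces together, an illustrative one-time submission that wraps a single task, an idempotency token, and a timeout; every identifier shown is a placeholder:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		req := jobs.SubmitRun{
			RunName: "ad-hoc etl", // placeholder run name
			Tasks: []jobs.RunSubmitTaskSettings{
				{
					TaskKey: "ingest", // placeholder task key
					NotebookTask: &jobs.NotebookTask{
						NotebookPath: "/Users/someone@example.com/ingest", // placeholder
					},
					ExistingClusterId: "1234-567890-abcde123", // placeholder
				},
			},
			TimeoutSeconds: 3600,
			// Retrying with the same token never launches a second run.
			IdempotencyToken: "c3b9b349-6e07-4f7e-9f9a-2f6f2f0a1b2c",
		}

		b, _ := json.MarshalIndent(req, "", "  ")
		fmt.Println(string(b))
	}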

type SubmitRunResponse

type SubmitRunResponse struct {
	// The canonical identifier for the newly submitted run.
	RunId int64 `json:"run_id,omitempty"`
}

type TaskDependenciesItem

type TaskDependenciesItem struct {
	TaskKey string `json:"task_key,omitempty"`
}

type TriggerEvaluation added in v0.4.0

type TriggerEvaluation struct {
	// Human-readable description of the trigger evaluation result. Explains
	// why the trigger evaluation triggered or did not trigger a run, or failed.
	Description string `json:"description,omitempty"`
	// The ID of the run that was triggered by the trigger evaluation. Only
	// returned if a run was triggered.
	RunId int64 `json:"run_id,omitempty"`
	// Timestamp at which the trigger was evaluated.
	Timestamp int64 `json:"timestamp,omitempty"`
}

type TriggerHistory added in v0.4.0

type TriggerHistory struct {
	// The last time the trigger failed to evaluate.
	LastFailed *TriggerEvaluation `json:"last_failed,omitempty"`
	// The last time the trigger was evaluated but did not trigger a run.
	LastNotTriggered *TriggerEvaluation `json:"last_not_triggered,omitempty"`
	// The last time the run was triggered due to a file arrival.
	LastTriggered *TriggerEvaluation `json:"last_triggered,omitempty"`
}

type TriggerSettings added in v0.4.0

type TriggerSettings struct {
	// File arrival trigger settings.
	FileArrival *FileArrivalTriggerSettings `json:"file_arrival,omitempty"`
	// Whether this trigger is paused or not.
	PauseStatus TriggerSettingsPauseStatus `json:"pause_status,omitempty"`
}

type TriggerSettingsPauseStatus added in v0.4.0

type TriggerSettingsPauseStatus string

Whether this trigger is paused or not.

const TriggerSettingsPauseStatusPaused TriggerSettingsPauseStatus = `PAUSED`
const TriggerSettingsPauseStatusUnpaused TriggerSettingsPauseStatus = `UNPAUSED`

func (*TriggerSettingsPauseStatus) Set added in v0.4.0

func (tsps *TriggerSettingsPauseStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*TriggerSettingsPauseStatus) String added in v0.4.0

func (tsps *TriggerSettingsPauseStatus) String() string

String representation for fmt.Print

func (*TriggerSettingsPauseStatus) Type added in v0.4.0

func (tsps *TriggerSettingsPauseStatus) Type() string

Type always returns TriggerSettingsPauseStatus to satisfy [pflag.Value] interface

type TriggerType

type TriggerType string

This describes an enum

const TriggerTypeFileArrival TriggerType = `FILE_ARRIVAL`

Indicates a run that is triggered by a file arrival.

const TriggerTypeOneTime TriggerType = `ONE_TIME`

One time triggers that fire a single run. This occurs when you trigger a single run on demand through the UI or the API.

const TriggerTypePeriodic TriggerType = `PERIODIC`

Schedules that periodically trigger runs, such as a cron scheduler.

const TriggerTypeRetry TriggerType = `RETRY`

Indicates a run that is triggered as a retry of a previously failed run. This occurs when you request to re-run the job in case of failures.

func (*TriggerType) Set added in v0.2.0

func (tt *TriggerType) Set(v string) error

Set raw string value and validate it against allowed values

func (*TriggerType) String added in v0.2.0

func (tt *TriggerType) String() string

String representation for fmt.Print

func (*TriggerType) Type added in v0.2.0

func (tt *TriggerType) Type() string

Type always returns TriggerType to satisfy [pflag.Value] interface

type UpdateJob

type UpdateJob struct {
	// Remove top-level fields in the job settings. Removing nested fields is
	// not supported. This field is optional.
	FieldsToRemove []string `json:"fields_to_remove,omitempty"`
	// The canonical identifier of the job to update. This field is required.
	JobId int64 `json:"job_id"`
	// The new settings for the job. Any top-level fields specified in
	// `new_settings` are completely replaced. Partially updating nested fields
	// is not supported.
	//
	// Changes to the field `JobSettings.timeout_seconds` are applied to active
	// runs. Changes to other fields are applied to future runs only.
	NewSettings *JobSettings `json:"new_settings,omitempty"`
}
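
Unlike ResetJob, which replaces the settings wholesale, UpdateJob replaces only the top-level fields that are provided, and fields_to_remove clears top-level fields. A sketch under the assumption that JobSettings (defined earlier in this package) exposes a top-level Name field; the job ID and new name are placeholders:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/databricks/databricks-sdk-go/service/jobs"
	)

	func main() {
		req := jobs.UpdateJob{
			JobId: 12345, // placeholder job ID
			// Only the provided top-level fields are replaced.
			NewSettings: &jobs.JobSettings{
				Name: "nightly etl", // placeholder name
			},
			// Removes the job's schedule; only top-level fields can be removed.
			FieldsToRemove: []string{"schedule"},
		}

		b, _ := json.MarshalIndent(req, "", "  ")
		fmt.Println(string(b))
	}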

type ViewItem

type ViewItem struct {
	// Content of the view.
	Content string `json:"content,omitempty"`
	// Name of the view item. In the case of code view, it would be the
	// notebook’s name. In the case of dashboard view, it would be the
	// dashboard’s name.
	Name string `json:"name,omitempty"`
	// Type of the view item.
	Type ViewType `json:"type,omitempty"`
}

type ViewType

type ViewType string

This describes an enum

const ViewTypeDashboard ViewType = `DASHBOARD`

Dashboard view item.

const ViewTypeNotebook ViewType = `NOTEBOOK`

Notebook view item.

func (*ViewType) Set added in v0.2.0

func (vt *ViewType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ViewType) String added in v0.2.0

func (vt *ViewType) String() string

String representation for fmt.Print

func (*ViewType) Type added in v0.2.0

func (vt *ViewType) Type() string

Type always returns ViewType to satisfy [pflag.Value] interface

type ViewsToExport

type ViewsToExport string

This describes an enum

const ViewsToExportAll ViewsToExport = `ALL`

All views of the notebook.

const ViewsToExportCode ViewsToExport = `CODE`

Code view of the notebook.

const ViewsToExportDashboards ViewsToExport = `DASHBOARDS`

All dashboard views of the notebook.

func (*ViewsToExport) Set added in v0.2.0

func (vte *ViewsToExport) Set(v string) error

Set raw string value and validate it against allowed values

func (*ViewsToExport) String added in v0.2.0

func (vte *ViewsToExport) String() string

String representation for fmt.Print

func (*ViewsToExport) Type added in v0.2.0

func (vte *ViewsToExport) Type() string

Type always returns ViewsToExport to satisfy [pflag.Value] interface
