Documentation ¶
Overview ¶
The Jobs API allows you to create, edit, and delete jobs.
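For orientation, a minimal end-to-end sketch (not part of the generated reference): it assumes the workspace client constructor from the parent databricks-sdk-go module, and the Task/NotebookTask fields and placeholder cluster ID and notebook path are illustrative assumptions rather than values from this page.

    package main

    import (
        "context"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/jobs"
    )

    func main() {
        ctx := context.Background()

        // Assumption: default unified authentication (environment/config profile) works.
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            panic(err)
        }

        // Create a one-task notebook job; cluster ID and notebook path are placeholders.
        created, err := w.Jobs.Create(ctx, jobs.CreateJob{
            Name: "sdk-example-job",
            Tasks: []jobs.Task{
                {
                    TaskKey:           "main",
                    ExistingClusterId: "0123-456789-abcdefgh", // hypothetical cluster ID
                    NotebookTask: &jobs.NotebookTask{
                        NotebookPath: "/Users/someone@example.com/hello", // hypothetical path
                    },
                },
            },
        })
        if err != nil {
            panic(err)
        }

        // Fetch the job back by its canonical identifier.
        job, err := w.Jobs.GetByJobId(ctx, created.JobId)
        if err != nil {
            panic(err)
        }
        _ = job
    }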
Index ¶
- type BaseJob
- type BaseRun
- type CancelAllRuns
- type CancelAllRunsResponse
- type CancelRun
- type CancelRunResponse
- type ClusterInstance
- type ClusterSpec
- type Condition
- type ConditionTask
- type ConditionTaskOp
- type Continuous
- type CreateJob
- type CreateResponse
- type CronSchedule
- type DbtOutput
- type DbtTask
- type DeleteJob
- type DeleteResponse
- type DeleteRun
- type DeleteRunResponse
- type ExportRunOutput
- type ExportRunRequest
- type FileArrivalTriggerConfiguration
- type ForEachStats
- type ForEachTask
- type ForEachTaskErrorMessageStats
- type ForEachTaskTaskRunStats
- type Format
- type GetJobPermissionLevelsRequest
- type GetJobPermissionLevelsResponse
- type GetJobPermissionsRequest
- type GetJobRequest
- type GetRunOutputRequest
- type GetRunRequest
- type GitProvider
- type GitSnapshot
- type GitSource
- type Job
- type JobAccessControlRequest
- type JobAccessControlResponse
- type JobCluster
- type JobCompute
- type JobDeployment
- type JobDeploymentKind
- type JobEditMode
- type JobEmailNotifications
- type JobNotificationSettings
- type JobParameter
- type JobParameterDefinition
- type JobPermission
- type JobPermissionLevel
- type JobPermissions
- type JobPermissionsDescription
- type JobPermissionsRequest
- type JobRunAs
- type JobSettings
- type JobSource
- type JobSourceDirtyState
- type JobsAPI
- func (a *JobsAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error)
- func (a *JobsAPI) CancelAllRuns(ctx context.Context, request CancelAllRuns) error
- func (a *JobsAPI) CancelRun(ctx context.Context, cancelRun CancelRun) (*WaitGetRunJobTerminatedOrSkipped[struct{}], error)
- func (a *JobsAPI) CancelRunAndWait(ctx context.Context, cancelRun CancelRun, options ...retries.Option[Run]) (*Run, error) (deprecated)
- func (a *JobsAPI) CancelRunByRunId(ctx context.Context, runId int64) error
- func (a *JobsAPI) CancelRunByRunIdAndWait(ctx context.Context, runId int64, options ...retries.Option[Run]) (*Run, error)
- func (a *JobsAPI) Create(ctx context.Context, request CreateJob) (*CreateResponse, error)
- func (a *JobsAPI) Delete(ctx context.Context, request DeleteJob) error
- func (a *JobsAPI) DeleteByJobId(ctx context.Context, jobId int64) error
- func (a *JobsAPI) DeleteRun(ctx context.Context, request DeleteRun) error
- func (a *JobsAPI) DeleteRunByRunId(ctx context.Context, runId int64) error
- func (a *JobsAPI) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error)
- func (a *JobsAPI) Get(ctx context.Context, request GetJobRequest) (*Job, error)
- func (a *JobsAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error)
- func (a *JobsAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, error)
- func (a *JobsAPI) GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error)
- func (a *JobsAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error)
- func (a *JobsAPI) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error)
- func (a *JobsAPI) GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error)
- func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, error)
- func (a *JobsAPI) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error)
- func (a *JobsAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error)
- func (a *JobsAPI) Impl() JobsService
- func (a *JobsAPI) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob]
- func (a *JobsAPI) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error)
- func (a *JobsAPI) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun]
- func (a *JobsAPI) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error)
- func (a *JobsAPI) RepairRun(ctx context.Context, repairRun RepairRun) (*WaitGetRunJobTerminatedOrSkipped[RepairRunResponse], error)
- func (a *JobsAPI) RepairRunAndWait(ctx context.Context, repairRun RepairRun, options ...retries.Option[Run]) (*Run, error) (deprecated)
- func (a *JobsAPI) Reset(ctx context.Context, request ResetJob) error
- func (a *JobsAPI) RunNow(ctx context.Context, runNow RunNow) (*WaitGetRunJobTerminatedOrSkipped[RunNowResponse], error)
- func (a *JobsAPI) RunNowAndWait(ctx context.Context, runNow RunNow, options ...retries.Option[Run]) (*Run, error) (deprecated)
- func (a *JobsAPI) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)
- func (a *JobsAPI) Submit(ctx context.Context, submitRun SubmitRun) (*WaitGetRunJobTerminatedOrSkipped[SubmitRunResponse], error)
- func (a *JobsAPI) SubmitAndWait(ctx context.Context, submitRun SubmitRun, options ...retries.Option[Run]) (*Run, error) (deprecated)
- func (a *JobsAPI) Update(ctx context.Context, request UpdateJob) error
- func (a *JobsAPI) UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)
- func (a *JobsAPI) WaitGetRunJobTerminatedOrSkipped(ctx context.Context, runId int64, timeout time.Duration, callback func(*Run)) (*Run, error)
- func (a *JobsAPI) WithImpl(impl JobsService) JobsInterface
- type JobsHealthMetric
- type JobsHealthOperator
- type JobsHealthRule
- type JobsHealthRules
- type JobsInterface
- type JobsService
- type ListJobsRequest
- type ListJobsResponse
- type ListRunsRequest
- type ListRunsResponse
- type NotebookOutput
- type NotebookTask
- type PauseStatus
- type PipelineParams
- type PipelineTask
- type PythonWheelTask
- type QueueSettings
- type RepairHistoryItem
- type RepairHistoryItemType
- type RepairRun
- type RepairRunResponse
- type ResetJob
- type ResetResponse
- type ResolvedConditionTaskValues
- type ResolvedDbtTaskValues
- type ResolvedNotebookTaskValues
- type ResolvedParamPairValues
- type ResolvedPythonWheelTaskValues
- type ResolvedRunJobTaskValues
- type ResolvedStringParamsValues
- type ResolvedValues
- type Run
- type RunConditionTask
- type RunForEachTask
- type RunIf
- type RunJobOutput
- type RunJobTask
- type RunLifeCycleState
- type RunNow
- type RunNowResponse
- type RunOutput
- type RunParameters
- type RunResultState
- type RunState
- type RunTask
- type RunType
- type Source
- type SparkJarTask
- type SparkPythonTask
- type SparkSubmitTask
- type SqlAlertOutput
- type SqlAlertState
- type SqlDashboardOutput
- type SqlDashboardWidgetOutput
- type SqlDashboardWidgetOutputStatus
- type SqlOutput
- type SqlOutputError
- type SqlQueryOutput
- type SqlStatementOutput
- type SqlTask
- type SqlTaskAlert
- type SqlTaskDashboard
- type SqlTaskFile
- type SqlTaskQuery
- type SqlTaskSubscription
- type SubmitRun
- type SubmitRunResponse
- type SubmitTask
- type TableTriggerConfiguration
- type Task
- type TaskDependency
- type TaskEmailNotifications
- type TaskNotificationSettings
- type TriggerInfo
- type TriggerSettings
- type TriggerType
- type UpdateJob
- type UpdateResponse
- type ViewItem
- type ViewType
- type ViewsToExport
- type WaitGetRunJobTerminatedOrSkipped
- type Webhook
- type WebhookNotifications
Examples ¶
- JobsAPI.CancelAllRuns (JobsApiFullIntegration)
- JobsAPI.CancelRun (JobsApiFullIntegration)
- JobsAPI.Create (JobsApiFullIntegration)
- JobsAPI.ExportRun (JobsApiFullIntegration)
- JobsAPI.Get (JobsApiFullIntegration)
- JobsAPI.GetRunOutput (JobsApiFullIntegration)
- JobsAPI.ListAll (JobsApiFullIntegration)
- JobsAPI.ListRuns (JobsApiFullIntegration)
- JobsAPI.RepairRun (JobsApiFullIntegration)
- JobsAPI.Reset (JobsApiFullIntegration)
- JobsAPI.RunNow (JobsApiFullIntegration)
- JobsAPI.Submit (JobsApiFullIntegration)
- JobsAPI.Update (JobsApiFullIntegration)
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type BaseJob ¶ added in v0.3.0
type BaseJob struct { // The time at which this job was created in epoch milliseconds // (milliseconds since 1/1/1970 UTC). CreatedTime int64 `json:"created_time,omitempty"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName string `json:"creator_user_name,omitempty"` // The canonical identifier for this job. JobId int64 `json:"job_id,omitempty"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. Settings *JobSettings `json:"settings,omitempty"` ForceSendFields []string `json:"-"` }
func (BaseJob) MarshalJSON ¶ added in v0.23.0
func (*BaseJob) UnmarshalJSON ¶ added in v0.23.0
type BaseRun ¶ added in v0.3.0
type BaseRun struct { // The sequence number of this run attempt for a triggered job run. The // initial attempt of a run has an attempt_number of 0. If the initial run // attempt fails, and the job has a retry policy (`max_retries` > 0), // subsequent runs are created with an `original_attempt_run_id` of the // original attempt’s ID and an incrementing `attempt_number`. Runs are // retried only until they succeed, and the maximum `attempt_number` is the // same as the `max_retries` value for the job. AttemptNumber int `json:"attempt_number,omitempty"` // The time in milliseconds it took to terminate the cluster and clean up // any associated artifacts. The duration of a task run is the sum of the // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The // `cleanup_duration` field is set to 0 for multitask job runs. The total // duration of a multitask job run is the value of the `run_duration` field. CleanupDuration int64 `json:"cleanup_duration,omitempty"` // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` // A snapshot of the job’s cluster specification when this run was // created. ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` // The time in milliseconds it took to execute the commands in the JAR or // notebook until they completed, failed, timed out, were cancelled, or // encountered an unexpected error. The duration of a task run is the sum of // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. // The `execution_duration` field is set to 0 for multitask job runs. The // total duration of a multitask job run is the value of the `run_duration` // field. ExecutionDuration int64 `json:"execution_duration,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. // // If `git_source` is set, these tasks retrieve the file from the remote // repository by default. However, this behavior can be overridden by // setting `source` to `WORKSPACE` on the task. // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. JobClusters []JobCluster `json:"job_clusters,omitempty"` // The canonical identifier of the job that contains this run. JobId int64 `json:"job_id,omitempty"` // Job-level parameters used in the run JobParameters []JobParameter `json:"job_parameters,omitempty"` // A unique identifier for this job run. This is set to the same value as // `run_id`.
NumberInJob int64 `json:"number_in_job,omitempty"` // If this run is a retry of a prior run attempt, this field contains the // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"` // The parameters used for this run. OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"` // The time in milliseconds that the run has spent in the queue. QueueDuration int64 `json:"queue_duration,omitempty"` // The repair history of the run. RepairHistory []RepairHistoryItem `json:"repair_history,omitempty"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration int64 `json:"run_duration,omitempty"` // The canonical identifier of the run. This ID is unique across all runs of // all jobs. RunId int64 `json:"run_id,omitempty"` // An optional name for the run. The maximum length is 4096 bytes in UTF-8 // encoding. RunName string `json:"run_name,omitempty"` // The URL to the detail page of the run. RunPageUrl string `json:"run_page_url,omitempty"` // The type of a run. * `JOB_RUN`: Normal job run. A run created with // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with // :method:jobs/submit. // // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow RunType RunType `json:"run_type,omitempty"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. Schedule *CronSchedule `json:"schedule,omitempty"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task // run is the sum of the `setup_duration`, `execution_duration`, and the // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask // job runs. The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration int64 `json:"setup_duration,omitempty"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime int64 `json:"start_time,omitempty"` // The current state of the run. State *RunState `json:"state,omitempty"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run results. Tasks []RunTask `json:"tasks,omitempty"` // The type of trigger that fired this run. // // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This // occurs when you triggered a single run on demand through the UI or the API. * // `RETRY`: Indicates a run that is triggered as a retry of a previously // failed run. This occurs when you request to re-run the job in case of // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file // arrival. * `TABLE`: Indicates a run that is triggered by a table update.
Trigger TriggerType `json:"trigger,omitempty"` // Additional details about what triggered the run TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` ForceSendFields []string `json:"-"` }
func (BaseRun) MarshalJSON ¶ added in v0.23.0
func (*BaseRun) UnmarshalJSON ¶ added in v0.23.0
type CancelAllRuns ¶
type CancelAllRuns struct { // Optional boolean parameter to cancel all queued runs. If no job_id is // provided, all queued runs in the workspace are canceled. AllQueuedRuns bool `json:"all_queued_runs,omitempty"` // The canonical identifier of the job to cancel all runs of. JobId int64 `json:"job_id,omitempty"` ForceSendFields []string `json:"-"` }
func (CancelAllRuns) MarshalJSON ¶ added in v0.23.0
func (s CancelAllRuns) MarshalJSON() ([]byte, error)
func (*CancelAllRuns) UnmarshalJSON ¶ added in v0.23.0
func (s *CancelAllRuns) UnmarshalJSON(b []byte) error
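For example, a short sketch cancelling every active run of one job, reusing ctx and w from the Overview sketch; the job ID is a placeholder.

    // Cancel all active runs of one job. Omit JobId (and set AllQueuedRuns)
    // to cancel queued runs across the whole workspace instead.
    err := w.Jobs.CancelAllRuns(ctx, jobs.CancelAllRuns{
        JobId: 123456, // hypothetical job ID
    })
    if err != nil {
        panic(err)
    }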
type CancelAllRunsResponse ¶ added in v0.34.0
type CancelAllRunsResponse struct { }
type CancelRunResponse ¶ added in v0.34.0
type CancelRunResponse struct { }
type ClusterInstance ¶
type ClusterInstance struct { // The canonical identifier for the cluster used by a run. This field is // always available for runs on existing clusters. For runs on new clusters, // it becomes available once the cluster is created. This value can be used // to view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`. // The logs continue to be available after the run completes. // // The response won’t include this field if the identifier is not // available yet. ClusterId string `json:"cluster_id,omitempty"` // The canonical identifier for the Spark context used by a run. This field // is filled in once the run begins execution. This value can be used to // view the Spark UI by browsing to // `/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues // to be available after the run has completed. // // The response won’t include this field if the identifier is not // available yet. SparkContextId string `json:"spark_context_id,omitempty"` ForceSendFields []string `json:"-"` }
func (ClusterInstance) MarshalJSON ¶ added in v0.23.0
func (s ClusterInstance) MarshalJSON() ([]byte, error)
func (*ClusterInstance) UnmarshalJSON ¶ added in v0.23.0
func (s *ClusterInstance) UnmarshalJSON(b []byte) error
type ClusterSpec ¶
type ClusterSpec struct { // The key of the compute requirement, specified in `job.settings.compute`, // to use for execution of this task. ComputeKey string `json:"compute_key,omitempty"` // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey string `json:"job_cluster_key,omitempty"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries []compute.Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` ForceSendFields []string `json:"-"` }
func (ClusterSpec) MarshalJSON ¶ added in v0.23.0
func (s ClusterSpec) MarshalJSON() ([]byte, error)
func (*ClusterSpec) UnmarshalJSON ¶ added in v0.23.0
func (s *ClusterSpec) UnmarshalJSON(b []byte) error
type Condition ¶ added in v0.32.0
type Condition string
const ConditionAllUpdated Condition = `ALL_UPDATED`
const ConditionAnyUpdated Condition = `ANY_UPDATED`
func (*Condition) Set ¶ added in v0.32.0
Set raw string value and validate it against allowed values
type ConditionTask ¶ added in v0.11.0
type ConditionTask struct { // The left operand of the condition task. Can be either a string value or a // job state or parameter reference. Left string `json:"left"` // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their // operands. This means that `“12.0” == “12”` will evaluate to // `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” // >= “12”` will evaluate to `false`. // // The boolean comparison to task values can be implemented with operators // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it // will be serialized to `“true”` or `“false”` for the comparison. Op ConditionTaskOp `json:"op"` // The right operand of the condition task. Can be either a string value or // a job state or parameter reference. Right string `json:"right"` }
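As an illustration of the operand and operator fields, a hedged sketch of a condition task comparing a job parameter reference against a literal; the `{{job.parameters.…}}` reference syntax is a Jobs API convention assumed here, not defined in this package.

    // Numeric comparison: downstream tasks that depend on the "true" outcome
    // run only when the referenced parameter is >= 100.
    cond := jobs.ConditionTask{
        Left:  "{{job.parameters.row_count}}", // parameter reference (assumed syntax)
        Op:    jobs.ConditionTaskOpGreaterThanOrEqual,
        Right: "100",
    }
    _ = cond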
type ConditionTaskOp ¶ added in v0.11.0
type ConditionTaskOp string
* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to `false`.
The boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.
const ConditionTaskOpEqualTo ConditionTaskOp = `EQUAL_TO`
const ConditionTaskOpGreaterThan ConditionTaskOp = `GREATER_THAN`
const ConditionTaskOpGreaterThanOrEqual ConditionTaskOp = `GREATER_THAN_OR_EQUAL`
const ConditionTaskOpLessThan ConditionTaskOp = `LESS_THAN`
const ConditionTaskOpLessThanOrEqual ConditionTaskOp = `LESS_THAN_OR_EQUAL`
const ConditionTaskOpNotEqual ConditionTaskOp = `NOT_EQUAL`
func (*ConditionTaskOp) Set ¶ added in v0.11.0
func (f *ConditionTaskOp) Set(v string) error
Set raw string value and validate it against allowed values
func (*ConditionTaskOp) String ¶ added in v0.11.0
func (f *ConditionTaskOp) String() string
String representation for fmt.Print
func (*ConditionTaskOp) Type ¶ added in v0.11.0
func (f *ConditionTaskOp) Type() string
Type always returns ConditionTaskOp to satisfy [pflag.Value] interface
type Continuous ¶ added in v0.4.0
type Continuous struct { // Indicate whether the continuous execution of the job is paused or not. // Defaults to UNPAUSED. PauseStatus PauseStatus `json:"pause_status,omitempty"` }
type CreateJob ¶
type CreateJob struct { // List of permissions to set on the job. AccessControlList []iam.AccessControlRequest `json:"access_control_list,omitempty"` // A list of compute requirements that can be referenced by tasks of this // job. Compute []JobCompute `json:"compute,omitempty"` // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. Continuous *Continuous `json:"continuous,omitempty"` // Deployment information for jobs managed by external sources. Deployment *JobDeployment `json:"deployment,omitempty"` // An optional description for the job. The maximum length is 1024 // characters in UTF-8 encoding. Description string `json:"description,omitempty"` // Edit mode of the job. // // * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * // `EDITABLE`: The job is in an editable state and can be modified. EditMode JobEditMode `json:"edit_mode,omitempty"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is // always set to `"MULTI_TASK"`. Format Format `json:"format,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. // // If `git_source` is set, these tasks retrieve the file from the remote // repository by default. However, this behavior can be overridden by // setting `source` to `WORKSPACE` on the task. // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. JobClusters []JobCluster `json:"job_clusters,omitempty"` // An optional maximum allowed number of concurrent runs of the job. Set // this value if you want to be able to execute multiple runs of the same // job concurrently. This is useful for example if you trigger your job on a // frequent schedule and want to allow consecutive runs to overlap with each // other, or if you want to trigger multiple runs which differ by their // input parameters. This setting affects only new runs. For example, // suppose the job’s concurrency is 4 and there are 4 concurrent active // runs. Then setting the concurrency to 3 won’t kill any of the active // runs. However, from then on, new runs are skipped unless there are fewer // than 3 active runs. This value cannot exceed 1000. Setting this value to // `0` causes all new runs to be skipped. MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"` // An optional name for the job. The maximum length is 4096 bytes in UTF-8 // encoding. Name string `json:"name,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. 
NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` // Write-only setting, available only in Create/Update/Reset and Submit // calls. Specifies the user or service principal that the job runs as. If // not specified, the job runs as the user who created the job. // // Only `user_name` or `service_principal_name` can be specified. If both // are specified, an error is thrown. RunAs *JobRunAs `json:"run_as,omitempty"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. Schedule *CronSchedule `json:"schedule,omitempty"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the // job. Tags map[string]string `json:"tags,omitempty"` // A list of task specifications to be executed by this job. Tasks []Task `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. Trigger *TriggerSettings `json:"trigger,omitempty"` // A collection of system notification IDs to notify when runs of this job // begin or complete. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` ForceSendFields []string `json:"-"` }
func (CreateJob) MarshalJSON ¶ added in v0.23.0
func (*CreateJob) UnmarshalJSON ¶ added in v0.23.0
type CreateResponse ¶
type CreateResponse struct { // The canonical identifier for the newly created job. JobId int64 `json:"job_id,omitempty"` ForceSendFields []string `json:"-"` }
Job was created successfully
func (CreateResponse) MarshalJSON ¶ added in v0.23.0
func (s CreateResponse) MarshalJSON() ([]byte, error)
func (*CreateResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *CreateResponse) UnmarshalJSON(b []byte) error
type CronSchedule ¶
type CronSchedule struct { // Indicate whether this schedule is paused or not. PauseStatus PauseStatus `json:"pause_status,omitempty"` // A Cron expression using Quartz syntax that describes the schedule for a // job. See [Cron Trigger] for details. This field is required. // // [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html QuartzCronExpression string `json:"quartz_cron_expression"` // A Java timezone ID. The schedule for a job is resolved with respect to // this timezone. See [Java TimeZone] for details. This field is required. // // [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html TimezoneId string `json:"timezone_id"` }
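For example, a schedule literal that runs daily at 06:00 UTC, using Quartz syntax and a Java timezone ID as described above (sketch only):

    schedule := jobs.CronSchedule{
        // Quartz cron fields: second minute hour day-of-month month day-of-week
        QuartzCronExpression: "0 0 6 * * ?",
        TimezoneId:           "Etc/UTC",
    }
    _ = schedule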
type DbtOutput ¶
type DbtOutput struct { // An optional map of headers to send when retrieving the artifact from the // `artifacts_link`. ArtifactsHeaders map[string]string `json:"artifacts_headers,omitempty"` // A pre-signed URL to download the (compressed) dbt artifacts. This link is // valid for a limited time (30 minutes). This information is only available // after the run has finished. ArtifactsLink string `json:"artifacts_link,omitempty"` ForceSendFields []string `json:"-"` }
func (DbtOutput) MarshalJSON ¶ added in v0.23.0
func (*DbtOutput) UnmarshalJSON ¶ added in v0.23.0
type DbtTask ¶
type DbtTask struct { // Optional name of the catalog to use. The value is the top level in the // 3-level namespace of Unity Catalog (catalog / schema / relation). The // catalog value can only be specified if a warehouse_id is specified. // Requires dbt-databricks >= 1.1.1. Catalog string `json:"catalog,omitempty"` // A list of dbt commands to execute. All commands must start with `dbt`. // This parameter must not be empty. A maximum of up to 10 commands can be // provided. Commands []string `json:"commands"` // Optional (relative) path to the profiles directory. Can only be specified // if no warehouse_id is specified. If no warehouse_id is specified and this // folder is unset, the root directory is used. ProfilesDirectory string `json:"profiles_directory,omitempty"` // Path to the project directory. Optional for Git sourced tasks, in which // case if no value is provided, the root of the Git repository is used. ProjectDirectory string `json:"project_directory,omitempty"` // Optional schema to write to. This parameter is only used when a // warehouse_id is also provided. If not provided, the `default` schema is // used. Schema string `json:"schema,omitempty"` // Optional location type of the project directory. When set to `WORKSPACE`, // the project will be retrieved from the local Databricks workspace. When // set to `GIT`, the project will be retrieved from a Git repository defined // in `git_source`. If the value is empty, the task will use `GIT` if // `git_source` is defined and `WORKSPACE` otherwise. // // * `WORKSPACE`: Project is located in Databricks workspace. * `GIT`: // Project is located in cloud Git provider. Source Source `json:"source,omitempty"` // ID of the SQL warehouse to connect to. If provided, we automatically // generate and provide the profile and connection details to dbt. It can be // overridden on a per-command basis by using the `--profiles-dir` command // line argument. WarehouseId string `json:"warehouse_id,omitempty"` ForceSendFields []string `json:"-"` }
func (DbtTask) MarshalJSON ¶ added in v0.23.0
func (*DbtTask) UnmarshalJSON ¶ added in v0.23.0
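A hedged sketch of a dbt task definition that runs against a SQL warehouse; the warehouse ID and schema are placeholders, and every command starts with `dbt` as required above.

    dbt := jobs.DbtTask{
        // All commands must start with `dbt`; at most 10 commands are allowed.
        Commands:    []string{"dbt deps", "dbt seed", "dbt run"},
        WarehouseId: "1234567890abcdef", // hypothetical SQL warehouse ID
        Schema:      "analytics",        // hypothetical target schema
        Source:      "GIT",              // retrieve the project from the job's git_source
    }
    _ = dbt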
type DeleteJob ¶
type DeleteJob struct { // The canonical identifier of the job to delete. This field is required. JobId int64 `json:"job_id"` }
type DeleteResponse ¶ added in v0.34.0
type DeleteResponse struct { }
type DeleteRunResponse ¶ added in v0.34.0
type DeleteRunResponse struct { }
type ExportRunOutput ¶
type ExportRunOutput struct { // The exported content in HTML format (one for every view item). To extract // the HTML notebook from the JSON response, download and run this [Python // script]. // // [Python script]: https://docs.databricks.com/en/_static/examples/extract.py Views []ViewItem `json:"views,omitempty"` }
Run was exported successfully.
type ExportRunRequest ¶ added in v0.8.0
type ExportRunRequest struct { // The canonical identifier for the run. This field is required. RunId int64 `json:"-" url:"run_id"` // Which views to export (CODE, DASHBOARDS, or ALL). Defaults to CODE. ViewsToExport ViewsToExport `json:"-" url:"views_to_export,omitempty"` }
Export and retrieve a job run
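Reusing ctx and w from the Overview sketch, a hedged export of a run's notebook views; the run ID is a placeholder.

    export, err := w.Jobs.ExportRun(ctx, jobs.ExportRunRequest{
        RunId:         987654321, // hypothetical run ID
        ViewsToExport: "CODE",    // CODE, DASHBOARDS, or ALL
    })
    if err != nil {
        panic(err)
    }
    for _, v := range export.Views {
        _ = v // each ViewItem carries one exported HTML view
    }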
type FileArrivalTriggerConfiguration ¶ added in v0.11.0
type FileArrivalTriggerConfiguration struct { // If set, the trigger starts a run only after the specified amount of time // passed since the last time the trigger fired. The minimum allowed value // is 60 seconds MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` // URL to be monitored for file arrivals. The path must point to the root or // a subpath of the external location. Url string `json:"url"` // If set, the trigger starts a run only after no file activity has occurred // for the specified amount of time. This makes it possible to wait for a // batch of incoming files to arrive before triggering a run. The minimum // allowed value is 60 seconds. WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` ForceSendFields []string `json:"-"` }
func (FileArrivalTriggerConfiguration) MarshalJSON ¶ added in v0.23.0
func (s FileArrivalTriggerConfiguration) MarshalJSON() ([]byte, error)
func (*FileArrivalTriggerConfiguration) UnmarshalJSON ¶ added in v0.23.0
func (s *FileArrivalTriggerConfiguration) UnmarshalJSON(b []byte) error
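A hedged sketch of wiring this configuration into a job's trigger settings; it assumes TriggerSettings exposes a FileArrival field (TriggerSettings is not expanded in this section) and uses a placeholder external-location URL.

    trigger := jobs.TriggerSettings{
        // Assumption: TriggerSettings carries this configuration in a FileArrival field.
        FileArrival: &jobs.FileArrivalTriggerConfiguration{
            Url:                           "s3://my-bucket/landing/", // hypothetical external location
            MinTimeBetweenTriggersSeconds: 60,
            WaitAfterLastChangeSeconds:    120,
        },
    }
    _ = trigger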
type ForEachStats ¶ added in v0.31.0
type ForEachStats struct { // Sample of the 3 most common error messages that occurred during the iteration. ErrorMessageStats []ForEachTaskErrorMessageStats `json:"error_message_stats,omitempty"` // Describes stats of the iteration. Only latest retries are considered. TaskRunStats *ForEachTaskTaskRunStats `json:"task_run_stats,omitempty"` }
type ForEachTask ¶ added in v0.31.0
type ForEachTask struct { // Controls the number of active iterations task runs. Default is 20, // maximum allowed is 100. Concurrency int `json:"concurrency,omitempty"` // Array for task to iterate on. This can be a JSON string or a reference to // an array parameter. Inputs string `json:"inputs"` // Configuration for the task that will be run for each element in the array Task Task `json:"task"` ForceSendFields []string `json:"-"` }
func (ForEachTask) MarshalJSON ¶ added in v0.31.0
func (s ForEachTask) MarshalJSON() ([]byte, error)
func (*ForEachTask) UnmarshalJSON ¶ added in v0.31.0
func (s *ForEachTask) UnmarshalJSON(b []byte) error
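For instance, a hedged sketch of a for-each task fanning a notebook task out over a JSON array input; the nested Task and NotebookTask fields are not expanded in this section and the notebook path is a placeholder.

    forEach := jobs.ForEachTask{
        // Inputs may be a JSON array literal or a reference to an array parameter.
        Inputs:      `["us-east-1", "us-west-2", "eu-west-1"]`,
        Concurrency: 2, // run at most two iterations at a time (default 20, max 100)
        Task: jobs.Task{
            TaskKey: "process_region",
            NotebookTask: &jobs.NotebookTask{
                NotebookPath: "/Workspace/jobs/process_region", // hypothetical path
            },
        },
    }
    _ = forEach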
type ForEachTaskErrorMessageStats ¶ added in v0.31.0
type ForEachTaskErrorMessageStats struct { // Describes the count of such error messages encountered during the // iterations. Count int `json:"count,omitempty"` // Describes the error message that occurred during the iterations. ErrorMessage string `json:"error_message,omitempty"` ForceSendFields []string `json:"-"` }
func (ForEachTaskErrorMessageStats) MarshalJSON ¶ added in v0.31.0
func (s ForEachTaskErrorMessageStats) MarshalJSON() ([]byte, error)
func (*ForEachTaskErrorMessageStats) UnmarshalJSON ¶ added in v0.31.0
func (s *ForEachTaskErrorMessageStats) UnmarshalJSON(b []byte) error
type ForEachTaskTaskRunStats ¶ added in v0.31.0
type ForEachTaskTaskRunStats struct { // Describes the iteration runs having an active lifecycle state or an // active run sub state. ActiveIterations int `json:"active_iterations,omitempty"` // Describes the number of failed and succeeded iteration runs. CompletedIterations int `json:"completed_iterations,omitempty"` // Describes the number of failed iteration runs. FailedIterations int `json:"failed_iterations,omitempty"` // Describes the number of iteration runs that have been scheduled. ScheduledIterations int `json:"scheduled_iterations,omitempty"` // Describes the number of succeeded iteration runs. SucceededIterations int `json:"succeeded_iterations,omitempty"` // Describes the length of the list of items to iterate over. TotalIterations int `json:"total_iterations,omitempty"` ForceSendFields []string `json:"-"` }
func (ForEachTaskTaskRunStats) MarshalJSON ¶ added in v0.31.0
func (s ForEachTaskTaskRunStats) MarshalJSON() ([]byte, error)
func (*ForEachTaskTaskRunStats) UnmarshalJSON ¶ added in v0.31.0
func (s *ForEachTaskTaskRunStats) UnmarshalJSON(b []byte) error
type Format ¶ added in v0.11.0
type Format string
const FormatMultiTask Format = `MULTI_TASK`
const FormatSingleTask Format = `SINGLE_TASK`
type GetJobPermissionLevelsRequest ¶ added in v0.15.0
type GetJobPermissionLevelsRequest struct { // The job for which to get or manage permissions. JobId string `json:"-" url:"-"` }
Get job permission levels
type GetJobPermissionLevelsResponse ¶ added in v0.15.0
type GetJobPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []JobPermissionsDescription `json:"permission_levels,omitempty"` }
type GetJobPermissionsRequest ¶ added in v0.15.0
type GetJobPermissionsRequest struct { // The job for which to get or manage permissions. JobId string `json:"-" url:"-"` }
Get job permissions
type GetJobRequest ¶ added in v0.8.0
type GetJobRequest struct { // The canonical identifier of the job to retrieve information about. This // field is required. JobId int64 `json:"-" url:"job_id"` }
Get a single job
type GetRunOutputRequest ¶ added in v0.8.0
type GetRunOutputRequest struct { // The canonical identifier for the run. RunId int64 `json:"-" url:"run_id"` }
Get the output for a single run
type GetRunRequest ¶ added in v0.8.0
type GetRunRequest struct { // Whether to include the repair history in the response. IncludeHistory bool `json:"-" url:"include_history,omitempty"` // Whether to include resolved parameter values in the response. IncludeResolvedValues bool `json:"-" url:"include_resolved_values,omitempty"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. RunId int64 `json:"-" url:"run_id"` ForceSendFields []string `json:"-"` }
Get a single job run
func (GetRunRequest) MarshalJSON ¶ added in v0.23.0
func (s GetRunRequest) MarshalJSON() ([]byte, error)
func (*GetRunRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *GetRunRequest) UnmarshalJSON(b []byte) error
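Reusing ctx and w from the Overview sketch, a short sketch fetching a single run together with its repair history; the run ID is a placeholder and it assumes Run carries the same task list documented on BaseRun.

    run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{
        RunId:          987654321, // hypothetical run ID
        IncludeHistory: true,      // also return repair_history
    })
    if err != nil {
        panic(err)
    }
    for _, task := range run.Tasks {
        _ = task // each RunTask has its own run_id and state
    }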
type GitProvider ¶ added in v0.11.0
type GitProvider string
const GitProviderAwsCodeCommit GitProvider = `awsCodeCommit`
const GitProviderAzureDevOpsServices GitProvider = `azureDevOpsServices`
const GitProviderBitbucketCloud GitProvider = `bitbucketCloud`
const GitProviderBitbucketServer GitProvider = `bitbucketServer`
const GitProviderGitHub GitProvider = `gitHub`
const GitProviderGitHubEnterprise GitProvider = `gitHubEnterprise`
const GitProviderGitLab GitProvider = `gitLab`
const GitProviderGitLabEnterpriseEdition GitProvider = `gitLabEnterpriseEdition`
func (*GitProvider) Set ¶ added in v0.11.0
func (f *GitProvider) Set(v string) error
Set raw string value and validate it against allowed values
func (*GitProvider) String ¶ added in v0.11.0
func (f *GitProvider) String() string
String representation for fmt.Print
func (*GitProvider) Type ¶ added in v0.11.0
func (f *GitProvider) Type() string
Type always returns GitProvider to satisfy [pflag.Value] interface
type GitSnapshot ¶
type GitSnapshot struct { // Commit that was used to execute the run. If git_branch was specified, // this points to the HEAD of the branch at the time of the run; if git_tag // was specified, this points to the commit the tag points to. UsedCommit string `json:"used_commit,omitempty"` ForceSendFields []string `json:"-"` }
Read-only state of the remote repository at the time the job was run. This field is only included on job runs.
func (GitSnapshot) MarshalJSON ¶ added in v0.23.0
func (s GitSnapshot) MarshalJSON() ([]byte, error)
func (*GitSnapshot) UnmarshalJSON ¶ added in v0.23.0
func (s *GitSnapshot) UnmarshalJSON(b []byte) error
type GitSource ¶
type GitSource struct { // Name of the branch to be checked out and used by this job. This field // cannot be specified in conjunction with git_tag or git_commit. GitBranch string `json:"git_branch,omitempty"` // Commit to be checked out and used by this job. This field cannot be // specified in conjunction with git_branch or git_tag. GitCommit string `json:"git_commit,omitempty"` // Unique identifier of the service used to host the Git repository. The // value is case insensitive. GitProvider GitProvider `json:"git_provider"` // Read-only state of the remote repository at the time the job was run. // This field is only included on job runs. GitSnapshot *GitSnapshot `json:"git_snapshot,omitempty"` // Name of the tag to be checked out and used by this job. This field cannot // be specified in conjunction with git_branch or git_commit. GitTag string `json:"git_tag,omitempty"` // URL of the repository to be cloned by this job. GitUrl string `json:"git_url"` // The source of the job specification in the remote repository when the job // is source controlled. JobSource *JobSource `json:"job_source,omitempty"` ForceSendFields []string `json:"-"` }
An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks.
If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task.
Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job.
func (GitSource) MarshalJSON ¶ added in v0.23.0
func (*GitSource) UnmarshalJSON ¶ added in v0.23.0
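A hedged example of a remote source specification pinned to a branch; the repository URL is a placeholder and the provider constant comes from the GitProvider values above.

    src := jobs.GitSource{
        GitUrl:      "https://github.com/example-org/example-repo", // hypothetical repository
        GitProvider: jobs.GitProviderGitHub,
        GitBranch:   "main", // mutually exclusive with GitTag and GitCommit
    }
    _ = src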
type Job ¶
type Job struct { // The time at which this job was created in epoch milliseconds // (milliseconds since 1/1/1970 UTC). CreatedTime int64 `json:"created_time,omitempty"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName string `json:"creator_user_name,omitempty"` // The canonical identifier for this job. JobId int64 `json:"job_id,omitempty"` // The email of an active workspace user or the application ID of a service // principal that the job runs as. This value can be changed by setting the // `run_as` field when creating or updating a job. // // By default, `run_as_user_name` is based on the current job settings and // is set to the creator of the job if job access control is disabled or to // the user with the `is_owner` permission if job access control is enabled. RunAsUserName string `json:"run_as_user_name,omitempty"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. Settings *JobSettings `json:"settings,omitempty"` ForceSendFields []string `json:"-"` }
Job was retrieved successfully.
func (Job) MarshalJSON ¶ added in v0.23.0
func (*Job) UnmarshalJSON ¶ added in v0.23.0
type JobAccessControlRequest ¶ added in v0.15.0
type JobAccessControlRequest struct { // name of the group GroupName string `json:"group_name,omitempty"` // Permission level PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` // application ID of a service principal ServicePrincipalName string `json:"service_principal_name,omitempty"` // name of the user UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-"` }
func (JobAccessControlRequest) MarshalJSON ¶ added in v0.23.0
func (s JobAccessControlRequest) MarshalJSON() ([]byte, error)
func (*JobAccessControlRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *JobAccessControlRequest) UnmarshalJSON(b []byte) error
type JobAccessControlResponse ¶ added in v0.15.0
type JobAccessControlResponse struct { // All permissions. AllPermissions []JobPermission `json:"all_permissions,omitempty"` // Display name of the user or service principal. DisplayName string `json:"display_name,omitempty"` // name of the group GroupName string `json:"group_name,omitempty"` // Name of the service principal. ServicePrincipalName string `json:"service_principal_name,omitempty"` // name of the user UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-"` }
func (JobAccessControlResponse) MarshalJSON ¶ added in v0.23.0
func (s JobAccessControlResponse) MarshalJSON() ([]byte, error)
func (*JobAccessControlResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *JobAccessControlResponse) UnmarshalJSON(b []byte) error
type JobCluster ¶
type JobCluster struct { // A unique name for the job cluster. This field is required and must be // unique within the job. `JobTaskSettings` may refer to this field to // determine which cluster to launch for the task execution. JobClusterKey string `json:"job_cluster_key"` // If new_cluster, a description of a cluster that is created for each task. NewCluster compute.ClusterSpec `json:"new_cluster"` }
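A hedged sketch of a shared job cluster definition; it additionally imports github.com/databricks/databricks-sdk-go/service/compute, and the compute.ClusterSpec fields (SparkVersion, NodeTypeId, NumWorkers) and their values are assumptions not documented in this section.

    cluster := jobs.JobCluster{
        JobClusterKey: "shared_cluster", // referenced from tasks via job_cluster_key
        NewCluster: compute.ClusterSpec{
            SparkVersion: "14.3.x-scala2.12", // hypothetical runtime version
            NodeTypeId:   "i3.xlarge",        // hypothetical node type
            NumWorkers:   2,
        },
    }
    _ = cluster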
type JobCompute ¶ added in v0.11.0
type JobCompute struct { // A unique name for the compute requirement. This field is required and // must be unique within the job. `JobTaskSettings` may refer to this field // to determine the compute requirements for the task execution. ComputeKey string `json:"compute_key"` Spec compute.ComputeSpec `json:"spec"` }
type JobDeployment ¶ added in v0.23.0
type JobDeployment struct { // The kind of deployment that manages the job. // // * `BUNDLE`: The job is managed by Databricks Asset Bundle. Kind JobDeploymentKind `json:"kind"` // Path of the file that contains deployment metadata. MetadataFilePath string `json:"metadata_file_path,omitempty"` ForceSendFields []string `json:"-"` }
func (JobDeployment) MarshalJSON ¶ added in v0.23.0
func (s JobDeployment) MarshalJSON() ([]byte, error)
func (*JobDeployment) UnmarshalJSON ¶ added in v0.23.0
func (s *JobDeployment) UnmarshalJSON(b []byte) error
type JobDeploymentKind ¶ added in v0.23.0
type JobDeploymentKind string
* `BUNDLE`: The job is managed by Databricks Asset Bundle.
const JobDeploymentKindBundle JobDeploymentKind = `BUNDLE`
The job is managed by Databricks Asset Bundle.
func (*JobDeploymentKind) Set ¶ added in v0.23.0
func (f *JobDeploymentKind) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobDeploymentKind) String ¶ added in v0.23.0
func (f *JobDeploymentKind) String() string
String representation for fmt.Print
func (*JobDeploymentKind) Type ¶ added in v0.23.0
func (f *JobDeploymentKind) Type() string
Type always returns JobDeploymentKind to satisfy [pflag.Value] interface
type JobEditMode ¶ added in v0.37.0
type JobEditMode string
Edit mode of the job.
* `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * `EDITABLE`: The job is in an editable state and can be modified.
const JobEditModeEditable JobEditMode = `EDITABLE`
The job is in an editable state and can be modified.
const JobEditModeUiLocked JobEditMode = `UI_LOCKED`
The job is in a locked UI state and cannot be modified.
func (*JobEditMode) Set ¶ added in v0.37.0
func (f *JobEditMode) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobEditMode) String ¶ added in v0.37.0
func (f *JobEditMode) String() string
String representation for fmt.Print
func (*JobEditMode) Type ¶ added in v0.37.0
func (f *JobEditMode) Type() string
Type always returns JobEditMode to satisfy [pflag.Value] interface
type JobEmailNotifications ¶
type JobEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the // run is skipped. NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in // the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is // specified in the `health` field for the job, notifications are not sent. OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` // A list of email addresses to be notified when a run unsuccessfully // completes. A run is considered to have completed unsuccessfully if it // ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or // `TIMED_OUT` result_state. If this is not specified on job creation, // reset, or update the list is empty, and notifications are not sent. OnFailure []string `json:"on_failure,omitempty"` // A list of email addresses to be notified when a run begins. If not // specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnStart []string `json:"on_start,omitempty"` // A list of email addresses to be notified when a run successfully // completes. A run is considered to have completed successfully if it ends // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If // not specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnSuccess []string `json:"on_success,omitempty"` ForceSendFields []string `json:"-"` }
func (JobEmailNotifications) MarshalJSON ¶ added in v0.23.0
func (s JobEmailNotifications) MarshalJSON() ([]byte, error)
func (*JobEmailNotifications) UnmarshalJSON ¶ added in v0.23.0
func (s *JobEmailNotifications) UnmarshalJSON(b []byte) error
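For example, a hedged notification block that emails on failure and success and suppresses alerts for skipped runs; the addresses are placeholders.

    notifications := jobs.JobEmailNotifications{
        OnFailure:             []string{"oncall@example.com"}, // hypothetical address
        OnSuccess:             []string{"team@example.com"},   // hypothetical address
        NoAlertForSkippedRuns: true,
    }
    _ = notifications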
type JobNotificationSettings ¶ added in v0.9.0
type JobNotificationSettings struct { // If true, do not send notifications to recipients specified in // `on_failure` if the run is canceled. NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` // If true, do not send notifications to recipients specified in // `on_failure` if the run is skipped. NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` ForceSendFields []string `json:"-"` }
func (JobNotificationSettings) MarshalJSON ¶ added in v0.23.0
func (s JobNotificationSettings) MarshalJSON() ([]byte, error)
func (*JobNotificationSettings) UnmarshalJSON ¶ added in v0.23.0
func (s *JobNotificationSettings) UnmarshalJSON(b []byte) error
type JobParameter ¶ added in v0.12.0
type JobParameter struct { // The optional default value of the parameter Default string `json:"default,omitempty"` // The name of the parameter Name string `json:"name,omitempty"` // The value used in the run Value string `json:"value,omitempty"` ForceSendFields []string `json:"-"` }
func (JobParameter) MarshalJSON ¶ added in v0.23.0
func (s JobParameter) MarshalJSON() ([]byte, error)
func (*JobParameter) UnmarshalJSON ¶ added in v0.23.0
func (s *JobParameter) UnmarshalJSON(b []byte) error
type JobParameterDefinition ¶ added in v0.12.0
type JobPermission ¶ added in v0.15.0
type JobPermission struct { Inherited bool `json:"inherited,omitempty"` InheritedFromObject []string `json:"inherited_from_object,omitempty"` // Permission level PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` ForceSendFields []string `json:"-"` }
func (JobPermission) MarshalJSON ¶ added in v0.23.0
func (s JobPermission) MarshalJSON() ([]byte, error)
func (*JobPermission) UnmarshalJSON ¶ added in v0.23.0
func (s *JobPermission) UnmarshalJSON(b []byte) error
type JobPermissionLevel ¶ added in v0.15.0
type JobPermissionLevel string
Permission level
const JobPermissionLevelCanManage JobPermissionLevel = `CAN_MANAGE`
const JobPermissionLevelCanManageRun JobPermissionLevel = `CAN_MANAGE_RUN`
const JobPermissionLevelCanView JobPermissionLevel = `CAN_VIEW`
const JobPermissionLevelIsOwner JobPermissionLevel = `IS_OWNER`
func (*JobPermissionLevel) Set ¶ added in v0.15.0
func (f *JobPermissionLevel) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobPermissionLevel) String ¶ added in v0.15.0
func (f *JobPermissionLevel) String() string
String representation for fmt.Print
func (*JobPermissionLevel) Type ¶ added in v0.15.0
func (f *JobPermissionLevel) Type() string
Type always returns JobPermissionLevel to satisfy [pflag.Value] interface
type JobPermissions ¶ added in v0.15.0
type JobPermissions struct { AccessControlList []JobAccessControlResponse `json:"access_control_list,omitempty"` ObjectId string `json:"object_id,omitempty"` ObjectType string `json:"object_type,omitempty"` ForceSendFields []string `json:"-"` }
func (JobPermissions) MarshalJSON ¶ added in v0.23.0
func (s JobPermissions) MarshalJSON() ([]byte, error)
func (*JobPermissions) UnmarshalJSON ¶ added in v0.23.0
func (s *JobPermissions) UnmarshalJSON(b []byte) error
type JobPermissionsDescription ¶ added in v0.15.0
type JobPermissionsDescription struct { Description string `json:"description,omitempty"` // Permission level PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` ForceSendFields []string `json:"-"` }
func (JobPermissionsDescription) MarshalJSON ¶ added in v0.23.0
func (s JobPermissionsDescription) MarshalJSON() ([]byte, error)
func (*JobPermissionsDescription) UnmarshalJSON ¶ added in v0.23.0
func (s *JobPermissionsDescription) UnmarshalJSON(b []byte) error
type JobPermissionsRequest ¶ added in v0.15.0
type JobPermissionsRequest struct { AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"` // The job for which to get or manage permissions. JobId string `json:"-" url:"-"` }
type JobRunAs ¶ added in v0.10.0
type JobRunAs struct { // Application ID of an active service principal. Setting this field // requires the `servicePrincipal/user` role. ServicePrincipalName string `json:"service_principal_name,omitempty"` // The email of an active workspace user. Non-admin users can only set this // field to their own email. UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-"` }
Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who created the job.
Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.
func (JobRunAs) MarshalJSON ¶ added in v0.23.0
func (*JobRunAs) UnmarshalJSON ¶ added in v0.23.0
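A hedged sketch of setting run-as at job creation to a service principal's application ID (placeholder value); only one of the two fields may be set.

    create := jobs.CreateJob{
        Name: "runs-as-service-principal",
        RunAs: &jobs.JobRunAs{
            // Requires the `servicePrincipal/user` role; mutually exclusive with UserName.
            ServicePrincipalName: "00000000-0000-0000-0000-000000000000", // hypothetical application ID
        },
    }
    _ = create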
type JobSettings ¶
type JobSettings struct {
    // A list of compute requirements that can be referenced by tasks of this
    // job.
    Compute []JobCompute `json:"compute,omitempty"`
    // An optional continuous property for this job. The continuous property
    // will ensure that there is always one run executing. Only one of
    // `schedule` and `continuous` can be used.
    Continuous *Continuous `json:"continuous,omitempty"`
    // Deployment information for jobs managed by external sources.
    Deployment *JobDeployment `json:"deployment,omitempty"`
    // An optional description for the job. The maximum length is 1024
    // characters in UTF-8 encoding.
    Description string `json:"description,omitempty"`
    // Edit mode of the job.
    //
    // * `UI_LOCKED`: The job is in a locked UI state and cannot be modified.
    // * `EDITABLE`: The job is in an editable state and can be modified.
    EditMode JobEditMode `json:"edit_mode,omitempty"`
    // An optional set of email addresses that is notified when runs of this
    // job begin or complete as well as when this job is deleted.
    EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
    // Used to tell what is the format of the job. This field is ignored in
    // Create/Update/Reset calls. When using the Jobs API 2.1 this value is
    // always set to `"MULTI_TASK"`.
    Format Format `json:"format,omitempty"`
    // An optional specification for a remote Git repository containing the
    // source code used by tasks. Version-controlled source code is supported
    // by notebook, dbt, Python script, and SQL File tasks.
    //
    // If `git_source` is set, these tasks retrieve the file from the remote
    // repository by default. However, this behavior can be overridden by
    // setting `source` to `WORKSPACE` on the task.
    //
    // Note: dbt and SQL File tasks support only version-controlled sources.
    // If dbt or SQL File tasks are used, `git_source` must be defined on the
    // job.
    GitSource *GitSource `json:"git_source,omitempty"`
    // An optional set of health rules that can be defined for this job.
    Health *JobsHealthRules `json:"health,omitempty"`
    // A list of job cluster specifications that can be shared and reused by
    // tasks of this job. Libraries cannot be declared in a shared job
    // cluster. You must declare dependent libraries in task settings.
    JobClusters []JobCluster `json:"job_clusters,omitempty"`
    // An optional maximum allowed number of concurrent runs of the job. Set
    // this value if you want to be able to execute multiple runs of the same
    // job concurrently. This is useful for example if you trigger your job on
    // a frequent schedule and want to allow consecutive runs to overlap with
    // each other, or if you want to trigger multiple runs which differ by
    // their input parameters. This setting affects only new runs. For
    // example, suppose the job’s concurrency is 4 and there are 4 concurrent
    // active runs. Then setting the concurrency to 3 won’t kill any of the
    // active runs. However, from then on, new runs are skipped unless there
    // are fewer than 3 active runs. This value cannot exceed 1000. Setting
    // this value to `0` causes all new runs to be skipped.
    MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"`
    // An optional name for the job. The maximum length is 4096 bytes in UTF-8
    // encoding.
    Name string `json:"name,omitempty"`
    // Optional notification settings that are used when sending notifications
    // to each of the `email_notifications` and `webhook_notifications` for
    // this job.
    NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"`
    // Job-level parameter definitions
    Parameters []JobParameterDefinition `json:"parameters,omitempty"`
    // The queue settings of the job.
    Queue *QueueSettings `json:"queue,omitempty"`
    // Write-only setting, available only in Create/Update/Reset and Submit
    // calls. Specifies the user or service principal that the job runs as. If
    // not specified, the job runs as the user who created the job.
    //
    // Only `user_name` or `service_principal_name` can be specified. If both
    // are specified, an error is thrown.
    RunAs *JobRunAs `json:"run_as,omitempty"`
    // An optional periodic schedule for this job. The default behavior is
    // that the job only runs when triggered by clicking “Run Now” in the Jobs
    // UI or sending an API request to `runNow`.
    Schedule *CronSchedule `json:"schedule,omitempty"`
    // A map of tags associated with the job. These are forwarded to the
    // cluster as cluster tags for jobs clusters, and are subject to the same
    // limitations as cluster tags. A maximum of 25 tags can be added to the
    // job.
    Tags map[string]string `json:"tags,omitempty"`
    // A list of task specifications to be executed by this job.
    Tasks []Task `json:"tasks,omitempty"`
    // An optional timeout applied to each run of this job. A value of `0`
    // means no timeout.
    TimeoutSeconds int `json:"timeout_seconds,omitempty"`
    // A configuration to trigger a run when certain conditions are met. The
    // default behavior is that the job runs only when triggered by clicking
    // “Run Now” in the Jobs UI or sending an API request to `runNow`.
    Trigger *TriggerSettings `json:"trigger,omitempty"`
    // A collection of system notification IDs to notify when runs of this job
    // begin or complete.
    WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"`
    ForceSendFields []string `json:"-"`
}
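As a rough sketch (not taken from the SDK examples), a JobSettings value like the following could be passed as NewSettings to Reset or Update, as in the Reset and Update examples further below. The name, cron expression, and tag are placeholders, and the CronSchedule field names are assumed here:

settings := jobs.JobSettings{
    Name:              "nightly-etl", // placeholder name
    MaxConcurrentRuns: 1,
    TimeoutSeconds:    3600,
    Schedule: &jobs.CronSchedule{
        QuartzCronExpression: "0 0 2 * * ?", // placeholder: 02:00 daily
        TimezoneId:           "UTC",
    },
    Tags: map[string]string{"team": "data-eng"}, // placeholder tag
}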
func (JobSettings) MarshalJSON ¶ added in v0.23.0
func (s JobSettings) MarshalJSON() ([]byte, error)
func (*JobSettings) UnmarshalJSON ¶ added in v0.23.0
func (s *JobSettings) UnmarshalJSON(b []byte) error
type JobSource ¶ added in v0.13.0
type JobSource struct {
    // Dirty state indicates the job is not fully synced with the job
    // specification in the remote repository.
    //
    // Possible values are: * `NOT_SYNCED`: The job is not yet synced with the
    // remote job specification. Import the remote job specification from the
    // UI to make the job fully synced. * `DISCONNECTED`: The job is
    // temporarily disconnected from the remote job specification and is
    // allowed for live edit. Import the remote job specification again from
    // the UI to make the job fully synced.
    DirtyState JobSourceDirtyState `json:"dirty_state,omitempty"`
    // Name of the branch which the job is imported from.
    ImportFromGitBranch string `json:"import_from_git_branch"`
    // Path of the job YAML file that contains the job specification.
    JobConfigPath string `json:"job_config_path"`
}
The source of the job specification in the remote repository when the job is source controlled.
type JobSourceDirtyState ¶ added in v0.13.0
type JobSourceDirtyState string
Dirty state indicates the job is not fully synced with the job specification in the remote repository.
Possible values are: * `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from the UI to make the job fully synced. * `DISCONNECTED`: The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from the UI to make the job fully synced.
const JobSourceDirtyStateDisconnected JobSourceDirtyState = `DISCONNECTED`
The job is temporarily disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from the UI to make the job fully synced.
const JobSourceDirtyStateNotSynced JobSourceDirtyState = `NOT_SYNCED`
The job is not yet synced with the remote job specification. Import the remote job specification from the UI to make the job fully synced.
func (*JobSourceDirtyState) Set ¶ added in v0.13.0
func (f *JobSourceDirtyState) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobSourceDirtyState) String ¶ added in v0.13.0
func (f *JobSourceDirtyState) String() string
String representation for fmt.Print
func (*JobSourceDirtyState) Type ¶ added in v0.13.0
func (f *JobSourceDirtyState) Type() string
Type always returns JobSourceDirtyState to satisfy [pflag.Value] interface
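A minimal sketch of using these pflag.Value methods directly (assumes the fmt package is imported); Set validates the raw string against the allowed values:

var state jobs.JobSourceDirtyState
if err := state.Set("NOT_SYNCED"); err != nil {
    // Returned for any value outside the allowed set.
    panic(err)
}
fmt.Println(state.String()) // prints NOT_SYNCED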
type JobsAPI ¶
type JobsAPI struct {
// contains filtered or unexported fields
}
The Jobs API allows you to create, edit, and delete jobs.
You can use a Databricks job to run a data processing or data analysis task in a Databricks cluster with scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use scheduling system. You can implement job tasks using notebooks, JARs, Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java applications.
You should never hard code secrets or store them in plain text. Use the Secrets CLI to manage secrets in the Databricks CLI. Use the Secrets utility to reference secrets in notebooks and jobs.
func NewJobs ¶
func NewJobs(client *client.DatabricksClient) *JobsAPI
func (*JobsAPI) BaseJobSettingsNameToJobIdMap ¶ added in v0.3.0
func (a *JobsAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error)
BaseJobSettingsNameToJobIdMap calls JobsAPI.ListAll and creates a map of results with BaseJob.Settings.Name as key and BaseJob.JobId as value.
Returns an error if there's more than one BaseJob with the same .Settings.Name.
Note: All BaseJob instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
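A short usage sketch, assuming w and ctx are set up as in the examples below and that a job named "nightly-etl" (a placeholder) exists:

nameToId, err := w.Jobs.BaseJobSettingsNameToJobIdMap(ctx, jobs.ListJobsRequest{})
if err != nil {
    panic(err)
}
if jobId, ok := nameToId["nightly-etl"]; ok { // placeholder job name
    logger.Infof(ctx, "found job id %d", jobId)
}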
func (*JobsAPI) CancelAllRuns ¶
func (a *JobsAPI) CancelAllRuns(ctx context.Context, request CancelAllRuns) error
Cancel all runs of a job.
Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs from being started.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) err = w.Jobs.CancelAllRuns(ctx, jobs.CancelAllRuns{ JobId: createdJob.JobId, }) if err != nil { panic(err) } // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) CancelRun ¶
func (a *JobsAPI) CancelRun(ctx context.Context, cancelRun CancelRun) (*WaitGetRunJobTerminatedOrSkipped[struct{}], error)
Cancel a run.
Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when this request completes.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) runNowResponse, err := w.Jobs.RunNow(ctx, jobs.RunNow{ JobId: createdJob.JobId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", runNowResponse) cancelledRun, err := w.Jobs.CancelRunAndWait(ctx, jobs.CancelRun{ RunId: runNowResponse.Response.RunId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", cancelledRun) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) CancelRunAndWait
deprecated
func (a *JobsAPI) CancelRunAndWait(ctx context.Context, cancelRun CancelRun, options ...retries.Option[Run]) (*Run, error)
Calls JobsAPI.CancelRun and waits to reach TERMINATED or SKIPPED state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
Deprecated: use JobsAPI.CancelRun.Get() or JobsAPI.WaitGetRunJobTerminatedOrSkipped
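A sketch of the non-deprecated equivalent, assuming runId holds the ID of an active run (for example, obtained from RunNow):

wait, err := w.Jobs.CancelRun(ctx, jobs.CancelRun{
    RunId: runId,
})
if err != nil {
    panic(err)
}
// Get blocks until the run reaches the TERMINATED or SKIPPED state.
run, err := wait.Get()
if err != nil {
    panic(err)
}
logger.Infof(ctx, "final state: %v", run.State)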
func (*JobsAPI) CancelRunByRunId ¶
Cancel a run.
Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when this request completes.
func (*JobsAPI) CancelRunByRunIdAndWait ¶
func (*JobsAPI) Create ¶
Create a new job.
Create a new job.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) DeleteRun ¶
Delete a job run.
Deletes a non-active run. Returns an error if the run is active.
func (*JobsAPI) DeleteRunByRunId ¶
Delete a job run.
Deletes a non-active run. Returns an error if the run is active.
func (*JobsAPI) ExportRun ¶
func (a *JobsAPI) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error)
Export and retrieve a job run.
Export and retrieve the job run task.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) runById, err := w.Jobs.RunNowAndWait(ctx, jobs.RunNow{ JobId: createdJob.JobId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", runById) exportedView, err := w.Jobs.ExportRun(ctx, jobs.ExportRunRequest{ RunId: runById.Tasks[0].RunId, ViewsToExport: "CODE", }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", exportedView) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) Get ¶
Get a single job.
Retrieves the details for a single job.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) byId, err := w.Jobs.GetByJobId(ctx, createdJob.JobId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) GetBySettingsName ¶
GetBySettingsName calls JobsAPI.BaseJobSettingsNameToJobIdMap and returns a single BaseJob.
Returns an error if there's more than one BaseJob with the same .Settings.Name.
Note: All BaseJob instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
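For example, a sketch with a placeholder job name and w/ctx set up as in the other examples:

job, err := w.Jobs.GetBySettingsName(ctx, "nightly-etl") // placeholder name
if err != nil {
    // An error is also returned if more than one job shares this name.
    panic(err)
}
logger.Infof(ctx, "found job id %d", job.JobId)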
func (*JobsAPI) GetPermissionLevels ¶ added in v0.19.0
func (a *JobsAPI) GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error)
Get job permission levels.
Gets the permission levels that a user can have on an object.
func (*JobsAPI) GetPermissionLevelsByJobId ¶ added in v0.19.0
func (a *JobsAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error)
Get job permission levels.
Gets the permission levels that a user can have on an object.
func (*JobsAPI) GetPermissions ¶ added in v0.19.0
func (a *JobsAPI) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error)
Get job permissions.
Gets the permissions of a job. Jobs can inherit permissions from their root object.
func (*JobsAPI) GetPermissionsByJobId ¶ added in v0.19.0
Get job permissions.
Gets the permissions of a job. Jobs can inherit permissions from their root object.
func (*JobsAPI) GetRunOutput ¶
func (a *JobsAPI) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error)
Get the output for a single run.
Retrieve the output and metadata of a single task run. When a notebook task returns a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks restricts this API to returning the first 5 MB of the output. To return a larger result, you can store job results in a cloud storage service.
This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after 60 days. If you want to reference them beyond 60 days, you must save old run results before they expire.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() run, err := w.Jobs.SubmitAndWait(ctx, jobs.SubmitRun{ RunName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.SubmitTask{jobs.SubmitTask{ ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", run) output, err := w.Jobs.GetRunOutputByRunId(ctx, run.Tasks[0].RunId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", output) // cleanup err = w.Jobs.DeleteRunByRunId(ctx, run.RunId) if err != nil { panic(err) }
Output:
func (*JobsAPI) GetRunOutputByRunId ¶
Get the output for a single run.
Retrieve the output and metadata of a single task run. When a notebook task returns a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks restricts this API to returning the first 5 MB of the output. To return a larger result, you can store job results in a cloud storage service.
This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after 60 days. If you want to reference them beyond 60 days, you must save old run results before they expire.
func (*JobsAPI) Impl ¶
func (a *JobsAPI) Impl() JobsService
Impl returns the low-level Jobs API implementation. Deprecated: use MockJobsInterface instead.
func (*JobsAPI) List ¶ added in v0.24.0
List jobs.
Retrieves a list of jobs.
This method is generated by Databricks SDK Code Generator.
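A sketch of consuming the iterator returned by List, assuming the iterator exposes the listing package's HasNext/Next methods:

it := w.Jobs.List(ctx, jobs.ListJobsRequest{ExpandTasks: false})
for it.HasNext(ctx) {
    job, err := it.Next(ctx)
    if err != nil {
        panic(err)
    }
    logger.Infof(ctx, "found job %d", job.JobId)
}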
func (*JobsAPI) ListAll ¶
List jobs.
Retrieves a list of jobs.
This method is generated by Databricks SDK Code Generator.
Example (JobsApiFullIntegration) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}
jobList, err := w.Jobs.ListAll(ctx, jobs.ListJobsRequest{
    ExpandTasks: false,
})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", jobList)
Output:
func (*JobsAPI) ListRuns ¶ added in v0.24.0
List job runs.
List runs in descending order by start time.
This method is generated by Databricks SDK Code Generator.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) runList, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{ JobId: createdJob.JobId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", runList) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) ListRunsAll ¶
List job runs.
List runs in descending order by start time.
This method is generated by Databricks SDK Code Generator.
func (*JobsAPI) RepairRun ¶
func (a *JobsAPI) RepairRun(ctx context.Context, repairRun RepairRun) (*WaitGetRunJobTerminatedOrSkipped[RepairRunResponse], error)
Repair a job run.
Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job and task settings, and can be viewed in the history for the original job run.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) runNowResponse, err := w.Jobs.RunNow(ctx, jobs.RunNow{ JobId: createdJob.JobId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", runNowResponse) cancelledRun, err := w.Jobs.CancelRunAndWait(ctx, jobs.CancelRun{ RunId: runNowResponse.Response.RunId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", cancelledRun) repairedRun, err := w.Jobs.RepairRunAndWait(ctx, jobs.RepairRun{ RerunTasks: []string{cancelledRun.Tasks[0].TaskKey}, RunId: runNowResponse.Response.RunId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", repairedRun) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) RepairRunAndWait
deprecated
func (a *JobsAPI) RepairRunAndWait(ctx context.Context, repairRun RepairRun, options ...retries.Option[Run]) (*Run, error)
Calls JobsAPI.RepairRun and waits to reach TERMINATED or SKIPPED state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
Deprecated: use JobsAPI.RepairRun.Get() or JobsAPI.WaitGetRunJobTerminatedOrSkipped
func (*JobsAPI) Reset ¶
Update all job settings (reset).
Overwrite all settings for the given job. Use the [_Update_ endpoint](:method:jobs/update) to update job settings partially.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) newName := fmt.Sprintf("sdk-%x", time.Now().UnixNano()) byId, err := w.Jobs.GetByJobId(ctx, createdJob.JobId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) err = w.Jobs.Reset(ctx, jobs.ResetJob{ JobId: byId.JobId, NewSettings: jobs.JobSettings{ Name: newName, Tasks: byId.Settings.Tasks, }, }) if err != nil { panic(err) } // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) RunNow ¶
func (a *JobsAPI) RunNow(ctx context.Context, runNow RunNow) (*WaitGetRunJobTerminatedOrSkipped[RunNowResponse], error)
Trigger a new job run.
Run a job and return the `run_id` of the triggered run.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) runById, err := w.Jobs.RunNowAndWait(ctx, jobs.RunNow{ JobId: createdJob.JobId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", runById) // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) RunNowAndWait
deprecated
func (a *JobsAPI) RunNowAndWait(ctx context.Context, runNow RunNow, options ...retries.Option[Run]) (*Run, error)
Calls JobsAPI.RunNow and waits to reach TERMINATED or SKIPPED state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
Deprecated: use JobsAPI.RunNow.Get() or JobsAPI.WaitGetRunJobTerminatedOrSkipped
func (*JobsAPI) SetPermissions ¶ added in v0.19.0
func (a *JobsAPI) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)
Set job permissions.
Sets permissions on a job. Jobs can inherit permissions from their root object.
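A rough sketch, assuming createdJob is the result of Create as in the examples above, that the request takes the job ID as a string (as the ByJobId helpers suggest), and that the permission-level constant below exists with this name; these are assumptions, not details shown on this page:

_, err := w.Jobs.SetPermissions(ctx, jobs.JobPermissionsRequest{
    JobId: fmt.Sprint(createdJob.JobId), // assumed: job ID passed as a string
    AccessControlList: []jobs.JobAccessControlRequest{{
        UserName:        "someone@example.com",               // placeholder principal
        PermissionLevel: jobs.JobPermissionLevelCanManageRun, // assumed constant name
    }},
})
if err != nil {
    panic(err)
}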
func (*JobsAPI) Submit ¶
func (a *JobsAPI) Submit(ctx context.Context, submitRun SubmitRun) (*WaitGetRunJobTerminatedOrSkipped[SubmitRunResponse], error)
Create and trigger a one-time run.
Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job. Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the run state after the job is submitted.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() run, err := w.Jobs.SubmitAndWait(ctx, jobs.SubmitRun{ RunName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.SubmitTask{jobs.SubmitTask{ ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", run) // cleanup err = w.Jobs.DeleteRunByRunId(ctx, run.RunId) if err != nil { panic(err) }
Output:
func (*JobsAPI) SubmitAndWait
deprecated
func (a *JobsAPI) SubmitAndWait(ctx context.Context, submitRun SubmitRun, options ...retries.Option[Run]) (*Run, error)
Calls JobsAPI.Submit and waits to reach TERMINATED or SKIPPED state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Run](60*time.Minute) functional option.
Deprecated: use JobsAPI.Submit.Get() or JobsAPI.WaitGetRunJobTerminatedOrSkipped
func (*JobsAPI) Update ¶
Update job settings partially.
Add, update, or remove specific settings of an existing job. Use the [_Reset_ endpoint](:method:jobs/reset) to overwrite all job settings.
Example (JobsApiFullIntegration) ¶
ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { panic(err) } notebookPath := func() string { me, err := w.CurrentUser.Me(ctx) if err != nil { panic(err) } return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano())) }() clusterId := func() string { clusterId := os.Getenv("DATABRICKS_CLUSTER_ID") err := w.Clusters.EnsureClusterIsRunning(ctx, clusterId) if err != nil { panic(err) } return clusterId }() newName := fmt.Sprintf("sdk-%x", time.Now().UnixNano()) createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{ Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Tasks: []jobs.Task{jobs.Task{ Description: "test", ExistingClusterId: clusterId, NotebookTask: &jobs.NotebookTask{ NotebookPath: notebookPath, }, TaskKey: "test", TimeoutSeconds: 0, }}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", createdJob) err = w.Jobs.Update(ctx, jobs.UpdateJob{ JobId: createdJob.JobId, NewSettings: &jobs.JobSettings{ Name: newName, MaxConcurrentRuns: 5, }, }) if err != nil { panic(err) } // cleanup err = w.Jobs.DeleteByJobId(ctx, createdJob.JobId) if err != nil { panic(err) }
Output:
func (*JobsAPI) UpdatePermissions ¶ added in v0.19.0
func (a *JobsAPI) UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error)
Update job permissions.
Updates the permissions on a job. Jobs can inherit permissions from their root object.
func (*JobsAPI) WaitGetRunJobTerminatedOrSkipped ¶ added in v0.10.0
func (a *JobsAPI) WaitGetRunJobTerminatedOrSkipped(ctx context.Context, runId int64, timeout time.Duration, callback func(*Run)) (*Run, error)
WaitGetRunJobTerminatedOrSkipped repeatedly calls JobsAPI.GetRun and waits to reach TERMINATED or SKIPPED state
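A usage sketch, assuming runId holds an existing run ID and that the callback is invoked with intermediate run snapshots while polling:

run, err := w.Jobs.WaitGetRunJobTerminatedOrSkipped(ctx, runId, 30*time.Minute,
    func(r *jobs.Run) {
        logger.Infof(ctx, "current state: %v", r.State)
    })
if err != nil {
    panic(err)
}
logger.Infof(ctx, "final state: %v", run.State)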
func (*JobsAPI) WithImpl ¶
func (a *JobsAPI) WithImpl(impl JobsService) JobsInterface
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockJobsInterface instead.
type JobsHealthMetric ¶ added in v0.13.0
type JobsHealthMetric string
Specifies the health metric that is being evaluated for a particular health rule.
const JobsHealthMetricRunDurationSeconds JobsHealthMetric = `RUN_DURATION_SECONDS`
func (*JobsHealthMetric) Set ¶ added in v0.13.0
func (f *JobsHealthMetric) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobsHealthMetric) String ¶ added in v0.13.0
func (f *JobsHealthMetric) String() string
String representation for fmt.Print
func (*JobsHealthMetric) Type ¶ added in v0.13.0
func (f *JobsHealthMetric) Type() string
Type always returns JobsHealthMetric to satisfy [pflag.Value] interface
type JobsHealthOperator ¶ added in v0.13.0
type JobsHealthOperator string
Specifies the operator used to compare the health metric value with the specified threshold.
const JobsHealthOperatorGreaterThan JobsHealthOperator = `GREATER_THAN`
func (*JobsHealthOperator) Set ¶ added in v0.13.0
func (f *JobsHealthOperator) Set(v string) error
Set raw string value and validate it against allowed values
func (*JobsHealthOperator) String ¶ added in v0.13.0
func (f *JobsHealthOperator) String() string
String representation for fmt.Print
func (*JobsHealthOperator) Type ¶ added in v0.13.0
func (f *JobsHealthOperator) Type() string
Type always returns JobsHealthOperator to satisfy [pflag.Value] interface
type JobsHealthRule ¶ added in v0.13.0
type JobsHealthRule struct {
    // Specifies the health metric that is being evaluated for a particular
    // health rule.
    Metric JobsHealthMetric `json:"metric"`
    // Specifies the operator used to compare the health metric value with the
    // specified threshold.
    Op JobsHealthOperator `json:"op"`
    // Specifies the threshold value that the health metric should obey to
    // satisfy the health rule.
    Value int64 `json:"value"`
}
type JobsHealthRules ¶ added in v0.13.0
type JobsHealthRules struct {
Rules []JobsHealthRule `json:"rules,omitempty"`
}
An optional set of health rules that can be defined for this job.
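For example, a sketch of a health rule set that flags runs longer than one hour, built from the constants documented above and suitable for the Health field of JobSettings:

health := &jobs.JobsHealthRules{
    Rules: []jobs.JobsHealthRule{{
        Metric: jobs.JobsHealthMetricRunDurationSeconds,
        Op:     jobs.JobsHealthOperatorGreaterThan,
        Value:  3600, // threshold in seconds
    }},
}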
type JobsInterface ¶ added in v0.29.0
type JobsInterface interface { // WithImpl could be used to override low-level API implementations for unit // testing purposes with [github.com/golang/mock] or other mocking frameworks. // Deprecated: use MockJobsInterface instead. WithImpl(impl JobsService) JobsInterface // Impl returns low-level Jobs API implementation // Deprecated: use MockJobsInterface instead. Impl() JobsService // WaitGetRunJobTerminatedOrSkipped repeatedly calls [JobsAPI.GetRun] and waits to reach TERMINATED or SKIPPED state WaitGetRunJobTerminatedOrSkipped(ctx context.Context, runId int64, timeout time.Duration, callback func(*Run)) (*Run, error) // Cancel all runs of a job. // // Cancels all active runs of a job. The runs are canceled asynchronously, so it // doesn't prevent new runs from being started. CancelAllRuns(ctx context.Context, request CancelAllRuns) error // Cancel a run. // // Cancels a job run or a task run. The run is canceled asynchronously, so it // may still be running when this request completes. CancelRun(ctx context.Context, cancelRun CancelRun) (*WaitGetRunJobTerminatedOrSkipped[struct{}], error) // Calls [JobsAPIInterface.CancelRun] and waits to reach TERMINATED or SKIPPED state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[Run](60*time.Minute) functional option. // // Deprecated: use [JobsAPIInterface.CancelRun].Get() or [JobsAPIInterface.WaitGetRunJobTerminatedOrSkipped] CancelRunAndWait(ctx context.Context, cancelRun CancelRun, options ...retries.Option[Run]) (*Run, error) // Cancel a run. // // Cancels a job run or a task run. The run is canceled asynchronously, so it // may still be running when this request completes. CancelRunByRunId(ctx context.Context, runId int64) error CancelRunByRunIdAndWait(ctx context.Context, runId int64, options ...retries.Option[Run]) (*Run, error) // Create a new job. // // Create a new job. Create(ctx context.Context, request CreateJob) (*CreateResponse, error) // Delete a job. // // Deletes a job. Delete(ctx context.Context, request DeleteJob) error // Delete a job. // // Deletes a job. DeleteByJobId(ctx context.Context, jobId int64) error // Delete a job run. // // Deletes a non-active run. Returns an error if the run is active. DeleteRun(ctx context.Context, request DeleteRun) error // Delete a job run. // // Deletes a non-active run. Returns an error if the run is active. DeleteRunByRunId(ctx context.Context, runId int64) error // Export and retrieve a job run. // // Export and retrieve the job run task. ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) // Get a single job. // // Retrieves the details for a single job. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get a single job. // // Retrieves the details for a single job. GetByJobId(ctx context.Context, jobId int64) (*Job, error) // Get job permission levels. // // Gets the permission levels that a user can have on an object. GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) // Get job permission levels. // // Gets the permission levels that a user can have on an object. GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error) // Get job permissions. // // Gets the permissions of a job. Jobs can inherit permissions from their root // object. GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) // Get job permissions. 
// // Gets the permissions of a job. Jobs can inherit permissions from their root // object. GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error) // Get a single job run. // // Retrieve the metadata of a run. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. // // Retrieve the output and metadata of a single task run. When a notebook task // returns a value through the `dbutils.notebook.exit()` call, you can use this // endpoint to retrieve that value. Databricks restricts this API to returning // the first 5 MB of the output. To return a larger result, you can store job // results in a cloud storage service. // // This endpoint validates that the __run_id__ parameter is valid and returns an // HTTP status code 400 if the __run_id__ parameter is invalid. Runs are // automatically removed after 60 days. If you to want to reference them beyond // 60 days, you must save old run results before they expire. GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) // Get the output for a single run. // // Retrieve the output and metadata of a single task run. When a notebook task // returns a value through the `dbutils.notebook.exit()` call, you can use this // endpoint to retrieve that value. Databricks restricts this API to returning // the first 5 MB of the output. To return a larger result, you can store job // results in a cloud storage service. // // This endpoint validates that the __run_id__ parameter is valid and returns an // HTTP status code 400 if the __run_id__ parameter is invalid. Runs are // automatically removed after 60 days. If you to want to reference them beyond // 60 days, you must save old run results before they expire. GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error) // List jobs. // // Retrieves a list of jobs. // // This method is generated by Databricks SDK Code Generator. List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] // List jobs. // // Retrieves a list of jobs. // // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) // BaseJobSettingsNameToJobIdMap calls [JobsAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // // Note: All [BaseJob] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) // GetBySettingsName calls [JobsAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // // Note: All [BaseJob] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. GetBySettingsName(ctx context.Context, name string) (*BaseJob, error) // List job runs. // // List runs in descending order by start time. // // This method is generated by Databricks SDK Code Generator. ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] // List job runs. // // List runs in descending order by start time. // // This method is generated by Databricks SDK Code Generator. 
ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) // Repair a job run. // // Re-run one or more tasks. Tasks are re-run as part of the original job run. // They use the current job and task settings, and can be viewed in the history // for the original job run. RepairRun(ctx context.Context, repairRun RepairRun) (*WaitGetRunJobTerminatedOrSkipped[RepairRunResponse], error) // Calls [JobsAPIInterface.RepairRun] and waits to reach TERMINATED or SKIPPED state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[Run](60*time.Minute) functional option. // // Deprecated: use [JobsAPIInterface.RepairRun].Get() or [JobsAPIInterface.WaitGetRunJobTerminatedOrSkipped] RepairRunAndWait(ctx context.Context, repairRun RepairRun, options ...retries.Option[Run]) (*Run, error) // Update all job settings (reset). // // Overwrite all settings for the given job. Use the [_Update_ // endpoint](:method:jobs/update) to update job settings partially. Reset(ctx context.Context, request ResetJob) error // Trigger a new job run. // // Run a job and return the `run_id` of the triggered run. RunNow(ctx context.Context, runNow RunNow) (*WaitGetRunJobTerminatedOrSkipped[RunNowResponse], error) // Calls [JobsAPIInterface.RunNow] and waits to reach TERMINATED or SKIPPED state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[Run](60*time.Minute) functional option. // // Deprecated: use [JobsAPIInterface.RunNow].Get() or [JobsAPIInterface.WaitGetRunJobTerminatedOrSkipped] RunNowAndWait(ctx context.Context, runNow RunNow, options ...retries.Option[Run]) (*Run, error) // Set job permissions. // // Sets permissions on a job. Jobs can inherit permissions from their root // object. SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) // Create and trigger a one-time run. // // Submit a one-time run. This endpoint allows you to submit a workload directly // without creating a job. Runs submitted using this endpoint don’t display in // the UI. Use the `jobs/runs/get` API to check the run state after the job is // submitted. Submit(ctx context.Context, submitRun SubmitRun) (*WaitGetRunJobTerminatedOrSkipped[SubmitRunResponse], error) // Calls [JobsAPIInterface.Submit] and waits to reach TERMINATED or SKIPPED state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[Run](60*time.Minute) functional option. // // Deprecated: use [JobsAPIInterface.Submit].Get() or [JobsAPIInterface.WaitGetRunJobTerminatedOrSkipped] SubmitAndWait(ctx context.Context, submitRun SubmitRun, options ...retries.Option[Run]) (*Run, error) // Update job settings partially. // // Add, update, or remove specific settings of an existing job. Use the [_Reset_ // endpoint](:method:jobs/reset) to overwrite all job settings. Update(ctx context.Context, request UpdateJob) error // Update job permissions. // // Updates the permissions on a job. Jobs can inherit permissions from their // root object. UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) }
type JobsService ¶
type JobsService interface { // Cancel all runs of a job. // // Cancels all active runs of a job. The runs are canceled asynchronously, // so it doesn't prevent new runs from being started. CancelAllRuns(ctx context.Context, request CancelAllRuns) error // Cancel a run. // // Cancels a job run or a task run. The run is canceled asynchronously, so // it may still be running when this request completes. CancelRun(ctx context.Context, request CancelRun) error // Create a new job. // // Create a new job. Create(ctx context.Context, request CreateJob) (*CreateResponse, error) // Delete a job. // // Deletes a job. Delete(ctx context.Context, request DeleteJob) error // Delete a job run. // // Deletes a non-active run. Returns an error if the run is active. DeleteRun(ctx context.Context, request DeleteRun) error // Export and retrieve a job run. // // Export and retrieve the job run task. ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) // Get a single job. // // Retrieves the details for a single job. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get job permission levels. // // Gets the permission levels that a user can have on an object. GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) // Get job permissions. // // Gets the permissions of a job. Jobs can inherit permissions from their // root object. GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) // Get a single job run. // // Retrieve the metadata of a run. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. // // Retrieve the output and metadata of a single task run. When a notebook // task returns a value through the `dbutils.notebook.exit()` call, you can // use this endpoint to retrieve that value. Databricks restricts this API // to returning the first 5 MB of the output. To return a larger result, you // can store job results in a cloud storage service. // // This endpoint validates that the __run_id__ parameter is valid and // returns an HTTP status code 400 if the __run_id__ parameter is invalid. // Runs are automatically removed after 60 days. If you to want to reference // them beyond 60 days, you must save old run results before they expire. GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) // List jobs. // // Retrieves a list of jobs. // // Use ListAll() to get all BaseJob instances, which will iterate over every result page. List(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) // List job runs. // // List runs in descending order by start time. // // Use ListRunsAll() to get all BaseRun instances, which will iterate over every result page. ListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) // Repair a job run. // // Re-run one or more tasks. Tasks are re-run as part of the original job // run. They use the current job and task settings, and can be viewed in the // history for the original job run. RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) // Update all job settings (reset). // // Overwrite all settings for the given job. Use the [_Update_ // endpoint](:method:jobs/update) to update job settings partially. Reset(ctx context.Context, request ResetJob) error // Trigger a new job run. // // Run a job and return the `run_id` of the triggered run. 
RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) // Set job permissions. // // Sets permissions on a job. Jobs can inherit permissions from their root // object. SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) // Create and trigger a one-time run. // // Submit a one-time run. This endpoint allows you to submit a workload // directly without creating a job. Runs submitted using this endpoint // don’t display in the UI. Use the `jobs/runs/get` API to check the run // state after the job is submitted. Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) // Update job settings partially. // // Add, update, or remove specific settings of an existing job. Use the // [_Reset_ endpoint](:method:jobs/reset) to overwrite all job settings. Update(ctx context.Context, request UpdateJob) error // Update job permissions. // // Updates the permissions on a job. Jobs can inherit permissions from their // root object. UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) }
The Jobs API allows you to create, edit, and delete jobs.
You can use a Databricks job to run a data processing or data analysis task in a Databricks cluster with scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use scheduling system. You can implement job tasks using notebooks, JARs, Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java applications.
You should never hard code secrets or store them in plain text. Use the Secrets CLI to manage secrets in the Databricks CLI. Use the Secrets utility to reference secrets in notebooks and jobs.
type ListJobsRequest ¶ added in v0.8.0
type ListJobsRequest struct {
    // Whether to include task and cluster details in the response.
    ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"`
    // The number of jobs to return. This value must be greater than 0 and
    // less than or equal to 100. The default value is 20.
    Limit int `json:"-" url:"limit,omitempty"`
    // A filter on the list based on the exact (case insensitive) job name.
    Name string `json:"-" url:"name,omitempty"`
    // The offset of the first job to return, relative to the most recently
    // created job. Deprecated since June 2023. Use `page_token` to iterate
    // through the pages instead.
    Offset int `json:"-" url:"offset,omitempty"`
    // Use `next_page_token` or `prev_page_token` returned from the previous
    // request to list the next or previous page of jobs respectively.
    PageToken string `json:"-" url:"page_token,omitempty"`
    ForceSendFields []string `json:"-"`
}
List jobs
func (ListJobsRequest) MarshalJSON ¶ added in v0.23.0
func (s ListJobsRequest) MarshalJSON() ([]byte, error)
func (*ListJobsRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *ListJobsRequest) UnmarshalJSON(b []byte) error
type ListJobsResponse ¶
type ListJobsResponse struct {
    // If true, additional jobs matching the provided filter are available for
    // listing.
    HasMore bool `json:"has_more,omitempty"`
    // The list of jobs. Only included in the response if there are jobs to
    // list.
    Jobs []BaseJob `json:"jobs,omitempty"`
    // A token that can be used to list the next page of jobs (if applicable).
    NextPageToken string `json:"next_page_token,omitempty"`
    // A token that can be used to list the previous page of jobs (if
    // applicable).
    PrevPageToken string `json:"prev_page_token,omitempty"`
    ForceSendFields []string `json:"-"`
}
List of jobs was retrieved successfully.
func (ListJobsResponse) MarshalJSON ¶ added in v0.23.0
func (s ListJobsResponse) MarshalJSON() ([]byte, error)
func (*ListJobsResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *ListJobsResponse) UnmarshalJSON(b []byte) error
type ListRunsRequest ¶ added in v0.8.0
type ListRunsRequest struct {
    // If active_only is `true`, only active runs are included in the results;
    // otherwise, lists both active and completed runs. An active run is a run
    // in the `QUEUED`, `PENDING`, `RUNNING`, or `TERMINATING` state. This
    // field cannot be `true` when completed_only is `true`.
    ActiveOnly bool `json:"-" url:"active_only,omitempty"`
    // If completed_only is `true`, only completed runs are included in the
    // results; otherwise, lists both active and completed runs. This field
    // cannot be `true` when active_only is `true`.
    CompletedOnly bool `json:"-" url:"completed_only,omitempty"`
    // Whether to include task and cluster details in the response.
    ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"`
    // The job for which to list runs. If omitted, the Jobs service lists runs
    // from all jobs.
    JobId int64 `json:"-" url:"job_id,omitempty"`
    // The number of runs to return. This value must be greater than 0 and
    // less than 25. The default value is 20. If a request specifies a limit
    // of 0, the service instead uses the maximum limit.
    Limit int `json:"-" url:"limit,omitempty"`
    // The offset of the first run to return, relative to the most recent run.
    // Deprecated since June 2023. Use `page_token` to iterate through the
    // pages instead.
    Offset int `json:"-" url:"offset,omitempty"`
    // Use `next_page_token` or `prev_page_token` returned from the previous
    // request to list the next or previous page of runs respectively.
    PageToken string `json:"-" url:"page_token,omitempty"`
    // The type of runs to return. For a description of run types, see
    // :method:jobs/getRun.
    RunType RunType `json:"-" url:"run_type,omitempty"`
    // Show runs that started _at or after_ this value. The value must be a
    // UTC timestamp in milliseconds. Can be combined with _start_time_to_ to
    // filter by a time range.
    StartTimeFrom int64 `json:"-" url:"start_time_from,omitempty"`
    // Show runs that started _at or before_ this value. The value must be a
    // UTC timestamp in milliseconds. Can be combined with _start_time_from_
    // to filter by a time range.
    StartTimeTo int64 `json:"-" url:"start_time_to,omitempty"`
    ForceSendFields []string `json:"-"`
}
List job runs
func (ListRunsRequest) MarshalJSON ¶ added in v0.23.0
func (s ListRunsRequest) MarshalJSON() ([]byte, error)
func (*ListRunsRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *ListRunsRequest) UnmarshalJSON(b []byte) error
type ListRunsResponse ¶
type ListRunsResponse struct {
    // If true, additional runs matching the provided filter are available for
    // listing.
    HasMore bool `json:"has_more,omitempty"`
    // A token that can be used to list the next page of runs (if applicable).
    NextPageToken string `json:"next_page_token,omitempty"`
    // A token that can be used to list the previous page of runs (if
    // applicable).
    PrevPageToken string `json:"prev_page_token,omitempty"`
    // A list of runs, from most recently started to least. Only included in
    // the response if there are runs to list.
    Runs []BaseRun `json:"runs,omitempty"`
    ForceSendFields []string `json:"-"`
}
List of runs was retrieved successfully.
func (ListRunsResponse) MarshalJSON ¶ added in v0.23.0
func (s ListRunsResponse) MarshalJSON() ([]byte, error)
func (*ListRunsResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *ListRunsResponse) UnmarshalJSON(b []byte) error
type NotebookOutput ¶
type NotebookOutput struct {
    // The value passed to
    // [dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit).
    // Databricks restricts this API to return the first 5 MB of the value.
    // For a larger result, your job can store the results in a cloud storage
    // service. This field is absent if `dbutils.notebook.exit()` was never
    // called.
    Result string `json:"result,omitempty"`
    // Whether or not the result was truncated.
    Truncated bool `json:"truncated,omitempty"`
    ForceSendFields []string `json:"-"`
}
func (NotebookOutput) MarshalJSON ¶ added in v0.23.0
func (s NotebookOutput) MarshalJSON() ([]byte, error)
func (*NotebookOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *NotebookOutput) UnmarshalJSON(b []byte) error
type NotebookTask ¶
type NotebookTask struct {
    // Base parameters to be used for each run of this job. If the run is
    // initiated by a call to :method:jobs/runNow with parameters specified,
    // the two parameters maps are merged. If the same key is specified in
    // `base_parameters` and in `run-now`, the value from `run-now` is used.
    // Use [Task parameter variables] to set parameters containing information
    // about job runs.
    //
    // If the notebook takes a parameter that is not specified in the job’s
    // `base_parameters` or the `run-now` override parameters, the default
    // value from the notebook is used.
    //
    // Retrieve these parameters in a notebook using [dbutils.widgets.get].
    //
    // The JSON representation of this field cannot exceed 1MB.
    //
    // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
    // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets
    BaseParameters map[string]string `json:"base_parameters,omitempty"`
    // The path of the notebook to be run in the Databricks workspace or
    // remote repository. For notebooks stored in the Databricks workspace,
    // the path must be absolute and begin with a slash. For notebooks stored
    // in a remote repository, the path must be relative. This field is
    // required.
    NotebookPath string `json:"notebook_path"`
    // Optional location type of the notebook. When set to `WORKSPACE`, the
    // notebook will be retrieved from the local Databricks workspace. When
    // set to `GIT`, the notebook will be retrieved from a Git repository
    // defined in `git_source`. If the value is empty, the task will use `GIT`
    // if `git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`:
    // Notebook is located in Databricks workspace. * `GIT`: Notebook is
    // located in cloud Git provider.
    Source Source `json:"source,omitempty"`
}
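A sketch of a notebook task inside a job task definition, in the same style as the examples above; the task key, notebook path, and parameter are placeholders:

task := jobs.Task{
    TaskKey: "ingest", // placeholder task key
    NotebookTask: &jobs.NotebookTask{
        NotebookPath: "/Users/someone@example.com/ingest", // placeholder workspace path
        BaseParameters: map[string]string{
            "run_date": "2024-01-01", // read in the notebook via dbutils.widgets.get("run_date")
        },
    },
    ExistingClusterId: clusterId, // as obtained in the examples above
}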
type PauseStatus ¶ added in v0.11.0
type PauseStatus string
const PauseStatusPaused PauseStatus = `PAUSED`
const PauseStatusUnpaused PauseStatus = `UNPAUSED`
func (*PauseStatus) Set ¶ added in v0.11.0
func (f *PauseStatus) Set(v string) error
Set raw string value and validate it against allowed values
func (*PauseStatus) String ¶ added in v0.11.0
func (f *PauseStatus) String() string
String representation for fmt.Print
func (*PauseStatus) Type ¶ added in v0.11.0
func (f *PauseStatus) Type() string
Type always returns PauseStatus to satisfy [pflag.Value] interface
type PipelineParams ¶
type PipelineParams struct {
    // If true, triggers a full refresh on the delta live table.
    FullRefresh bool `json:"full_refresh,omitempty"`
    ForceSendFields []string `json:"-"`
}
func (PipelineParams) MarshalJSON ¶ added in v0.23.0
func (s PipelineParams) MarshalJSON() ([]byte, error)
func (*PipelineParams) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineParams) UnmarshalJSON(b []byte) error
type PipelineTask ¶
type PipelineTask struct {
    // If true, triggers a full refresh on the delta live table.
    FullRefresh bool `json:"full_refresh,omitempty"`
    // The full name of the pipeline task to execute.
    PipelineId string `json:"pipeline_id"`
    ForceSendFields []string `json:"-"`
}
func (PipelineTask) MarshalJSON ¶ added in v0.23.0
func (s PipelineTask) MarshalJSON() ([]byte, error)
func (*PipelineTask) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineTask) UnmarshalJSON(b []byte) error
type PythonWheelTask ¶
type PythonWheelTask struct {
    // Named entry point to use, if it does not exist in the metadata of the
    // package it executes the function from the package directly using
    // `$packageName.$entryPoint()`
    EntryPoint string `json:"entry_point"`
    // Command-line parameters passed to Python wheel task in the form of
    // `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if
    // `parameters` is not null.
    NamedParameters map[string]string `json:"named_parameters,omitempty"`
    // Name of the package to execute
    PackageName string `json:"package_name"`
    // Command-line parameters passed to Python wheel task. Leave it empty if
    // `named_parameters` is not null.
    Parameters []string `json:"parameters,omitempty"`
}
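A sketch of a wheel task configuration; the package name, entry point, and parameters are placeholders. Note that `parameters` and `named_parameters` are mutually exclusive:

wheel := &jobs.PythonWheelTask{
    PackageName: "my_package", // placeholder package name
    EntryPoint:  "run",        // placeholder entry point
    Parameters:  []string{"--name=task", "--data=dbfs:/path/to/data.json"},
    // NamedParameters could be used instead of Parameters, but not both.
}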
type QueueSettings ¶ added in v0.21.0
type QueueSettings struct {
	// If true, enable queueing for the job. This is a required field.
	Enabled bool `json:"enabled"`
}
type RepairHistoryItem ¶
type RepairHistoryItem struct {
	// The end time of the (repaired) run.
	EndTime int64 `json:"end_time,omitempty"`
	// The ID of the repair. Only returned for the items that represent a
	// repair in `repair_history`.
	Id int64 `json:"id,omitempty"`
	// The start time of the (repaired) run.
	StartTime int64 `json:"start_time,omitempty"`
	// The current state of the run.
	State *RunState `json:"state,omitempty"`
	// The run IDs of the task runs that ran as part of this repair history
	// item.
	TaskRunIds []int64 `json:"task_run_ids,omitempty"`
	// The repair history item type. Indicates whether a run is the original
	// run or a repair run.
	Type RepairHistoryItemType `json:"type,omitempty"`
	ForceSendFields []string `json:"-"`
}
func (RepairHistoryItem) MarshalJSON ¶ added in v0.23.0
func (s RepairHistoryItem) MarshalJSON() ([]byte, error)
func (*RepairHistoryItem) UnmarshalJSON ¶ added in v0.23.0
func (s *RepairHistoryItem) UnmarshalJSON(b []byte) error
type RepairHistoryItemType ¶
type RepairHistoryItemType string
The repair history item type. Indicates whether a run is the original run or a repair run.
const RepairHistoryItemTypeOriginal RepairHistoryItemType = `ORIGINAL`
const RepairHistoryItemTypeRepair RepairHistoryItemType = `REPAIR`
func (*RepairHistoryItemType) Set ¶ added in v0.2.0
func (f *RepairHistoryItemType) Set(v string) error
Set raw string value and validate it against allowed values
func (*RepairHistoryItemType) String ¶ added in v0.2.0
func (f *RepairHistoryItemType) String() string
String representation for fmt.Print
func (*RepairHistoryItemType) Type ¶ added in v0.2.0
func (f *RepairHistoryItemType) Type() string
Type always returns RepairHistoryItemType to satisfy [pflag.Value] interface
type RepairRun ¶
type RepairRun struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt // run"]` DbtCommands []string `json:"dbt_commands,omitempty"` // A list of parameters for jobs with Spark JAR tasks, for example // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the // main function of the main class specified in the Spark JAR task. If not // specified upon `run-now`, it defaults to an empty list. jar_params cannot // be specified in conjunction with notebook_params. The JSON representation // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set // parameters containing information about job runs. JarParams []string `json:"jar_params,omitempty"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` JobParameters map[string]string `json:"job_parameters,omitempty"` // The ID of the latest repair. This parameter is not required when // repairing a run for the first time, but must be provided on subsequent // requests to repair the same run. LatestRepairId int64 `json:"latest_repair_id,omitempty"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed // to the notebook and is accessible through the [dbutils.widgets.get] // function. // // If not specified upon `run-now`, the triggered run uses the job’s base // parameters. // // notebook_params cannot be specified in conjunction with jar_params. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // The JSON representation of this field (for example // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed // 10,000 bytes. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` // A map from keys to values for jobs with Python wheel task, for example // `"python_named_params": {"name": "task", "data": // "dbfs:/path/to/data.json"}`. PythonNamedParams map[string]string `json:"python_named_params,omitempty"` // A list of parameters for jobs with Python tasks, for example // `"python_params": ["john doe", "35"]`. The parameters are passed to // Python file as command-line parameters. If specified upon `run-now`, it // would overwrite the parameters specified in job setting. The JSON // representation of this field (for example `{"python_params":["john // doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []string `json:"python_params,omitempty"` // If true, repair all failed tasks. Only one of `rerun_tasks` or // `rerun_all_failed_tasks` can be used. 
RerunAllFailedTasks bool `json:"rerun_all_failed_tasks,omitempty"` // If true, repair all tasks that depend on the tasks in `rerun_tasks`, even // if they were previously successful. Can be also used in combination with // `rerun_all_failed_tasks`. RerunDependentTasks bool `json:"rerun_dependent_tasks,omitempty"` // The task keys of the task runs to repair. RerunTasks []string `json:"rerun_tasks,omitempty"` // The job run ID of the run to repair. The run must not be in progress. RunId int64 `json:"run_id"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to // spark-submit script as command-line parameters. If specified upon // `run-now`, it would overwrite the parameters specified in job setting. // The JSON representation of this field (for example // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables SparkSubmitParams []string `json:"spark_submit_params,omitempty"` // A map from keys to values for jobs with SQL task, for example // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task // does not support custom parameters. SqlParams map[string]string `json:"sql_params,omitempty"` ForceSendFields []string `json:"-"` }
func (RepairRun) MarshalJSON ¶ added in v0.23.0
func (s RepairRun) MarshalJSON() ([]byte, error)
func (*RepairRun) UnmarshalJSON ¶ added in v0.23.0
func (s *RepairRun) UnmarshalJSON(b []byte) error
type RepairRunResponse ¶
type RepairRunResponse struct {
	// The ID of the repair. Must be provided in subsequent repairs using the
	// `latest_repair_id` field to ensure sequential repairs.
	RepairId int64 `json:"repair_id,omitempty"`
	ForceSendFields []string `json:"-"`
}
Run repair was initiated.
func (RepairRunResponse) MarshalJSON ¶ added in v0.23.0
func (s RepairRunResponse) MarshalJSON() ([]byte, error)
func (*RepairRunResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *RepairRunResponse) UnmarshalJSON(b []byte) error
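A repair reruns part of an existing job run and returns a repair ID that must be threaded into any follow-up repair of the same run. A sketch of that flow, assuming w is a configured workspace client whose Jobs field is a *JobsAPI, ctx is a context.Context, and the JobsAPI exposes a RepairRun method returning a waiter in the SDK's usual long-running-operation style (the method and waiter shape are not reproduced on this page):

// Rerun only the failed tasks of run 42 (hypothetical ID), plus anything
// downstream of them.
wait, err := w.Jobs.RepairRun(ctx, jobs.RepairRun{
	RunId:               42,
	RerunAllFailedTasks: true,
	RerunDependentTasks: true,
})
if err != nil {
	return err
}
// Keep the repair ID: a second repair of the same run must send it back as
// LatestRepairId, as described for RepairRunResponse above.
latestRepairId := wait.Response.RepairId
_ = latestRepairId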
type ResetJob ¶
type ResetJob struct {
	// The canonical identifier of the job to reset. This field is required.
	JobId int64 `json:"job_id"`
	// The new settings of the job. These settings completely replace the old
	// settings.
	//
	// Changes to the field `JobBaseSettings.timeout_seconds` are applied to
	// active runs. Changes to other fields are applied to future runs only.
	NewSettings JobSettings `json:"new_settings"`
}
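Unlike a partial update, a reset replaces the job settings wholesale: anything omitted from NewSettings is cleared. A sketch, assuming w.Jobs is a *JobsAPI exposing a Reset method for this request type and that JobSettings has the field names shown (neither is reproduced on this page):

err := w.Jobs.Reset(ctx, jobs.ResetJob{
	JobId: 123, // hypothetical job ID
	NewSettings: jobs.JobSettings{
		Name:           "rebuilt-job",
		TimeoutSeconds: 3600, // timeout changes also apply to active runs, per the note above
		// Every other setting of job 123 is replaced by the zero values here.
	},
})
if err != nil {
	return err
}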
type ResetResponse ¶ added in v0.34.0
type ResetResponse struct { }
type ResolvedConditionTaskValues ¶ added in v0.12.0
type ResolvedConditionTaskValues struct {
	Left string `json:"left,omitempty"`
	Right string `json:"right,omitempty"`
	ForceSendFields []string `json:"-"`
}
func (ResolvedConditionTaskValues) MarshalJSON ¶ added in v0.23.0
func (s ResolvedConditionTaskValues) MarshalJSON() ([]byte, error)
func (*ResolvedConditionTaskValues) UnmarshalJSON ¶ added in v0.23.0
func (s *ResolvedConditionTaskValues) UnmarshalJSON(b []byte) error
type ResolvedDbtTaskValues ¶ added in v0.12.0
type ResolvedDbtTaskValues struct {
Commands []string `json:"commands,omitempty"`
}
type ResolvedNotebookTaskValues ¶ added in v0.12.0
type ResolvedParamPairValues ¶ added in v0.12.0
type ResolvedPythonWheelTaskValues ¶ added in v0.12.0
type ResolvedRunJobTaskValues ¶ added in v0.12.0
type ResolvedStringParamsValues ¶ added in v0.12.0
type ResolvedStringParamsValues struct {
Parameters []string `json:"parameters,omitempty"`
}
type ResolvedValues ¶ added in v0.12.0
type ResolvedValues struct {
	ConditionTask *ResolvedConditionTaskValues `json:"condition_task,omitempty"`
	DbtTask *ResolvedDbtTaskValues `json:"dbt_task,omitempty"`
	NotebookTask *ResolvedNotebookTaskValues `json:"notebook_task,omitempty"`
	PythonWheelTask *ResolvedPythonWheelTaskValues `json:"python_wheel_task,omitempty"`
	RunJobTask *ResolvedRunJobTaskValues `json:"run_job_task,omitempty"`
	SimulationTask *ResolvedParamPairValues `json:"simulation_task,omitempty"`
	SparkJarTask *ResolvedStringParamsValues `json:"spark_jar_task,omitempty"`
	SparkPythonTask *ResolvedStringParamsValues `json:"spark_python_task,omitempty"`
	SparkSubmitTask *ResolvedStringParamsValues `json:"spark_submit_task,omitempty"`
	SqlTask *ResolvedParamPairValues `json:"sql_task,omitempty"`
}
type Run ¶
type Run struct { // The sequence number of this run attempt for a triggered job run. The // initial attempt of a run has an attempt_number of 0\. If the initial run // attempt fails, and the job has a retry policy (`max_retries` \> 0), // subsequent runs are created with an `original_attempt_run_id` of the // original attempt’s ID and an incrementing `attempt_number`. Runs are // retried only until they succeed, and the maximum `attempt_number` is the // same as the `max_retries` value for the job. AttemptNumber int `json:"attempt_number,omitempty"` // The time in milliseconds it took to terminate the cluster and clean up // any associated artifacts. The duration of a task run is the sum of the // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The // `cleanup_duration` field is set to 0 for multitask job runs. The total // duration of a multitask job run is the value of the `run_duration` field. CleanupDuration int64 `json:"cleanup_duration,omitempty"` // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` // A snapshot of the job’s cluster specification when this run was // created. ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` // The time in milliseconds it took to execute the commands in the JAR or // notebook until they completed, failed, timed out, were cancelled, or // encountered an unexpected error. The duration of a task run is the sum of // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. // The `execution_duration` field is set to 0 for multitask job runs. The // total duration of a multitask job run is the value of the `run_duration` // field. ExecutionDuration int64 `json:"execution_duration,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. // // If `git_source` is set, these tasks retrieve the file from the remote // repository by default. However, this behavior can be overridden by // setting `source` to `WORKSPACE` on the task. // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. JobClusters []JobCluster `json:"job_clusters,omitempty"` // The canonical identifier of the job that contains this run. JobId int64 `json:"job_id,omitempty"` // Job-level parameters used in the run JobParameters []JobParameter `json:"job_parameters,omitempty"` // A unique identifier for this job run. This is set to the same value as // `run_id`. 
NumberInJob int64 `json:"number_in_job,omitempty"` // If this run is a retry of a prior run attempt, this field contains the // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"` // The parameters used for this run. OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"` // The time in milliseconds that the run has spent in the queue. QueueDuration int64 `json:"queue_duration,omitempty"` // The repair history of the run. RepairHistory []RepairHistoryItem `json:"repair_history,omitempty"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration int64 `json:"run_duration,omitempty"` // The canonical identifier of the run. This ID is unique across all runs of // all jobs. RunId int64 `json:"run_id,omitempty"` // An optional name for the run. The maximum length is 4096 bytes in UTF-8 // encoding. RunName string `json:"run_name,omitempty"` // The URL to the detail page of the run. RunPageUrl string `json:"run_page_url,omitempty"` // The type of a run. * `JOB_RUN`: Normal job run. A run created with // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with // :method:jobs/submit. // // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow RunType RunType `json:"run_type,omitempty"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. Schedule *CronSchedule `json:"schedule,omitempty"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task // run is the sum of the `setup_duration`, `execution_duration`, and the // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask // job runs. The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration int64 `json:"setup_duration,omitempty"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime int64 `json:"start_time,omitempty"` // The current state of the run. State *RunState `json:"state,omitempty"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run results. Tasks []RunTask `json:"tasks,omitempty"` // The type of trigger that fired this run. // // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This // occurs when you trigger a single run on demand through the UI or the API. * // `RETRY`: Indicates a run that is triggered as a retry of a previously // failed run. This occurs when you request to re-run the job in case of // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file // arrival. * `TABLE`: Indicates a run that is triggered by a table update.
Trigger TriggerType `json:"trigger,omitempty"` // Additional details about what triggered the run TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` ForceSendFields []string `json:"-"` }
func (Run) MarshalJSON ¶ added in v0.23.0
func (s Run) MarshalJSON() ([]byte, error)
func (*Run) UnmarshalJSON ¶ added in v0.23.0
func (s *Run) UnmarshalJSON(b []byte) error
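A complete sketch that fetches a run and walks its task runs, using GetRun from the index above. It assumes the parent module's workspace client exposes this package as w.Jobs and that GetRunRequest has a RunId field; the Run and RunTask fields used are the ones documented above.

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{RunId: 1234567}) // hypothetical run ID
	if err != nil {
		panic(err)
	}

	fmt.Printf("run %d: %s / %s (%d ms total)\n",
		run.RunId, run.State.LifeCycleState, run.State.ResultState, run.RunDuration)

	// Each task run has its own run_id, state, and durations.
	for _, t := range run.Tasks {
		fmt.Printf("  %s: %s, execution %d ms\n", t.TaskKey, t.State.ResultState, t.ExecutionDuration)
	}
}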
type RunConditionTask ¶ added in v0.11.0
type RunConditionTask struct { // The left operand of the condition task. Can be either a string value or a // job state or parameter reference. Left string `json:"left"` // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their // operands. This means that `“12.0” == “12”` will evaluate to // `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” // >= “12”` will evaluate to `false`. // // The boolean comparison to task values can be implemented with operators // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it // will be serialized to `“true”` or `“false”` for the comparison. Op ConditionTaskOp `json:"op"` // The condition expression evaluation result. Filled in if the task was // successfully completed. Can be `"true"` or `"false"` Outcome string `json:"outcome,omitempty"` // The right operand of the condition task. Can be either a string value or // a job state or parameter reference. Right string `json:"right"` ForceSendFields []string `json:"-"` }
func (RunConditionTask) MarshalJSON ¶ added in v0.23.0
func (s RunConditionTask) MarshalJSON() ([]byte, error)
func (*RunConditionTask) UnmarshalJSON ¶ added in v0.23.0
func (s *RunConditionTask) UnmarshalJSON(b []byte) error
type RunForEachTask ¶ added in v0.31.0
type RunForEachTask struct {
	// Controls the number of active iteration task runs. Default is 20,
	// maximum allowed is 100.
	Concurrency int `json:"concurrency,omitempty"`
	// Array for the task to iterate on. This can be a JSON string or a
	// reference to an array parameter.
	Inputs string `json:"inputs"`
	// Read-only field. Populated for GetRun and ListRuns RPC calls and stores
	// the execution stats of a for-each task.
	Stats *ForEachStats `json:"stats,omitempty"`
	// Configuration for the task that will be run for each element in the
	// array.
	Task Task `json:"task"`
	ForceSendFields []string `json:"-"`
}
func (RunForEachTask) MarshalJSON ¶ added in v0.31.0
func (s RunForEachTask) MarshalJSON() ([]byte, error)
func (*RunForEachTask) UnmarshalJSON ¶ added in v0.31.0
func (s *RunForEachTask) UnmarshalJSON(b []byte) error
type RunIf ¶ added in v0.12.0
type RunIf string
An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.
Possible values are: * `ALL_SUCCESS`: All dependencies have executed and succeeded * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded * `NONE_FAILED`: None of the dependencies have failed and at least one was executed * `ALL_DONE`: All dependencies have been completed * `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All dependencies have failed
const RunIfAllDone RunIf = `ALL_DONE`
All dependencies have been completed
const RunIfAllFailed RunIf = `ALL_FAILED`
All dependencies have failed
const RunIfAllSuccess RunIf = `ALL_SUCCESS`
All dependencies have executed and succeeded
const RunIfAtLeastOneFailed RunIf = `AT_LEAST_ONE_FAILED`
At least one dependency failed
const RunIfAtLeastOneSuccess RunIf = `AT_LEAST_ONE_SUCCESS`
At least one dependency has succeeded
const RunIfNoneFailed RunIf = `NONE_FAILED`
None of the dependencies have failed and at least one was executed
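RunIf is set per task, which makes it possible to add, for example, a cleanup task that always runs once its upstream tasks have finished, successfully or not. The Task and TaskDependency field names in this sketch are assumptions (those types are documented elsewhere in the package); the RunIf constant is one of those listed above.

cleanup := jobs.Task{
	TaskKey: "cleanup",
	RunIf:   jobs.RunIfAllDone, // run even if ingest or transform failed
	DependsOn: []jobs.TaskDependency{
		{TaskKey: "ingest"},
		{TaskKey: "transform"},
	},
	NotebookTask: &jobs.NotebookTask{
		NotebookPath: "/Workspace/ops/cleanup", // hypothetical notebook
	},
}
_ = cleanup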
type RunJobOutput ¶ added in v0.13.0
type RunJobOutput struct {
	// The run id of the triggered job run
	RunId int64 `json:"run_id,omitempty"`
	ForceSendFields []string `json:"-"`
}
func (RunJobOutput) MarshalJSON ¶ added in v0.23.0
func (s RunJobOutput) MarshalJSON() ([]byte, error)
func (*RunJobOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *RunJobOutput) UnmarshalJSON(b []byte) error
type RunJobTask ¶ added in v0.13.0
type RunJobTask struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt // run"]` DbtCommands []string `json:"dbt_commands,omitempty"` // A list of parameters for jobs with Spark JAR tasks, for example // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the // main function of the main class specified in the Spark JAR task. If not // specified upon `run-now`, it defaults to an empty list. jar_params cannot // be specified in conjunction with notebook_params. The JSON representation // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set // parameters containing information about job runs. JarParams []string `json:"jar_params,omitempty"` // ID of the job to trigger. JobId int64 `json:"job_id"` // Job-level parameters used to trigger the job. JobParameters map[string]string `json:"job_parameters,omitempty"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed // to the notebook and is accessible through the [dbutils.widgets.get] // function. // // If not specified upon `run-now`, the triggered run uses the job’s base // parameters. // // notebook_params cannot be specified in conjunction with jar_params. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // The JSON representation of this field (for example // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed // 10,000 bytes. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` // A map from keys to values for jobs with Python wheel task, for example // `"python_named_params": {"name": "task", "data": // "dbfs:/path/to/data.json"}`. PythonNamedParams map[string]string `json:"python_named_params,omitempty"` // A list of parameters for jobs with Python tasks, for example // `"python_params": ["john doe", "35"]`. The parameters are passed to // Python file as command-line parameters. If specified upon `run-now`, it // would overwrite the parameters specified in job setting. The JSON // representation of this field (for example `{"python_params":["john // doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []string `json:"python_params,omitempty"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to // spark-submit script as command-line parameters. If specified upon // `run-now`, it would overwrite the parameters specified in job setting. // The JSON representation of this field (for example // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. 
// // Use [Task parameter variables] to set parameters containing information // about job runs // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables SparkSubmitParams []string `json:"spark_submit_params,omitempty"` // A map from keys to values for jobs with SQL task, for example // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task // does not support custom parameters. SqlParams map[string]string `json:"sql_params,omitempty"` }
type RunLifeCycleState ¶
type RunLifeCycleState string
A value indicating the run's lifecycle state. The possible values are: * `QUEUED`: The run is queued. * `PENDING`: The run is waiting to be executed while the cluster and execution context are being prepared. * `RUNNING`: The task of this run is being executed. * `TERMINATING`: The task of this run has completed, and the cluster and execution context are being cleaned up. * `TERMINATED`: The task of this run has completed, and the cluster and execution context have been cleaned up. This state is terminal. * `SKIPPED`: This run was aborted because a previous run of the same job was already active. This state is terminal. * `INTERNAL_ERROR`: An exceptional state that indicates a failure in the Jobs service, such as network failure over a long period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the Jobs service terminates the cluster as soon as possible. This state is terminal. * `BLOCKED`: The run is blocked on an upstream dependency. * `WAITING_FOR_RETRY`: The run is waiting for a retry.
const RunLifeCycleStateBlocked RunLifeCycleState = `BLOCKED`
The run is blocked on an upstream dependency.
const RunLifeCycleStateInternalError RunLifeCycleState = `INTERNAL_ERROR`
An exceptional state that indicates a failure in the Jobs service, such as network failure over a long period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the Jobs service terminates the cluster as soon as possible. This state is terminal.
const RunLifeCycleStatePending RunLifeCycleState = `PENDING`
The run is waiting to be executed while the cluster and execution context are being prepared.
const RunLifeCycleStateQueued RunLifeCycleState = `QUEUED`
The run is queued.
const RunLifeCycleStateRunning RunLifeCycleState = `RUNNING`
The task of this run is being executed.
const RunLifeCycleStateSkipped RunLifeCycleState = `SKIPPED`
This run was aborted because a previous run of the same job was already active. This state is terminal.
const RunLifeCycleStateTerminated RunLifeCycleState = `TERMINATED`
The task of this run has completed, and the cluster and execution context have been cleaned up. This state is terminal.
const RunLifeCycleStateTerminating RunLifeCycleState = `TERMINATING`
The task of this run has completed, and the cluster and execution context are being cleaned up.
const RunLifeCycleStateWaitingForRetry RunLifeCycleState = `WAITING_FOR_RETRY`
The run is waiting for a retry.
func (*RunLifeCycleState) Set ¶ added in v0.2.0
func (f *RunLifeCycleState) Set(v string) error
Set raw string value and validate it against allowed values
func (*RunLifeCycleState) String ¶ added in v0.2.0
func (f *RunLifeCycleState) String() string
String representation for fmt.Print
func (*RunLifeCycleState) Type ¶ added in v0.2.0
func (f *RunLifeCycleState) Type() string
Type always returns RunLifeCycleState to satisfy [pflag.Value] interface
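Only `TERMINATED`, `SKIPPED`, and `INTERNAL_ERROR` are terminal, so a simple poller can loop on GetRun until one of them is reached. A sketch (imports of context, time, and this package omitted), assuming GetRunRequest has a RunId field:

func waitForTerminalState(ctx context.Context, a *jobs.JobsAPI, runId int64) (*jobs.Run, error) {
	for {
		run, err := a.GetRun(ctx, jobs.GetRunRequest{RunId: runId})
		if err != nil {
			return nil, err
		}
		switch run.State.LifeCycleState {
		case jobs.RunLifeCycleStateTerminated,
			jobs.RunLifeCycleStateSkipped,
			jobs.RunLifeCycleStateInternalError:
			// Terminal: the outcome, if any, is in run.State.ResultState.
			return run, nil
		}
		time.Sleep(10 * time.Second) // crude fixed backoff for the sketch
	}
}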
type RunNow ¶
type RunNow struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt // run"]` DbtCommands []string `json:"dbt_commands,omitempty"` // An optional token to guarantee the idempotency of job run requests. If a // run with the provided token already exists, the request does not create a // new run but returns the ID of the existing run instead. If a run with the // provided token is deleted, an error is returned. // // If you specify the idempotency token, upon failure you can retry until // the request succeeds. Databricks guarantees that exactly one run is // launched with that idempotency token. // // This token must have at most 64 characters. // // For more information, see [How to ensure idempotency for jobs]. // // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html IdempotencyToken string `json:"idempotency_token,omitempty"` // A list of parameters for jobs with Spark JAR tasks, for example // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the // main function of the main class specified in the Spark JAR task. If not // specified upon `run-now`, it defaults to an empty list. jar_params cannot // be specified in conjunction with notebook_params. The JSON representation // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set // parameters containing information about job runs. JarParams []string `json:"jar_params,omitempty"` // The ID of the job to be executed JobId int64 `json:"job_id"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` JobParameters map[string]string `json:"job_parameters,omitempty"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed // to the notebook and is accessible through the [dbutils.widgets.get] // function. // // If not specified upon `run-now`, the triggered run uses the job’s base // parameters. // // notebook_params cannot be specified in conjunction with jar_params. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // The JSON representation of this field (for example // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed // 10,000 bytes. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` // A map from keys to values for jobs with Python wheel task, for example // `"python_named_params": {"name": "task", "data": // "dbfs:/path/to/data.json"}`. PythonNamedParams map[string]string `json:"python_named_params,omitempty"` // A list of parameters for jobs with Python tasks, for example // `"python_params": ["john doe", "35"]`. The parameters are passed to // Python file as command-line parameters. If specified upon `run-now`, it // would overwrite the parameters specified in job setting. The JSON // representation of this field (for example `{"python_params":["john // doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs. 
// // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []string `json:"python_params,omitempty"` // The queue settings of the run. Queue *QueueSettings `json:"queue,omitempty"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to // spark-submit script as command-line parameters. If specified upon // `run-now`, it would overwrite the parameters specified in job setting. // The JSON representation of this field (for example // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables SparkSubmitParams []string `json:"spark_submit_params,omitempty"` // A map from keys to values for jobs with SQL task, for example // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task // does not support custom parameters. SqlParams map[string]string `json:"sql_params,omitempty"` ForceSendFields []string `json:"-"` }
func (RunNow) MarshalJSON ¶ added in v0.23.0
func (s RunNow) MarshalJSON() ([]byte, error)
func (*RunNow) UnmarshalJSON ¶ added in v0.23.0
func (s *RunNow) UnmarshalJSON(b []byte) error
type RunNowResponse ¶
type RunNowResponse struct {
	// A unique identifier for this job run. This is set to the same value as
	// `run_id`.
	NumberInJob int64 `json:"number_in_job,omitempty"`
	// The globally unique ID of the newly triggered run.
	RunId int64 `json:"run_id,omitempty"`
	ForceSendFields []string `json:"-"`
}
Run was started successfully.
func (RunNowResponse) MarshalJSON ¶ added in v0.23.0
func (s RunNowResponse) MarshalJSON() ([]byte, error)
func (*RunNowResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *RunNowResponse) UnmarshalJSON(b []byte) error
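The idempotency token makes run-now retries safe: retrying the same request either returns the existing run or starts exactly one. A sketch of triggering a run with notebook parameters, assuming w.Jobs is a *JobsAPI exposing a RunNow method that returns a waiter wrapping the RunNowResponse documented above (the method and waiter shape are not reproduced on this page):

wait, err := w.Jobs.RunNow(ctx, jobs.RunNow{
	JobId:            123, // hypothetical job ID
	IdempotencyToken: "2024-06-01-nightly",
	NotebookParams:   map[string]string{"name": "john doe", "age": "35"},
})
if err != nil {
	return err
}
fmt.Println("started run", wait.Response.RunId) // RunNowResponse.RunId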
type RunOutput ¶
type RunOutput struct { // The output of a dbt task, if available. DbtOutput *DbtOutput `json:"dbt_output,omitempty"` // An error message indicating why a task failed or why output is not // available. The message is unstructured, and its exact format is subject // to change. Error string `json:"error,omitempty"` // If there was an error executing the run, this field contains any // available stack traces. ErrorTrace string `json:"error_trace,omitempty"` Info string `json:"info,omitempty"` // The output from tasks that write to standard streams (stdout/stderr) such // as spark_jar_task, spark_python_task, python_wheel_task. // // It's not supported for the notebook_task, pipeline_task or // spark_submit_task. // // Databricks restricts this API to return the last 5 MB of these logs. Logs string `json:"logs,omitempty"` // Whether the logs are truncated. LogsTruncated bool `json:"logs_truncated,omitempty"` // All details of the run except for its output. Metadata *Run `json:"metadata,omitempty"` // The output of a notebook task, if available. A notebook task that // terminates (either successfully or with a failure) without calling // `dbutils.notebook.exit()` is considered to have an empty output. This // field is set but its result value is empty. Databricks restricts this API // to return the first 5 MB of the output. To return a larger result, use // the [ClusterLogConf] field to configure log storage for the job cluster. // // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` // The output of a run job task, if available RunJobOutput *RunJobOutput `json:"run_job_output,omitempty"` // The output of a SQL task, if available. SqlOutput *SqlOutput `json:"sql_output,omitempty"` ForceSendFields []string `json:"-"` }
Run output was retrieved successfully.
func (RunOutput) MarshalJSON ¶ added in v0.23.0
func (s RunOutput) MarshalJSON() ([]byte, error)
func (*RunOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *RunOutput) UnmarshalJSON(b []byte) error
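Output is retrieved per task run, so for a multitask job you pass the run_id of an individual task (Run.Tasks[i].RunId, here taskRunId) rather than the job run itself. A sketch using GetRunOutputByRunId from the index above; the NotebookOutput.Result field name is an assumption since that type is documented elsewhere in the package.

out, err := w.Jobs.GetRunOutputByRunId(ctx, taskRunId)
if err != nil {
	return err
}
switch {
case out.Error != "":
	fmt.Println("task failed:", out.Error, out.ErrorTrace)
case out.NotebookOutput != nil:
	fmt.Println("dbutils.notebook.exit() returned:", out.NotebookOutput.Result)
case out.Logs != "":
	fmt.Println("last 5 MB of stdout/stderr:", out.Logs)
}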
type RunParameters ¶
type RunParameters struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt // run"]` DbtCommands []string `json:"dbt_commands,omitempty"` // A list of parameters for jobs with Spark JAR tasks, for example // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the // main function of the main class specified in the Spark JAR task. If not // specified upon `run-now`, it defaults to an empty list. jar_params cannot // be specified in conjunction with notebook_params. The JSON representation // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set // parameters containing information about job runs. JarParams []string `json:"jar_params,omitempty"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed // to the notebook and is accessible through the [dbutils.widgets.get] // function. // // If not specified upon `run-now`, the triggered run uses the job’s base // parameters. // // notebook_params cannot be specified in conjunction with jar_params. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // The JSON representation of this field (for example // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed // 10,000 bytes. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` // A map from keys to values for jobs with Python wheel task, for example // `"python_named_params": {"name": "task", "data": // "dbfs:/path/to/data.json"}`. PythonNamedParams map[string]string `json:"python_named_params,omitempty"` // A list of parameters for jobs with Python tasks, for example // `"python_params": ["john doe", "35"]`. The parameters are passed to // Python file as command-line parameters. If specified upon `run-now`, it // would overwrite the parameters specified in job setting. The JSON // representation of this field (for example `{"python_params":["john // doe","35"]}`) cannot exceed 10,000 bytes. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []string `json:"python_params,omitempty"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to // spark-submit script as command-line parameters. If specified upon // `run-now`, it would overwrite the parameters specified in job setting. // The JSON representation of this field (for example // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. 
// // Use [Task parameter variables] to set parameters containing information // about job runs // // Important // // These parameters accept only Latin characters (ASCII character set). // Using non-ASCII characters returns an error. Examples of invalid, // non-ASCII characters are Chinese, Japanese kanjis, and emojis. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables SparkSubmitParams []string `json:"spark_submit_params,omitempty"` // A map from keys to values for jobs with SQL task, for example // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task // does not support custom parameters. SqlParams map[string]string `json:"sql_params,omitempty"` }
type RunResultState ¶
type RunResultState string
A value indicating the run's result. The possible values are: * `SUCCESS`: The task completed successfully. * `FAILED`: The task completed with an error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * `CANCELED`: The run was canceled at user request. * `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum concurrent runs were reached. * `EXCLUDED`: The run was skipped because the necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run completed successfully with some failures; leaf tasks were successful. * `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * `UPSTREAM_CANCELED`: The run was skipped because an upstream task was canceled.
const RunResultStateCanceled RunResultState = `CANCELED`
The run was canceled at user request.
const RunResultStateExcluded RunResultState = `EXCLUDED`
The run was skipped because the necessary conditions were not met.
const RunResultStateFailed RunResultState = `FAILED`
The task completed with an error.
const RunResultStateMaximumConcurrentRunsReached RunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED`
The run was skipped because the maximum concurrent runs were reached.
const RunResultStateSuccess RunResultState = `SUCCESS`
The task completed successfully.
const RunResultStateSuccessWithFailures RunResultState = `SUCCESS_WITH_FAILURES`
The job run completed successfully with some failures; leaf tasks were successful.
const RunResultStateTimedout RunResultState = `TIMEDOUT`
The run was stopped after reaching the timeout.
const RunResultStateUpstreamCanceled RunResultState = `UPSTREAM_CANCELED`
The run was skipped because an upstream task was canceled.
const RunResultStateUpstreamFailed RunResultState = `UPSTREAM_FAILED`
The run was skipped because of an upstream failure.
func (*RunResultState) Set ¶ added in v0.2.0
func (f *RunResultState) Set(v string) error
Set raw string value and validate it against allowed values
func (*RunResultState) String ¶ added in v0.2.0
func (f *RunResultState) String() string
String representation for fmt.Print
func (*RunResultState) Type ¶ added in v0.2.0
func (f *RunResultState) Type() string
Type always returns RunResultState to satisfy [pflag.Value] interface
type RunState ¶
type RunState struct {
	// A value indicating the run's current lifecycle state. This field is
	// always available in the response.
	LifeCycleState RunLifeCycleState `json:"life_cycle_state,omitempty"`
	// The reason indicating why the run was queued.
	QueueReason string `json:"queue_reason,omitempty"`
	// A value indicating the run's result. This field is only available for
	// terminal lifecycle states.
	ResultState RunResultState `json:"result_state,omitempty"`
	// A descriptive message for the current state. This field is
	// unstructured, and its exact format is subject to change.
	StateMessage string `json:"state_message,omitempty"`
	// A value indicating whether a run was canceled manually by a user or by
	// the scheduler because the run timed out.
	UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout,omitempty"`
	ForceSendFields []string `json:"-"`
}
The current state of the run.
func (RunState) MarshalJSON ¶ added in v0.23.0
func (*RunState) UnmarshalJSON ¶ added in v0.23.0
type RunTask ¶
type RunTask struct { // The sequence number of this run attempt for a triggered job run. The // initial attempt of a run has an attempt_number of 0\. If the initial run // attempt fails, and the job has a retry policy (`max_retries` \> 0), // subsequent runs are created with an `original_attempt_run_id` of the // original attempt’s ID and an incrementing `attempt_number`. Runs are // retried only until they succeed, and the maximum `attempt_number` is the // same as the `max_retries` value for the job. AttemptNumber int `json:"attempt_number,omitempty"` // The time in milliseconds it took to terminate the cluster and clean up // any associated artifacts. The duration of a task run is the sum of the // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The // `cleanup_duration` field is set to 0 for multitask job runs. The total // duration of a multitask job run is the value of the `run_duration` field. CleanupDuration int64 `json:"cleanup_duration,omitempty"` // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` // The key of the compute requirement, specified in `job.settings.compute`, // to use for execution of this task. ComputeKey string `json:"compute_key,omitempty"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. ConditionTask *RunConditionTask `json:"condition_task,omitempty"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name // assigned to the dependent task. DependsOn []TaskDependency `json:"depends_on,omitempty"` // An optional description for this task. Description string `json:"description,omitempty"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` // The time in milliseconds it took to execute the commands in the JAR or // notebook until they completed, failed, timed out, were cancelled, or // encountered an unexpected error. The duration of a task run is the sum of // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. // The `execution_duration` field is set to 0 for multitask job runs. The // total duration of a multitask job run is the value of the `run_duration` // field. ExecutionDuration int64 `json:"execution_duration,omitempty"` // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need // to manually restart the cluster if it stops responding. 
We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` // If for_each_task, indicates that this task must execute the nested task // within it. ForEachTask *RunForEachTask `json:"for_each_task,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, // these tasks retrieve the file from the remote repository by default. // However, this behavior can be overridden by setting `source` to // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only // version-controlled sources. If dbt or SQL File tasks are used, // `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey string `json:"job_cluster_key,omitempty"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries []compute.Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` // If pipeline_task, indicates that this task must execute a Pipeline. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` // If python_wheel_task, indicates that this job must execute a PythonWheel. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // The time in milliseconds that the run has spent in the queue. QueueDuration int64 `json:"queue_duration,omitempty"` // Parameter values including resolved references ResolvedValues *ResolvedValues `json:"resolved_values,omitempty"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration int64 `json:"run_duration,omitempty"` // The ID of the task run. RunId int64 `json:"run_id,omitempty"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf RunIf `json:"run_if,omitempty"` // If run_job_task, indicates that this task must execute another job. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` RunPageUrl string `json:"run_page_url,omitempty"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task // run is the sum of the `setup_duration`, `execution_duration`, and the // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask // job runs. The total duration of a multitask job run is the value of the // `run_duration` field. 
SetupDuration int64 `json:"setup_duration,omitempty"` // If spark_jar_task, indicates that this task must run a JAR. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` // If spark_python_task, indicates that this task must run a Python file. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python // libraries and `--conf` to set the Spark configurations. // // `master`, `deploy-mode`, and `executor-cores` are automatically // configured by Databricks; you _cannot_ specify them in parameters. // // By default, the Spark submit job uses all available memory (excluding // reserved memory for Databricks services). You can set `--driver-memory`, // and `--executor-memory` to a smaller value to leave some room for // off-heap usage. // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` // If sql_task, indicates that this job must execute a SQL task. SqlTask *SqlTask `json:"sql_task,omitempty"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime int64 `json:"start_time,omitempty"` // The current state of the run. State *RunState `json:"state,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be // updated or reset. TaskKey string `json:"task_key"` // An optional timeout applied to each run of this job task. A value of `0` // means no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` ForceSendFields []string `json:"-"` }
Used when outputting a child run, in GetRun or ListRuns.
func (RunTask) MarshalJSON ¶ added in v0.23.0
func (s RunTask) MarshalJSON() ([]byte, error)
func (*RunTask) UnmarshalJSON ¶ added in v0.23.0
func (s *RunTask) UnmarshalJSON(b []byte) error
type RunType ¶
type RunType string
The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with dbutils.notebook.run. * `SUBMIT_RUN`: Submit run. A run created with :method:jobs/submit.
const RunTypeJobRun RunType = `JOB_RUN`
Normal job run. A run created with :method:jobs/runNow.
const RunTypeSubmitRun RunType = `SUBMIT_RUN`
Submit run. A run created with :method:jobs/submit.
const RunTypeWorkflowRun RunType = `WORKFLOW_RUN`
Workflow run. A run created with dbutils.notebook.run.
type Source ¶ added in v0.11.0
type Source string
Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved from the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository defined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.
* `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL file is located in cloud Git provider.
const SourceGit Source = `GIT`
SQL file is located in cloud Git provider.
const SourceWorkspace Source = `WORKSPACE`
SQL file is located in Databricks workspace.
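Because tasks default to `GIT` whenever `git_source` is defined on the job, a task that should keep using the workspace copy has to say so explicitly. A small sketch with the NotebookTask documented earlier:

nb := jobs.NotebookTask{
	NotebookPath: "/Workspace/Repos/ops/hotfix", // hypothetical workspace path
	Source:       jobs.SourceWorkspace,          // override the GIT default implied by git_source
}
_ = nb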
type SparkJarTask ¶
type SparkJarTask struct { // Deprecated since 04/2016. Provide a `jar` through the `libraries` field // instead. For an example, see :method:jobs/create. JarUri string `json:"jar_uri,omitempty"` // The full name of the class containing the main method to be executed. // This class must be contained in a JAR provided as a library. // // The code must use `SparkContext.getOrCreate` to obtain a Spark context; // otherwise, runs of the job fail. MainClassName string `json:"main_class_name,omitempty"` // Parameters passed to the main method. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables Parameters []string `json:"parameters,omitempty"` ForceSendFields []string `json:"-"` }
func (SparkJarTask) MarshalJSON ¶ added in v0.23.0
func (s SparkJarTask) MarshalJSON() ([]byte, error)
func (*SparkJarTask) UnmarshalJSON ¶ added in v0.23.0
func (s *SparkJarTask) UnmarshalJSON(b []byte) error
type SparkPythonTask ¶
type SparkPythonTask struct { // Command line parameters passed to the Python file. // // Use [Task parameter variables] to set parameters containing information // about job runs. // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables Parameters []string `json:"parameters,omitempty"` // The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, // adls:/, gcs:/) and workspace paths are supported. For python files stored // in the Databricks workspace, the path must be absolute and begin with // `/`. For files stored in a remote repository, the path must be relative. // This field is required. PythonFile string `json:"python_file"` // Optional location type of the Python file. When set to `WORKSPACE` or not // specified, the file will be retrieved from the local Databricks workspace // or cloud location (if the `python_file` has a URI format). When set to // `GIT`, the Python file will be retrieved from a Git repository defined in // `git_source`. // // * `WORKSPACE`: The Python file is located in a Databricks workspace or at // a cloud filesystem URI. * `GIT`: The Python file is located in a remote // Git repository. Source Source `json:"source,omitempty"` }
type SparkSubmitTask ¶
type SparkSubmitTask struct {
	// Command-line parameters passed to spark submit.
	//
	// Use [Task parameter variables] to set parameters containing information
	// about job runs.
	//
	// [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
	Parameters []string `json:"parameters,omitempty"`
}
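Everything a spark-submit task needs is passed as command-line parameters, since libraries and spark_conf are not supported for this task type (see the notes on spark_submit_task under RunTask). A sketch with placeholder paths:

submit := jobs.SparkSubmitTask{
	Parameters: []string{
		"--class", "org.apache.spark.examples.SparkPi",
		"--jars", "dbfs:/libs/extra.jar", // hypothetical dependency jar
		"--conf", "spark.executor.memory=4g",
		"dbfs:/jars/sparkpi.jar", // hypothetical application jar
		"10",
	},
}
_ = submit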
type SqlAlertOutput ¶
type SqlAlertOutput struct { // The state of the SQL alert. // // * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not // fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled // trigger conditions AlertState SqlAlertState `json:"alert_state,omitempty"` // The link to find the output results. OutputLink string `json:"output_link,omitempty"` // The text of the SQL query. Can Run permission of the SQL query associated // with the SQL alert is required to view this field. QueryText string `json:"query_text,omitempty"` // Information about SQL statements executed in the run. SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"` // The canonical identifier of the SQL warehouse. WarehouseId string `json:"warehouse_id,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlAlertOutput) MarshalJSON ¶ added in v0.23.0
func (s SqlAlertOutput) MarshalJSON() ([]byte, error)
func (*SqlAlertOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlAlertOutput) UnmarshalJSON(b []byte) error
type SqlAlertState ¶ added in v0.3.1
type SqlAlertState string
The state of the SQL alert.
* UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled trigger conditions
const SqlAlertStateOk SqlAlertState = `OK`
const SqlAlertStateTriggered SqlAlertState = `TRIGGERED`
const SqlAlertStateUnknown SqlAlertState = `UNKNOWN`
func (*SqlAlertState) Set ¶ added in v0.3.1
func (f *SqlAlertState) Set(v string) error
Set raw string value and validate it against allowed values
func (*SqlAlertState) String ¶ added in v0.3.1
func (f *SqlAlertState) String() string
String representation for fmt.Print
func (*SqlAlertState) Type ¶ added in v0.3.1
func (f *SqlAlertState) Type() string
Type always returns SqlAlertState to satisfy [pflag.Value] interface
type SqlDashboardOutput ¶
type SqlDashboardOutput struct { // The canonical identifier of the SQL warehouse. WarehouseId string `json:"warehouse_id,omitempty"` // Widgets executed in the run. Only SQL query based widgets are listed. Widgets []SqlDashboardWidgetOutput `json:"widgets,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlDashboardOutput) MarshalJSON ¶ added in v0.23.0
func (s SqlDashboardOutput) MarshalJSON() ([]byte, error)
func (*SqlDashboardOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlDashboardOutput) UnmarshalJSON(b []byte) error
type SqlDashboardWidgetOutput ¶
type SqlDashboardWidgetOutput struct { // Time (in epoch milliseconds) when execution of the SQL widget ends. EndTime int64 `json:"end_time,omitempty"` // The information about the error when execution fails. Error *SqlOutputError `json:"error,omitempty"` // The link to find the output results. OutputLink string `json:"output_link,omitempty"` // Time (in epoch milliseconds) when execution of the SQL widget starts. StartTime int64 `json:"start_time,omitempty"` // The execution status of the SQL widget. Status SqlDashboardWidgetOutputStatus `json:"status,omitempty"` // The canonical identifier of the SQL widget. WidgetId string `json:"widget_id,omitempty"` // The title of the SQL widget. WidgetTitle string `json:"widget_title,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlDashboardWidgetOutput) MarshalJSON ¶ added in v0.23.0
func (s SqlDashboardWidgetOutput) MarshalJSON() ([]byte, error)
func (*SqlDashboardWidgetOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlDashboardWidgetOutput) UnmarshalJSON(b []byte) error
type SqlDashboardWidgetOutputStatus ¶
type SqlDashboardWidgetOutputStatus string
const SqlDashboardWidgetOutputStatusCancelled SqlDashboardWidgetOutputStatus = `CANCELLED`
const SqlDashboardWidgetOutputStatusFailed SqlDashboardWidgetOutputStatus = `FAILED`
const SqlDashboardWidgetOutputStatusPending SqlDashboardWidgetOutputStatus = `PENDING`
const SqlDashboardWidgetOutputStatusRunning SqlDashboardWidgetOutputStatus = `RUNNING`
const SqlDashboardWidgetOutputStatusSuccess SqlDashboardWidgetOutputStatus = `SUCCESS`
func (*SqlDashboardWidgetOutputStatus) Set ¶ added in v0.2.0
func (f *SqlDashboardWidgetOutputStatus) Set(v string) error
Set raw string value and validate it against allowed values
func (*SqlDashboardWidgetOutputStatus) String ¶ added in v0.2.0
func (f *SqlDashboardWidgetOutputStatus) String() string
String representation for fmt.Print
func (*SqlDashboardWidgetOutputStatus) Type ¶ added in v0.2.0
func (f *SqlDashboardWidgetOutputStatus) Type() string
Type always returns SqlDashboardWidgetOutputStatus to satisfy [pflag.Value] interface
type SqlOutput ¶
type SqlOutput struct { // The output of a SQL alert task, if available. AlertOutput *SqlAlertOutput `json:"alert_output,omitempty"` // The output of a SQL dashboard task, if available. DashboardOutput *SqlDashboardOutput `json:"dashboard_output,omitempty"` // The output of a SQL query task, if available. QueryOutput *SqlQueryOutput `json:"query_output,omitempty"` }
type SqlOutputError ¶
type SqlOutputError struct { // The error message when execution fails. Message string `json:"message,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlOutputError) MarshalJSON ¶ added in v0.23.0
func (s SqlOutputError) MarshalJSON() ([]byte, error)
func (*SqlOutputError) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlOutputError) UnmarshalJSON(b []byte) error
type SqlQueryOutput ¶
type SqlQueryOutput struct { EndpointId string `json:"endpoint_id,omitempty"` // The link to find the output results. OutputLink string `json:"output_link,omitempty"` // The text of the SQL query. Can Run permission of the SQL query is // required to view this field. QueryText string `json:"query_text,omitempty"` // Information about SQL statements executed in the run. SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"` // The canonical identifier of the SQL warehouse. WarehouseId string `json:"warehouse_id,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlQueryOutput) MarshalJSON ¶ added in v0.23.0
func (s SqlQueryOutput) MarshalJSON() ([]byte, error)
func (*SqlQueryOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlQueryOutput) UnmarshalJSON(b []byte) error
type SqlStatementOutput ¶
type SqlStatementOutput struct { // A key that can be used to look up query details. LookupKey string `json:"lookup_key,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlStatementOutput) MarshalJSON ¶ added in v0.23.0
func (s SqlStatementOutput) MarshalJSON() ([]byte, error)
func (*SqlStatementOutput) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlStatementOutput) UnmarshalJSON(b []byte) error
type SqlTask ¶
type SqlTask struct { // If alert, indicates that this job must refresh a SQL alert. Alert *SqlTaskAlert `json:"alert,omitempty"` // If dashboard, indicates that this job must refresh a SQL dashboard. Dashboard *SqlTaskDashboard `json:"dashboard,omitempty"` // If file, indicates that this job runs a SQL file in a remote Git // repository. Only one SQL statement is supported in a file. Multiple SQL // statements separated by semicolons (;) are not permitted. File *SqlTaskFile `json:"file,omitempty"` // Parameters to be used for each run of this job. The SQL alert task does // not support custom parameters. Parameters map[string]string `json:"parameters,omitempty"` // If query, indicates that this job must execute a SQL query. Query *SqlTaskQuery `json:"query,omitempty"` // The canonical identifier of the SQL warehouse. Recommended to use with // serverless or pro SQL warehouses. Classic SQL warehouses are only // supported for SQL alert, dashboard and query tasks and are limited to // scheduled single-task jobs. WarehouseId string `json:"warehouse_id"` }
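A minimal sketch of a SQL query task; the warehouse and query IDs are placeholders, and custom parameters are not supported for alert tasks, as noted above.

queryTask := jobs.SqlTask{
	WarehouseId: "1234567890abcdef",                   // placeholder serverless or pro SQL warehouse ID
	Query:       &jobs.SqlTaskQuery{QueryId: "a1b2c3"}, // placeholder query ID
	Parameters:  map[string]string{"country": "US"},
}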
type SqlTaskAlert ¶
type SqlTaskAlert struct { // The canonical identifier of the SQL alert. AlertId string `json:"alert_id"` // If true, the alert notifications are not sent to subscribers. PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` // If specified, alert notifications are sent to subscribers. Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlTaskAlert) MarshalJSON ¶ added in v0.23.0
func (s SqlTaskAlert) MarshalJSON() ([]byte, error)
func (*SqlTaskAlert) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlTaskAlert) UnmarshalJSON(b []byte) error
type SqlTaskDashboard ¶
type SqlTaskDashboard struct { // Subject of the email sent to subscribers of this task. CustomSubject string `json:"custom_subject,omitempty"` // The canonical identifier of the SQL dashboard. DashboardId string `json:"dashboard_id"` // If true, the dashboard snapshot is not taken, and emails are not sent to // subscribers. PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` // If specified, dashboard snapshots are sent to subscriptions. Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlTaskDashboard) MarshalJSON ¶ added in v0.23.0
func (s SqlTaskDashboard) MarshalJSON() ([]byte, error)
func (*SqlTaskDashboard) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlTaskDashboard) UnmarshalJSON(b []byte) error
type SqlTaskFile ¶ added in v0.9.0
type SqlTaskFile struct { // Path of the SQL file. Must be relative if the source is a remote Git // repository and absolute for workspace paths. Path string `json:"path"` // Optional location type of the SQL file. When set to `WORKSPACE`, the SQL // file will be retrieved from the local Databricks workspace. When set to // `GIT`, the SQL file will be retrieved from a Git repository defined in // `git_source`. If the value is empty, the task will use `GIT` if // `git_source` is defined and `WORKSPACE` otherwise. // // * `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL // file is located in cloud Git provider. Source Source `json:"source,omitempty"` }
type SqlTaskQuery ¶
type SqlTaskQuery struct { // The canonical identifier of the SQL query. QueryId string `json:"query_id"` }
type SqlTaskSubscription ¶ added in v0.3.1
type SqlTaskSubscription struct { // The canonical identifier of the destination to receive email // notification. This parameter is mutually exclusive with user_name. You // cannot set both destination_id and user_name for subscription // notifications. DestinationId string `json:"destination_id,omitempty"` // The user name to receive the subscription email. This parameter is // mutually exclusive with destination_id. You cannot set both // destination_id and user_name for subscription notifications. UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-"` }
func (SqlTaskSubscription) MarshalJSON ¶ added in v0.23.0
func (s SqlTaskSubscription) MarshalJSON() ([]byte, error)
func (*SqlTaskSubscription) UnmarshalJSON ¶ added in v0.23.0
func (s *SqlTaskSubscription) UnmarshalJSON(b []byte) error
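A sketch of a dashboard refresh task with a single email subscriber; the IDs and address are placeholders. Note that destination_id and user_name are mutually exclusive per the field comments above.

dashboardTask := jobs.SqlTask{
	WarehouseId: "1234567890abcdef", // placeholder
	Dashboard: &jobs.SqlTaskDashboard{
		DashboardId:   "d-123abc", // placeholder dashboard ID
		CustomSubject: "Nightly KPI refresh",
		Subscriptions: []jobs.SqlTaskSubscription{
			{UserName: "someone@example.com"}, // or DestinationId, but not both
		},
	},
}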
type SubmitRun ¶
type SubmitRun struct { // List of permissions to set on the job. AccessControlList []iam.AccessControlRequest `json:"access_control_list,omitempty"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional set of email addresses notified when the run begins or // completes. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. // // If `git_source` is set, these tasks retrieve the file from the remote // repository by default. However, this behavior can be overridden by // setting `source` to `WORKSPACE` on the task. // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // An optional token that can be used to guarantee the idempotency of job // run requests. If a run with the provided token already exists, the // request does not create a new run but returns the ID of the existing run // instead. If a run with the provided token is deleted, an error is // returned. // // If you specify the idempotency token, upon failure you can retry until // the request succeeds. Databricks guarantees that exactly one run is // launched with that idempotency token. // // This token must have at most 64 characters. // // For more information, see [How to ensure idempotency for jobs]. // // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html IdempotencyToken string `json:"idempotency_token,omitempty"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // run. NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // If pipeline_task, indicates that this task must execute a Pipeline. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` // If python_wheel_task, indicates that this job must execute a PythonWheel. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // The queue settings of the one-time run. Queue *QueueSettings `json:"queue,omitempty"` // If run_job_task, indicates that this task must execute another job. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // An optional name for the run. The default value is `Untitled`. RunName string `json:"run_name,omitempty"` // If spark_jar_task, indicates that this task must run a JAR. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` // If spark_python_task, indicates that this task must run a Python file. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python // libraries and `--conf` to set the Spark configurations. // // `master`, `deploy-mode`, and `executor-cores` are automatically // configured by Databricks; you _cannot_ specify them in parameters. // // By default, the Spark submit job uses all available memory (excluding // reserved memory for Databricks services). You can set `--driver-memory`, // and `--executor-memory` to a smaller value to leave some room for // off-heap usage. // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` // If sql_task, indicates that this job must execute a SQL task. SqlTask *SqlTask `json:"sql_task,omitempty"` Tasks []SubmitTask `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` // A collection of system notification IDs to notify when the run begins or // completes. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` ForceSendFields []string `json:"-"` }
func (SubmitRun) MarshalJSON ¶ added in v0.23.0
func (*SubmitRun) UnmarshalJSON ¶ added in v0.23.0
type SubmitRunResponse ¶
type SubmitRunResponse struct { // The canonical identifier for the newly submitted run. RunId int64 `json:"run_id,omitempty"` ForceSendFields []string `json:"-"` }
Run was created and started successfully.
func (SubmitRunResponse) MarshalJSON ¶ added in v0.23.0
func (s SubmitRunResponse) MarshalJSON() ([]byte, error)
func (*SubmitRunResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *SubmitRunResponse) UnmarshalJSON(b []byte) error
type SubmitTask ¶ added in v0.11.0
type SubmitTask struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name // assigned to the dependent task. DependsOn []TaskDependency `json:"depends_on,omitempty"` // An optional description for this task. Description string `json:"description,omitempty"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` // If for_each_task, indicates that this task must execute the nested task // within it. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries []compute.Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` // If pipeline_task, indicates that this task must execute a Pipeline. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` // If python_wheel_task, indicates that this job must execute a PythonWheel. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf RunIf `json:"run_if,omitempty"` // If run_job_task, indicates that this task must execute another job. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // If spark_jar_task, indicates that this task must run a JAR. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` // If spark_python_task, indicates that this task must run a Python file. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python // libraries and `--conf` to set the Spark configurations. // // `master`, `deploy-mode`, and `executor-cores` are automatically // configured by Databricks; you _cannot_ specify them in parameters. // // By default, the Spark submit job uses all available memory (excluding // reserved memory for Databricks services). You can set `--driver-memory`, // and `--executor-memory` to a smaller value to leave some room for // off-heap usage. // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` // If sql_task, indicates that this job must execute a SQL task. SqlTask *SqlTask `json:"sql_task,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be // updated or reset. TaskKey string `json:"task_key"` // An optional timeout applied to each run of this job task. A value of `0` // means no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` ForceSendFields []string `json:"-"` }
func (SubmitTask) MarshalJSON ¶ added in v0.23.0
func (s SubmitTask) MarshalJSON() ([]byte, error)
func (*SubmitTask) UnmarshalJSON ¶ added in v0.23.0
func (s *SubmitTask) UnmarshalJSON(b []byte) error
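Putting SubmitRun and SubmitTask together: a sketch of submitting a one-time run and waiting for it to finish. It assumes this package's Submit method returns the WaitGetRunJobTerminatedOrSkipped wrapper described later in this reference (the same pattern the index shows for CancelRun); the notebook path and cluster spec are placeholders.

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	// Submit a one-time run with a single notebook task on a new cluster.
	wait, err := w.Jobs.Submit(ctx, jobs.SubmitRun{
		RunName: "ad-hoc smoke test",
		Tasks: []jobs.SubmitTask{{
			TaskKey:      "smoke",
			NotebookTask: &jobs.NotebookTask{NotebookPath: "/Workspace/Shared/smoke"}, // placeholder
			NewCluster: &compute.ClusterSpec{
				SparkVersion: "13.3.x-scala2.12", // placeholder runtime version
				NodeTypeId:   "i3.xlarge",        // placeholder node type
				NumWorkers:   1,
			},
		}},
		TimeoutSeconds: 3600,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Block until the run reaches a TERMINATED or SKIPPED life cycle state.
	run, err := wait.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("run %d finished", run.RunId)
}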
type TableTriggerConfiguration ¶ added in v0.32.0
type TableTriggerConfiguration struct { // The table(s) condition based on which to trigger a job run. Condition Condition `json:"condition,omitempty"` // If set, the trigger starts a run only after the specified amount of time // has passed since the last time the trigger fired. The minimum allowed // value is 60 seconds. MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` // A list of Delta tables to monitor for changes. The table name must be in // the format `catalog_name.schema_name.table_name`. TableNames []string `json:"table_names,omitempty"` // If set, the trigger starts a run only after no table updates have // occurred for the specified time and can be used to wait for a series of // table updates before triggering a run. The minimum allowed value is 60 // seconds. WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` ForceSendFields []string `json:"-"` }
func (TableTriggerConfiguration) MarshalJSON ¶ added in v0.32.0
func (s TableTriggerConfiguration) MarshalJSON() ([]byte, error)
func (*TableTriggerConfiguration) UnmarshalJSON ¶ added in v0.32.0
func (s *TableTriggerConfiguration) UnmarshalJSON(b []byte) error
type Task ¶ added in v0.11.0
type Task struct { // The key of the compute requirement, specified in `job.settings.compute`, // to use for execution of this task. ComputeKey string `json:"compute_key,omitempty"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this // task. The task will run only if the `run_if` condition is true. The key // is `task_key`, and the value is the name assigned to the dependent task. DependsOn []TaskDependency `json:"depends_on,omitempty"` // An optional description for this task. Description string `json:"description,omitempty"` // An option to disable auto optimization in serverless DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` // An optional set of email addresses that is notified when runs of this // task begin or complete as well as when this task is deleted. The default // behavior is to not send any emails. EmailNotifications *TaskEmailNotifications `json:"email_notifications,omitempty"` // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId string `json:"existing_cluster_id,omitempty"` // If for_each_task, indicates that this task must execute the nested task // within it. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey string `json:"job_cluster_key,omitempty"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries []compute.Library `json:"libraries,omitempty"` // An optional maximum number of times to retry an unsuccessful run. A run // is considered to be unsuccessful if it completes with the `FAILED` // result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means // to retry indefinitely and the value `0` means to never retry. MaxRetries int `json:"max_retries,omitempty"` // An optional minimal interval in milliseconds between the start of the // failed run and the subsequent retry run. The default behavior is that // unsuccessful runs are immediately retried. MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"` // If pipeline_task, indicates that this task must execute a Pipeline. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` // If python_wheel_task, indicates that this job must execute a PythonWheel. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` // An optional value specifying the condition determining whether the task // is run once its dependencies have been completed. // // * `ALL_SUCCESS`: All dependencies have executed and succeeded * // `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded * // `NONE_FAILED`: None of the dependencies have failed and at least one was // executed * `ALL_DONE`: All dependencies have been completed * // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All // dependencies have failed RunIf RunIf `json:"run_if,omitempty"` // If run_job_task, indicates that this task must execute another job. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // If spark_jar_task, indicates that this task must run a JAR. SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` // If spark_python_task, indicates that this task must run a Python file. SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python // libraries and `--conf` to set the Spark configurations. // // `master`, `deploy-mode`, and `executor-cores` are automatically // configured by Databricks; you _cannot_ specify them in parameters. // // By default, the Spark submit job uses all available memory (excluding // reserved memory for Databricks services). You can set `--driver-memory`, // and `--executor-memory` to a smaller value to leave some room for // off-heap usage. // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` // If sql_task, indicates that this job must execute a SQL task. SqlTask *SqlTask `json:"sql_task,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be // updated or reset. TaskKey string `json:"task_key"` // An optional timeout applied to each run of this job task. A value of `0` // means no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` // A collection of system notification IDs to notify when runs of this task // begin or complete. The default behavior is to not send any system // notifications. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` ForceSendFields []string `json:"-"` }
func (Task) MarshalJSON ¶ added in v0.23.0
func (*Task) UnmarshalJSON ¶ added in v0.23.0
type TaskDependency ¶ added in v0.11.0
type TaskDependency struct { // Can only be specified on condition task dependencies. The outcome of the // dependent task that must be met for this task to run. Outcome string `json:"outcome,omitempty"` // The name of the task this task depends on. TaskKey string `json:"task_key"` ForceSendFields []string `json:"-"` }
func (TaskDependency) MarshalJSON ¶ added in v0.23.0
func (s TaskDependency) MarshalJSON() ([]byte, error)
func (*TaskDependency) UnmarshalJSON ¶ added in v0.23.0
func (s *TaskDependency) UnmarshalJSON(b []byte) error
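A sketch of a task gated on a condition task's outcome; the task keys and notebook path are placeholders, and the "true"/"false" outcome values follow the ConditionTask semantics described earlier in this reference.

report := jobs.Task{
	TaskKey: "publish_report",
	DependsOn: []jobs.TaskDependency{
		// Run only when the condition task "data_is_fresh" evaluates to true.
		{TaskKey: "data_is_fresh", Outcome: "true"},
	},
	NotebookTask: &jobs.NotebookTask{NotebookPath: "/Workspace/Shared/report"}, // placeholder
}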
type TaskEmailNotifications ¶ added in v0.9.0
type TaskEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the // run is skipped. NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in // the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is // specified in the `health` field for the job, notifications are not sent. OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` // A list of email addresses to be notified when a run unsuccessfully // completes. A run is considered to have completed unsuccessfully if it // ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or // `TIMED_OUT` result_state. If this is not specified on job creation, // reset, or update the list is empty, and notifications are not sent. OnFailure []string `json:"on_failure,omitempty"` // A list of email addresses to be notified when a run begins. If not // specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnStart []string `json:"on_start,omitempty"` // A list of email addresses to be notified when a run successfully // completes. A run is considered to have completed successfully if it ends // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If // not specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnSuccess []string `json:"on_success,omitempty"` ForceSendFields []string `json:"-"` }
func (TaskEmailNotifications) MarshalJSON ¶ added in v0.37.0
func (s TaskEmailNotifications) MarshalJSON() ([]byte, error)
func (*TaskEmailNotifications) UnmarshalJSON ¶ added in v0.37.0
func (s *TaskEmailNotifications) UnmarshalJSON(b []byte) error
type TaskNotificationSettings ¶ added in v0.9.0
type TaskNotificationSettings struct { // If true, do not send notifications to recipients specified in `on_start` // for the retried runs and do not send notifications to recipients // specified in `on_failure` until the last retry of the run. AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` // If true, do not send notifications to recipients specified in // `on_failure` if the run is canceled. NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` // If true, do not send notifications to recipients specified in // `on_failure` if the run is skipped. NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` ForceSendFields []string `json:"-"` }
func (TaskNotificationSettings) MarshalJSON ¶ added in v0.23.0
func (s TaskNotificationSettings) MarshalJSON() ([]byte, error)
func (*TaskNotificationSettings) UnmarshalJSON ¶ added in v0.23.0
func (s *TaskNotificationSettings) UnmarshalJSON(b []byte) error
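A sketch combining both notification structures on a single task; the addresses are placeholders.

nightly := jobs.Task{
	TaskKey: "nightly_load",
	EmailNotifications: &jobs.TaskEmailNotifications{
		OnFailure: []string{"oncall@example.com"},
		OnSuccess: []string{"team@example.com"},
	},
	NotificationSettings: &jobs.TaskNotificationSettings{
		// Alert only on the final retry attempt and stay quiet for canceled runs.
		AlertOnLastAttempt:     true,
		NoAlertForCanceledRuns: true,
	},
}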
type TriggerInfo ¶ added in v0.13.0
type TriggerInfo struct { // The run id of the Run Job task run RunId int64 `json:"run_id,omitempty"` ForceSendFields []string `json:"-"` }
Additional details about what triggered the run
func (TriggerInfo) MarshalJSON ¶ added in v0.23.0
func (s TriggerInfo) MarshalJSON() ([]byte, error)
func (*TriggerInfo) UnmarshalJSON ¶ added in v0.23.0
func (s *TriggerInfo) UnmarshalJSON(b []byte) error
type TriggerSettings ¶ added in v0.4.0
type TriggerSettings struct { // File arrival trigger settings. FileArrival *FileArrivalTriggerConfiguration `json:"file_arrival,omitempty"` // Whether this trigger is paused or not. PauseStatus PauseStatus `json:"pause_status,omitempty"` // Old table trigger settings name. Deprecated in favor of `table_update`. Table *TableTriggerConfiguration `json:"table,omitempty"` TableUpdate *TableTriggerConfiguration `json:"table_update,omitempty"` }
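A sketch of trigger settings that fire on Delta table updates; the table name is a placeholder, and the PauseStatusUnpaused constant name is assumed from the PauseStatus type listed earlier in this reference. The resulting value would typically be assigned to the Trigger field of JobSettings or CreateJob.

trigger := jobs.TriggerSettings{
	TableUpdate: &jobs.TableTriggerConfiguration{
		TableNames:                    []string{"main.analytics.orders"}, // placeholder catalog_name.schema_name.table_name
		MinTimeBetweenTriggersSeconds: 300,                               // minimum allowed value is 60
		WaitAfterLastChangeSeconds:    120,                               // settle time after the last table update
	},
	PauseStatus: jobs.PauseStatusUnpaused, // assumed constant of the PauseStatus type
}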
type TriggerType ¶
type TriggerType string
The type of trigger that fired this run.
* `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`: One time triggers that fire a single run. This occurs when you trigger a single run on demand through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a previously failed run. This occurs when you request to re-run the job in case of failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is triggered by a table update.
const TriggerTypeFileArrival TriggerType = `FILE_ARRIVAL`
Indicates a run that is triggered by a file arrival.
const TriggerTypeOneTime TriggerType = `ONE_TIME`
One time triggers that fire a single run. This occurs when you trigger a single run on demand through the UI or the API.
const TriggerTypePeriodic TriggerType = `PERIODIC`
Schedules that periodically trigger runs, such as a cron scheduler.
const TriggerTypeRetry TriggerType = `RETRY`
Indicates a run that is triggered as a retry of a previously failed run. This occurs when you request to re-run the job in case of failures.
const TriggerTypeRunJobTask TriggerType = `RUN_JOB_TASK`
Indicates a run that is triggered using a Run Job task.
const TriggerTypeTable TriggerType = `TABLE`
Indicates a run that is triggered by a table update.
func (*TriggerType) Set ¶ added in v0.2.0
func (f *TriggerType) Set(v string) error
Set raw string value and validate it against allowed values
func (*TriggerType) String ¶ added in v0.2.0
func (f *TriggerType) String() string
String representation for fmt.Print
func (*TriggerType) Type ¶ added in v0.2.0
func (f *TriggerType) Type() string
Type always returns TriggerType to satisfy [pflag.Value] interface
type UpdateJob ¶
type UpdateJob struct { // Remove top-level fields in the job settings. Removing nested fields is // not supported, except for tasks and job clusters (`tasks/task_1`). This // field is optional. FieldsToRemove []string `json:"fields_to_remove,omitempty"` // The canonical identifier of the job to update. This field is required. JobId int64 `json:"job_id"` // The new settings for the job. // // Top-level fields specified in `new_settings` are completely replaced, // except for arrays which are merged. That is, new and existing entries are // completely replaced based on the respective key fields, i.e. `task_key` // or `job_cluster_key`, while previous entries are kept. // // Partially updating nested fields is not supported. // // Changes to the field `JobSettings.timeout_seconds` are applied to active // runs. Changes to other fields are applied to future runs only. NewSettings *JobSettings `json:"new_settings,omitempty"` }
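A sketch of a partial update with this package's Update method, reusing w and ctx from the submit example above; the job ID is a placeholder. Top-level fields in new_settings replace the existing values, and fields_to_remove drops the job's schedule outright.

if err := w.Jobs.Update(ctx, jobs.UpdateJob{
	JobId: 123456789, // placeholder job ID
	NewSettings: &jobs.JobSettings{
		// Timeout changes are applied to active runs per the field comment above.
		TimeoutSeconds: 7200,
	},
	// Remove the top-level schedule; nested removals other than tasks and
	// job clusters (e.g. `tasks/task_1`) are not supported.
	FieldsToRemove: []string{"schedule"},
}); err != nil {
	log.Fatal(err)
}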
type UpdateResponse ¶ added in v0.34.0
type UpdateResponse struct { }
type ViewItem ¶
type ViewItem struct { // Content of the view. Content string `json:"content,omitempty"` // Name of the view item. In the case of code view, it would be the // notebook’s name. In the case of dashboard view, it would be the // dashboard’s name. Name string `json:"name,omitempty"` // Type of the view item. Type ViewType `json:"type,omitempty"` ForceSendFields []string `json:"-"` }
func (ViewItem) MarshalJSON ¶ added in v0.23.0
func (*ViewItem) UnmarshalJSON ¶ added in v0.23.0
type ViewType ¶
type ViewType string
* `NOTEBOOK`: Notebook view item. * `DASHBOARD`: Dashboard view item.
const ViewTypeDashboard ViewType = `DASHBOARD`
Dashboard view item.
const ViewTypeNotebook ViewType = `NOTEBOOK`
Notebook view item.
type ViewsToExport ¶
type ViewsToExport string
* `CODE`: Code view of the notebook. * `DASHBOARDS`: All dashboard views of the notebook. * `ALL`: All views of the notebook.
const ViewsToExportAll ViewsToExport = `ALL`
All views of the notebook.
const ViewsToExportCode ViewsToExport = `CODE`
Code view of the notebook.
const ViewsToExportDashboards ViewsToExport = `DASHBOARDS`
All dashboard views of the notebook.
func (*ViewsToExport) Set ¶ added in v0.2.0
func (f *ViewsToExport) Set(v string) error
Set raw string value and validate it against allowed values
func (*ViewsToExport) String ¶ added in v0.2.0
func (f *ViewsToExport) String() string
String representation for fmt.Print
func (*ViewsToExport) Type ¶ added in v0.2.0
func (f *ViewsToExport) Type() string
Type always returns ViewsToExport to satisfy [pflag.Value] interface
type WaitGetRunJobTerminatedOrSkipped ¶ added in v0.10.0
type WaitGetRunJobTerminatedOrSkipped[R any] struct { Response *R RunId int64 `json:"run_id"` Poll func(time.Duration, func(*Run)) (*Run, error) // contains filtered or unexported fields }
WaitGetRunJobTerminatedOrSkipped is a wrapper that calls JobsAPI.WaitGetRunJobTerminatedOrSkipped and waits to reach TERMINATED or SKIPPED state.
func (*WaitGetRunJobTerminatedOrSkipped[R]) Get ¶ added in v0.10.0
func (w *WaitGetRunJobTerminatedOrSkipped[R]) Get() (*Run, error)
Get the Run with the default timeout of 20 minutes.
func (*WaitGetRunJobTerminatedOrSkipped[R]) GetWithTimeout ¶ added in v0.10.0
func (w *WaitGetRunJobTerminatedOrSkipped[R]) GetWithTimeout(timeout time.Duration) (*Run, error)
Get the Run with custom timeout.
func (*WaitGetRunJobTerminatedOrSkipped[R]) OnProgress ¶ added in v0.10.0
func (w *WaitGetRunJobTerminatedOrSkipped[R]) OnProgress(callback func(*Run)) *WaitGetRunJobTerminatedOrSkipped[R]
OnProgress invokes a callback every time it polls for the status update.
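A sketch of consuming the wrapper returned by a call such as Submit in the example above: log progress on every poll, then wait with a custom timeout instead of the 20-minute default. The Run state fields referenced in the callback are assumed from the Run type documented earlier in this reference, and time comes from the standard library.

run, err := wait.
	OnProgress(func(r *jobs.Run) {
		if r.State != nil {
			log.Printf("run %d state: %s", r.RunId, r.State.LifeCycleState)
		}
	}).
	GetWithTimeout(2 * time.Hour)
if err != nil {
	log.Fatal(err)
}
log.Printf("final result state: %s", run.State.ResultState)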
type WebhookNotifications ¶ added in v0.11.0
type WebhookNotifications struct { // An optional list of system notification IDs to call when the duration of // a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` // metric in the `health` field. A maximum of 3 destinations can be // specified for the `on_duration_warning_threshold_exceeded` property. OnDurationWarningThresholdExceeded []Webhook `json:"on_duration_warning_threshold_exceeded,omitempty"` // An optional list of system notification IDs to call when the run fails. A // maximum of 3 destinations can be specified for the `on_failure` property. OnFailure []Webhook `json:"on_failure,omitempty"` // An optional list of system notification IDs to call when the run starts. // A maximum of 3 destinations can be specified for the `on_start` property. OnStart []Webhook `json:"on_start,omitempty"` // An optional list of system notification IDs to call when the run // completes successfully. A maximum of 3 destinations can be specified for // the `on_success` property. OnSuccess []Webhook `json:"on_success,omitempty"` }
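A sketch of webhook destinations for run start, failure, and slow-run warnings; the IDs are placeholder notification-destination IDs, and the Webhook type's Id field is assumed from its listing elsewhere in this reference. Each property accepts at most 3 destinations.

hooks := jobs.WebhookNotifications{
	OnStart:                            []jobs.Webhook{{Id: "notif-start"}}, // placeholder destination IDs
	OnFailure:                          []jobs.Webhook{{Id: "notif-oncall"}},
	OnDurationWarningThresholdExceeded: []jobs.Webhook{{Id: "notif-slow"}},
}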