Documentation ¶
Overview ¶
The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines.
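A minimal sketch of how the service is typically reached, assuming workspace credentials are available in the environment and using only constructors and methods shown on this page (NewWorkspaceClient, ListPipelinesAll):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func main() {
	ctx := context.Background()
	// Credentials are resolved from the environment (DATABRICKS_HOST, DATABRICKS_TOKEN, ...).
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	// List every pipeline in the workspace.
	all, err := w.Pipelines.ListPipelinesAll(ctx, pipelines.ListPipelinesRequest{})
	if err != nil {
		panic(err)
	}
	for _, p := range all {
		fmt.Println(p.PipelineId, p.Name)
	}
}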
Index ¶
- type CreatePipeline
- type CreatePipelineResponse
- type CronTrigger
- type DataPlaneId
- type DeletePipelineRequest
- type DeletePipelineResponse
- type DeploymentKind
- type EditPipeline
- type EditPipelineResponse
- type ErrorDetail
- type EventLevel
- type FileLibrary
- type Filters
- type GetPipelinePermissionLevelsRequest
- type GetPipelinePermissionLevelsResponse
- type GetPipelinePermissionsRequest
- type GetPipelineRequest
- type GetPipelineResponse
- type GetPipelineResponseHealth
- type GetUpdateRequest
- type GetUpdateResponse
- type IngestionConfig
- type IngestionGatewayPipelineDefinition
- type IngestionPipelineDefinition
- type ListPipelineEventsRequest
- type ListPipelineEventsResponse
- type ListPipelinesRequest
- type ListPipelinesResponse
- type ListUpdatesRequest
- type ListUpdatesResponse
- type ManualTrigger
- type MaturityLevel
- type NotebookLibrary
- type Notifications
- type Origin
- type PipelineAccessControlRequest
- type PipelineAccessControlResponse
- type PipelineCluster
- type PipelineClusterAutoscale
- type PipelineClusterAutoscaleMode
- type PipelineDeployment
- type PipelineEvent
- type PipelineLibrary
- type PipelinePermission
- type PipelinePermissionLevel
- type PipelinePermissions
- type PipelinePermissionsDescription
- type PipelinePermissionsRequest
- type PipelineSpec
- type PipelineState
- type PipelineStateInfo
- type PipelineStateInfoHealth
- type PipelineTrigger
- type PipelinesAPI
- func (a *PipelinesAPI) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)
- func (a *PipelinesAPI) Delete(ctx context.Context, request DeletePipelineRequest) error
- func (a *PipelinesAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error
- func (a *PipelinesAPI) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error)
- func (a *PipelinesAPI) GetByName(ctx context.Context, name string) (*PipelineStateInfo, error)
- func (a *PipelinesAPI) GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error)
- func (a *PipelinesAPI) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error)
- func (a *PipelinesAPI) GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error)
- func (a *PipelinesAPI) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error)
- func (a *PipelinesAPI) GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error)
- func (a *PipelinesAPI) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error)
- func (a *PipelinesAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error)
- func (a *PipelinesAPI) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent]
- func (a *PipelinesAPI) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error)
- func (a *PipelinesAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error)
- func (a *PipelinesAPI) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]
- func (a *PipelinesAPI) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error)
- func (a *PipelinesAPI) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error)
- func (a *PipelinesAPI) ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error)
- func (a *PipelinesAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error)
- func (a *PipelinesAPI) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
- func (a *PipelinesAPI) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)
- func (a *PipelinesAPI) Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetPipelineIdle[struct{}], error)
- func (a *PipelinesAPI) StopAndWait(ctx context.Context, stopRequest StopRequest, ...) (*GetPipelineResponse, error) deprecated
- func (a *PipelinesAPI) Update(ctx context.Context, request EditPipeline) error
- func (a *PipelinesAPI) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
- func (a *PipelinesAPI) WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, ...) (*GetPipelineResponse, error)
- func (a *PipelinesAPI) WaitGetPipelineRunning(ctx context.Context, pipelineId string, timeout time.Duration, ...) (*GetPipelineResponse, error)
- type PipelinesInterface
- type PipelinesService
- type ReportSpec
- type SchemaSpec
- type Sequencing
- type SerializedException
- type StackFrame
- type StartUpdate
- type StartUpdateCause
- type StartUpdateResponse
- type StopPipelineResponse
- type StopRequest
- type TableSpec
- type TableSpecificConfig
- type TableSpecificConfigScdType
- type UpdateInfo
- type UpdateInfoCause
- type UpdateInfoState
- type UpdateStateInfo
- type UpdateStateInfoState
- type WaitGetPipelineIdle
- type WaitGetPipelineRunning
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type CreatePipeline ¶
type CreatePipeline struct {
	// If false, deployment will fail if name conflicts with that of another pipeline.
	AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
	// Budget policy of this pipeline.
	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
	// A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.
	Catalog string `json:"catalog,omitempty"`
	// DLT Release Channel that specifies which version to use.
	Channel string `json:"channel,omitempty"`
	// Cluster settings for this pipeline deployment.
	Clusters []PipelineCluster `json:"clusters,omitempty"`
	// String-String configuration for this pipeline execution.
	Configuration map[string]string `json:"configuration,omitempty"`
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous bool `json:"continuous,omitempty"`
	// Deployment type of this pipeline.
	Deployment *PipelineDeployment `json:"deployment,omitempty"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development bool `json:"development,omitempty"`

	DryRun bool `json:"dry_run,omitempty"`
	// Pipeline product edition.
	Edition string `json:"edition,omitempty"`
	// Filters on which Pipeline packages to include in the deployed graph.
	Filters *Filters `json:"filters,omitempty"`
	// The definition of a gateway pipeline to support CDC.
	GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"`
	// Unique identifier for this pipeline.
	Id string `json:"id,omitempty"`
	// The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.
	IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `json:"libraries,omitempty"`
	// Friendly identifier for this pipeline.
	Name string `json:"name,omitempty"`
	// List of notification settings for this pipeline.
	Notifications []Notifications `json:"notifications,omitempty"`
	// Whether Photon is enabled for this pipeline.
	Photon bool `json:"photon,omitempty"`
	// The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.
	Schema string `json:"schema,omitempty"`
	// Whether serverless compute is enabled for this pipeline.
	Serverless bool `json:"serverless,omitempty"`
	// DBFS root directory for storing checkpoints and tables.
	Storage string `json:"storage,omitempty"`
	// Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
	Target string `json:"target,omitempty"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
	Trigger *PipelineTrigger `json:"trigger,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (CreatePipeline) MarshalJSON ¶ added in v0.23.0
func (s CreatePipeline) MarshalJSON() ([]byte, error)
func (*CreatePipeline) UnmarshalJSON ¶ added in v0.23.0
func (s *CreatePipeline) UnmarshalJSON(b []byte) error
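The generated MarshalJSON honors ForceSendFields: a field tagged `omitempty` is dropped from the request body when it holds its zero value, unless its Go name is listed there. A short sketch:

	req := pipelines.CreatePipeline{
		Name:       "sdk-example",
		Continuous: false,
		// Continuous is the zero value for bool, so it would normally be
		// omitted; forcing it makes "continuous": false explicit in the JSON.
		ForceSendFields: []string{"Continuous"},
	}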
type CreatePipelineResponse ¶
type CreatePipelineResponse struct {
	// Only returned when dry_run is true.
	EffectiveSettings *PipelineSpec `json:"effective_settings,omitempty"`
	// The unique identifier for the newly created pipeline. Only returned when dry_run is false.
	PipelineId string `json:"pipeline_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (CreatePipelineResponse) MarshalJSON ¶ added in v0.23.0
func (s CreatePipelineResponse) MarshalJSON() ([]byte, error)
func (*CreatePipelineResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *CreatePipelineResponse) UnmarshalJSON(b []byte) error
type CronTrigger ¶
type CronTrigger struct {
	QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`

	TimezoneId string `json:"timezone_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (CronTrigger) MarshalJSON ¶ added in v0.23.0
func (s CronTrigger) MarshalJSON() ([]byte, error)
func (*CronTrigger) UnmarshalJSON ¶ added in v0.23.0
func (s *CronTrigger) UnmarshalJSON(b []byte) error
type DataPlaneId ¶ added in v0.5.0
type DataPlaneId struct {
	// The instance name of the data plane emitting an event.
	Instance string `json:"instance,omitempty"`
	// A sequence number, unique and increasing within the data plane instance.
	SeqNo int `json:"seq_no,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (DataPlaneId) MarshalJSON ¶ added in v0.23.0
func (s DataPlaneId) MarshalJSON() ([]byte, error)
func (*DataPlaneId) UnmarshalJSON ¶ added in v0.23.0
func (s *DataPlaneId) UnmarshalJSON(b []byte) error
type DeletePipelineRequest ¶ added in v0.8.0
type DeletePipelineRequest struct {
PipelineId string `json:"-" url:"-"`
}
Delete a pipeline
type DeletePipelineResponse ¶ added in v0.34.0
type DeletePipelineResponse struct { }
type DeploymentKind ¶ added in v0.39.0
type DeploymentKind string
The deployment method that manages the pipeline:
- BUNDLE: The pipeline is managed by a Databricks Asset Bundle.
const DeploymentKindBundle DeploymentKind = `BUNDLE`
func (*DeploymentKind) Set ¶ added in v0.39.0
func (f *DeploymentKind) Set(v string) error
Set raw string value and validate it against allowed values
func (*DeploymentKind) String ¶ added in v0.39.0
func (f *DeploymentKind) String() string
String representation for fmt.Print
func (*DeploymentKind) Type ¶ added in v0.39.0
func (f *DeploymentKind) Type() string
Type always returns DeploymentKind to satisfy [pflag.Value] interface
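Because Set validates against the allowed values, the type can back a CLI flag or be assigned defensively; a minimal sketch:

	var kind pipelines.DeploymentKind
	if err := kind.Set("BUNDLE"); err != nil {
		panic(err) // any value other than BUNDLE is rejected here
	}
	fmt.Println(kind.String()) // prints: BUNDLE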
type EditPipeline ¶
type EditPipeline struct {
	// If false, deployment will fail if name has changed and conflicts with the name of another pipeline.
	AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
	// Budget policy of this pipeline.
	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
	// A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.
	Catalog string `json:"catalog,omitempty"`
	// DLT Release Channel that specifies which version to use.
	Channel string `json:"channel,omitempty"`
	// Cluster settings for this pipeline deployment.
	Clusters []PipelineCluster `json:"clusters,omitempty"`
	// String-String configuration for this pipeline execution.
	Configuration map[string]string `json:"configuration,omitempty"`
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous bool `json:"continuous,omitempty"`
	// Deployment type of this pipeline.
	Deployment *PipelineDeployment `json:"deployment,omitempty"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development bool `json:"development,omitempty"`
	// Pipeline product edition.
	Edition string `json:"edition,omitempty"`
	// If present, the last-modified time of the pipeline settings before the edit. If the settings were modified after that time, then the request will fail with a conflict.
	ExpectedLastModified int64 `json:"expected_last_modified,omitempty"`
	// Filters on which Pipeline packages to include in the deployed graph.
	Filters *Filters `json:"filters,omitempty"`
	// The definition of a gateway pipeline to support CDC.
	GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"`
	// Unique identifier for this pipeline.
	Id string `json:"id,omitempty"`
	// The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.
	IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `json:"libraries,omitempty"`
	// Friendly identifier for this pipeline.
	Name string `json:"name,omitempty"`
	// List of notification settings for this pipeline.
	Notifications []Notifications `json:"notifications,omitempty"`
	// Whether Photon is enabled for this pipeline.
	Photon bool `json:"photon,omitempty"`
	// Unique identifier for this pipeline.
	PipelineId string `json:"pipeline_id,omitempty" url:"-"`
	// The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.
	Schema string `json:"schema,omitempty"`
	// Whether serverless compute is enabled for this pipeline.
	Serverless bool `json:"serverless,omitempty"`
	// DBFS root directory for storing checkpoints and tables.
	Storage string `json:"storage,omitempty"`
	// Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
	Target string `json:"target,omitempty"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
	Trigger *PipelineTrigger `json:"trigger,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (EditPipeline) MarshalJSON ¶ added in v0.23.0
func (s EditPipeline) MarshalJSON() ([]byte, error)
func (*EditPipeline) UnmarshalJSON ¶ added in v0.23.0
func (s *EditPipeline) UnmarshalJSON(b []byte) error
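ExpectedLastModified enables an optimistic-concurrency check on edits. A sketch that reads the current settings and applies the edit only if nothing changed in between, continuing the page's example setup with w and ctx (pipelineId and the new name are illustrative; real code would typically carry the remaining fields over from current.Spec, since an edit supplies the full settings):

	current, err := w.Pipelines.GetByPipelineId(ctx, pipelineId)
	if err != nil {
		panic(err)
	}
	err = w.Pipelines.Update(ctx, pipelines.EditPipeline{
		PipelineId: pipelineId,
		Name:       "renamed-pipeline",
		// Fail with a conflict if the settings changed after this timestamp.
		ExpectedLastModified: current.LastModified,
	})
	if err != nil {
		panic(err)
	}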
type EditPipelineResponse ¶ added in v0.34.0
type EditPipelineResponse struct { }
type ErrorDetail ¶ added in v0.5.0
type ErrorDetail struct {
	// The exception thrown for this error, with its chain of cause.
	Exceptions []SerializedException `json:"exceptions,omitempty"`
	// Whether this error is considered fatal, that is, unrecoverable.
	Fatal bool `json:"fatal,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (ErrorDetail) MarshalJSON ¶ added in v0.23.0
func (s ErrorDetail) MarshalJSON() ([]byte, error)
func (*ErrorDetail) UnmarshalJSON ¶ added in v0.23.0
func (s *ErrorDetail) UnmarshalJSON(b []byte) error
type EventLevel ¶ added in v0.5.0
type EventLevel string
The severity level of the event.
const EventLevelError EventLevel = `ERROR`
const EventLevelInfo EventLevel = `INFO`
const EventLevelMetrics EventLevel = `METRICS`
const EventLevelWarn EventLevel = `WARN`
func (*EventLevel) Set ¶ added in v0.5.0
func (f *EventLevel) Set(v string) error
Set raw string value and validate it against allowed values
func (*EventLevel) String ¶ added in v0.5.0
func (f *EventLevel) String() string
String representation for fmt.Print
func (*EventLevel) Type ¶ added in v0.5.0
func (f *EventLevel) Type() string
Type always returns EventLevel to satisfy [pflag.Value] interface
type FileLibrary ¶ added in v0.7.0
type FileLibrary struct {
	// The absolute path of the file.
	Path string `json:"path,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (FileLibrary) MarshalJSON ¶ added in v0.23.0
func (s FileLibrary) MarshalJSON() ([]byte, error)
func (*FileLibrary) UnmarshalJSON ¶ added in v0.23.0
func (s *FileLibrary) UnmarshalJSON(b []byte) error
type GetPipelinePermissionLevelsRequest ¶ added in v0.15.0
type GetPipelinePermissionLevelsRequest struct {
	// The pipeline for which to get or manage permissions.
	PipelineId string `json:"-" url:"-"`
}
Get pipeline permission levels
type GetPipelinePermissionLevelsResponse ¶ added in v0.15.0
type GetPipelinePermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []PipelinePermissionsDescription `json:"permission_levels,omitempty"`
}
type GetPipelinePermissionsRequest ¶ added in v0.15.0
type GetPipelinePermissionsRequest struct {
	// The pipeline for which to get or manage permissions.
	PipelineId string `json:"-" url:"-"`
}
Get pipeline permissions
type GetPipelineRequest ¶ added in v0.8.0
type GetPipelineRequest struct {
PipelineId string `json:"-" url:"-"`
}
Get a pipeline
type GetPipelineResponse ¶
type GetPipelineResponse struct {
	// An optional message detailing the cause of the pipeline state.
	Cause string `json:"cause,omitempty"`
	// The ID of the cluster that the pipeline is running on.
	ClusterId string `json:"cluster_id,omitempty"`
	// The username of the pipeline creator.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// Serverless budget policy ID of this pipeline.
	EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"`
	// The health of a pipeline.
	Health GetPipelineResponseHealth `json:"health,omitempty"`
	// The last time the pipeline settings were modified or created.
	LastModified int64 `json:"last_modified,omitempty"`
	// Status of the latest updates for the pipeline. Ordered with the newest update first.
	LatestUpdates []UpdateStateInfo `json:"latest_updates,omitempty"`
	// A human friendly identifier for the pipeline, taken from the `spec`.
	Name string `json:"name,omitempty"`
	// The ID of the pipeline.
	PipelineId string `json:"pipeline_id,omitempty"`
	// Username of the user that the pipeline will run on behalf of.
	RunAsUserName string `json:"run_as_user_name,omitempty"`
	// The pipeline specification. This field is not returned when called by `ListPipelines`.
	Spec *PipelineSpec `json:"spec,omitempty"`
	// The pipeline state.
	State PipelineState `json:"state,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (GetPipelineResponse) MarshalJSON ¶ added in v0.23.0
func (s GetPipelineResponse) MarshalJSON() ([]byte, error)
func (*GetPipelineResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *GetPipelineResponse) UnmarshalJSON(b []byte) error
type GetPipelineResponseHealth ¶
type GetPipelineResponseHealth string
The health of a pipeline.
const GetPipelineResponseHealthHealthy GetPipelineResponseHealth = `HEALTHY`
const GetPipelineResponseHealthUnhealthy GetPipelineResponseHealth = `UNHEALTHY`
func (*GetPipelineResponseHealth) Set ¶ added in v0.2.0
func (f *GetPipelineResponseHealth) Set(v string) error
Set raw string value and validate it against allowed values
func (*GetPipelineResponseHealth) String ¶ added in v0.2.0
func (f *GetPipelineResponseHealth) String() string
String representation for fmt.Print
func (*GetPipelineResponseHealth) Type ¶ added in v0.2.0
func (f *GetPipelineResponseHealth) Type() string
Type always returns GetPipelineResponseHealth to satisfy [pflag.Value] interface
type GetUpdateRequest ¶ added in v0.8.0
type GetUpdateRequest struct {
	// The ID of the pipeline.
	PipelineId string `json:"-" url:"-"`
	// The ID of the update.
	UpdateId string `json:"-" url:"-"`
}
Get a pipeline update
type GetUpdateResponse ¶
type GetUpdateResponse struct {
	// The current update info.
	Update *UpdateInfo `json:"update,omitempty"`
}
type IngestionConfig ¶ added in v0.39.0
type IngestionConfig struct {
	// Select tables from a specific source report.
	Report *ReportSpec `json:"report,omitempty"`
	// Select tables from a specific source schema.
	Schema *SchemaSpec `json:"schema,omitempty"`
	// Select tables from a specific source table.
	Table *TableSpec `json:"table,omitempty"`
}
type IngestionGatewayPipelineDefinition ¶ added in v0.41.0
type IngestionGatewayPipelineDefinition struct {
	// Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
	ConnectionId string `json:"connection_id,omitempty"`
	// Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
	GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"`
	// Optional. The Unity Catalog-compatible name for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. The Delta Live Tables system will automatically create the storage location under the catalog and schema.
	GatewayStorageName string `json:"gateway_storage_name,omitempty"`
	// Required, Immutable. The name of the schema for the gateway pipeline's storage location.
	GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (IngestionGatewayPipelineDefinition) MarshalJSON ¶ added in v0.41.0
func (s IngestionGatewayPipelineDefinition) MarshalJSON() ([]byte, error)
func (*IngestionGatewayPipelineDefinition) UnmarshalJSON ¶ added in v0.41.0
func (s *IngestionGatewayPipelineDefinition) UnmarshalJSON(b []byte) error
type IngestionPipelineDefinition ¶ added in v0.44.0
type IngestionPipelineDefinition struct {
	// Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.
	ConnectionName string `json:"connection_name,omitempty"`
	// Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.
	IngestionGatewayId string `json:"ingestion_gateway_id,omitempty"`
	// Required. Settings specifying tables to replicate and the destination for the replicated tables.
	Objects []IngestionConfig `json:"objects,omitempty"`
	// Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.
	TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (IngestionPipelineDefinition) MarshalJSON ¶ added in v0.44.0
func (s IngestionPipelineDefinition) MarshalJSON() ([]byte, error)
func (*IngestionPipelineDefinition) UnmarshalJSON ¶ added in v0.44.0
func (s *IngestionPipelineDefinition) UnmarshalJSON(b []byte) error
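A sketch of a managed ingestion definition, assuming a Unity Catalog connection named my-connection already exists (SchemaSpec's source/destination fields are elided; see its own type entry):

	def := &pipelines.IngestionPipelineDefinition{
		// Either connection_name or ingestion_gateway_id, not both.
		ConnectionName: "my-connection",
		Objects: []pipelines.IngestionConfig{
			{
				// Replicate every table in one source schema.
				Schema: &pipelines.SchemaSpec{ /* ... */ },
			},
		},
	}
	_ = def // pass via CreatePipeline.IngestionDefinition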
type ListPipelineEventsRequest ¶ added in v0.8.0
type ListPipelineEventsRequest struct {
	// Criteria to select a subset of results, expressed using a SQL-like syntax. The supported filters are:
	//
	// 1. level='INFO' (or WARN or ERROR)
	// 2. level in ('INFO', 'WARN')
	// 3. id='[event-id]'
	// 4. timestamp > 'TIMESTAMP' (or >=, <, <=, =)
	//
	// Composite expressions are supported, for example: level in ('ERROR', 'WARN') AND timestamp > '2021-07-22T06:37:33.083Z'
	Filter string `json:"-" url:"filter,omitempty"`
	// Max number of entries to return in a single page. The system may return fewer than max_results events in a response, even if there are more events available.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// A string indicating a sort order by timestamp for the results, for example, ["timestamp asc"]. The sort order can be ascending or descending. By default, events are returned in descending order by timestamp.
	OrderBy []string `json:"-" url:"order_by,omitempty"`
	// Page token returned by previous call. This field is mutually exclusive with all fields in this request except max_results. An error is returned if any fields other than max_results are set when this field is set.
	PageToken string `json:"-" url:"page_token,omitempty"`

	PipelineId string `json:"-" url:"-"`

	ForceSendFields []string `json:"-"`
}
List pipeline events
func (ListPipelineEventsRequest) MarshalJSON ¶ added in v0.23.0
func (s ListPipelineEventsRequest) MarshalJSON() ([]byte, error)
func (*ListPipelineEventsRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *ListPipelineEventsRequest) UnmarshalJSON(b []byte) error
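The filter grammar above composes with the paginating iterator returned by ListPipelineEvents; a minimal sketch, continuing the page's example setup with w and ctx (pipelineId is illustrative):

	it := w.Pipelines.ListPipelineEvents(ctx, pipelines.ListPipelineEventsRequest{
		PipelineId: pipelineId,
		Filter:     "level in ('ERROR', 'WARN')",
		MaxResults: 100,
	})
	for it.HasNext(ctx) {
		event, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		logger.Infof(ctx, "%s: %s", event.Timestamp, event.Message)
	}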
type ListPipelineEventsResponse ¶ added in v0.5.0
type ListPipelineEventsResponse struct {
	// The list of events matching the request criteria.
	Events []PipelineEvent `json:"events,omitempty"`
	// If present, a token to fetch the next page of events.
	NextPageToken string `json:"next_page_token,omitempty"`
	// If present, a token to fetch the previous page of events.
	PrevPageToken string `json:"prev_page_token,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (ListPipelineEventsResponse) MarshalJSON ¶ added in v0.23.0
func (s ListPipelineEventsResponse) MarshalJSON() ([]byte, error)
func (*ListPipelineEventsResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *ListPipelineEventsResponse) UnmarshalJSON(b []byte) error
type ListPipelinesRequest ¶ added in v0.8.0
type ListPipelinesRequest struct {
	// Select a subset of results based on the specified criteria. The supported filters are:
	//
	// * `notebook='<path>'` to select pipelines that reference the provided notebook path.
	// * `name LIKE '[pattern]'` to select pipelines with a name that matches pattern. Wildcards are supported, for example: `name LIKE '%shopping%'`
	//
	// Composite filters are not supported. This field is optional.
	Filter string `json:"-" url:"filter,omitempty"`
	// The maximum number of entries to return in a single page. The system may return fewer than max_results events in a response, even if there are more events available. This field is optional. The default value is 25. The maximum value is 100. An error is returned if the value of max_results is greater than 100.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// A list of strings specifying the order of results. Supported order_by fields are id and name. The default is id asc. This field is optional.
	OrderBy []string `json:"-" url:"order_by,omitempty"`
	// Page token returned by previous call
	PageToken string `json:"-" url:"page_token,omitempty"`

	ForceSendFields []string `json:"-"`
}
List pipelines
func (ListPipelinesRequest) MarshalJSON ¶ added in v0.23.0
func (s ListPipelinesRequest) MarshalJSON() ([]byte, error)
func (*ListPipelinesRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *ListPipelinesRequest) UnmarshalJSON(b []byte) error
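A sketch applying the name filter described above, continuing the page's example setup with w and ctx (ListPipelinesAll drains the iterator into a slice):

	matches, err := w.Pipelines.ListPipelinesAll(ctx, pipelines.ListPipelinesRequest{
		Filter:     "name LIKE '%shopping%'",
		MaxResults: 100,
	})
	if err != nil {
		panic(err)
	}
	for _, p := range matches {
		logger.Infof(ctx, "%s (%s)", p.Name, p.PipelineId)
	}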
type ListPipelinesResponse ¶
type ListPipelinesResponse struct {
	// If present, a token to fetch the next page of results.
	NextPageToken string `json:"next_page_token,omitempty"`
	// The list of pipeline states matching the request criteria.
	Statuses []PipelineStateInfo `json:"statuses,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (ListPipelinesResponse) MarshalJSON ¶ added in v0.23.0
func (s ListPipelinesResponse) MarshalJSON() ([]byte, error)
func (*ListPipelinesResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *ListPipelinesResponse) UnmarshalJSON(b []byte) error
type ListUpdatesRequest ¶ added in v0.8.0
type ListUpdatesRequest struct {
	// Max number of entries to return in a single page.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// Page token returned by previous call
	PageToken string `json:"-" url:"page_token,omitempty"`
	// The pipeline to return updates for.
	PipelineId string `json:"-" url:"-"`
	// If present, returns updates until and including this update_id.
	UntilUpdateId string `json:"-" url:"until_update_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
List pipeline updates
func (ListUpdatesRequest) MarshalJSON ¶ added in v0.23.0
func (s ListUpdatesRequest) MarshalJSON() ([]byte, error)
func (*ListUpdatesRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *ListUpdatesRequest) UnmarshalJSON(b []byte) error
type ListUpdatesResponse ¶
type ListUpdatesResponse struct {
	// If present, then there are more results, and this is a token to be used in a subsequent request to fetch the next page.
	NextPageToken string `json:"next_page_token,omitempty"`
	// If present, then this token can be used in a subsequent request to fetch the previous page.
	PrevPageToken string `json:"prev_page_token,omitempty"`

	Updates []UpdateInfo `json:"updates,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (ListUpdatesResponse) MarshalJSON ¶ added in v0.23.0
func (s ListUpdatesResponse) MarshalJSON() ([]byte, error)
func (*ListUpdatesResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *ListUpdatesResponse) UnmarshalJSON(b []byte) error
type ManualTrigger ¶ added in v0.34.0
type ManualTrigger struct { }
type MaturityLevel ¶ added in v0.5.0
type MaturityLevel string
Maturity level for EventDetails.
const MaturityLevelDeprecated MaturityLevel = `DEPRECATED`
const MaturityLevelEvolving MaturityLevel = `EVOLVING`
const MaturityLevelStable MaturityLevel = `STABLE`
func (*MaturityLevel) Set ¶ added in v0.5.0
func (f *MaturityLevel) Set(v string) error
Set raw string value and validate it against allowed values
func (*MaturityLevel) String ¶ added in v0.5.0
func (f *MaturityLevel) String() string
String representation for fmt.Print
func (*MaturityLevel) Type ¶ added in v0.5.0
func (f *MaturityLevel) Type() string
Type always returns MaturityLevel to satisfy [pflag.Value] interface
type NotebookLibrary ¶
type NotebookLibrary struct {
	// The absolute path of the notebook.
	Path string `json:"path,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (NotebookLibrary) MarshalJSON ¶ added in v0.23.0
func (s NotebookLibrary) MarshalJSON() ([]byte, error)
func (*NotebookLibrary) UnmarshalJSON ¶ added in v0.23.0
func (s *NotebookLibrary) UnmarshalJSON(b []byte) error
type Notifications ¶ added in v0.21.0
type Notifications struct {
	// A list of alerts that trigger the sending of notifications to the configured destinations. The supported alerts are:
	//
	// * `on-update-success`: A pipeline update completes successfully.
	// * `on-update-failure`: Each time a pipeline update fails.
	// * `on-update-fatal-failure`: A pipeline update fails with a non-retryable (fatal) error.
	// * `on-flow-failure`: A single data flow fails.
	Alerts []string `json:"alerts,omitempty"`
	// A list of email addresses notified when a configured alert is triggered.
	EmailRecipients []string `json:"email_recipients,omitempty"`
}
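A sketch of a notification block that emails a team on failures, using the alert names listed above (the address is illustrative):

	notifications := []pipelines.Notifications{{
		Alerts: []string{
			"on-update-failure",
			"on-update-fatal-failure",
			"on-flow-failure",
		},
		EmailRecipients: []string{"data-team@example.com"},
	}}
	_ = notifications // pass via CreatePipeline.Notifications or EditPipeline.Notifications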
type Origin ¶ added in v0.5.0
type Origin struct {
	// The id of a batch. Unique within a flow.
	BatchId int `json:"batch_id,omitempty"`
	// The cloud provider, e.g., AWS or Azure.
	Cloud string `json:"cloud,omitempty"`
	// The id of the cluster where an execution happens. Unique within a region.
	ClusterId string `json:"cluster_id,omitempty"`
	// The name of a dataset. Unique within a pipeline.
	DatasetName string `json:"dataset_name,omitempty"`
	// The id of the flow. Globally unique. Incremental queries will generally reuse the same id while complete queries will have a new id per update.
	FlowId string `json:"flow_id,omitempty"`
	// The name of the flow. Not unique.
	FlowName string `json:"flow_name,omitempty"`
	// The optional host name where the event was triggered
	Host string `json:"host,omitempty"`
	// The id of a maintenance run. Globally unique.
	MaintenanceId string `json:"maintenance_id,omitempty"`
	// Materialization name.
	MaterializationName string `json:"materialization_name,omitempty"`
	// The org id of the user. Unique within a cloud.
	OrgId int `json:"org_id,omitempty"`
	// The id of the pipeline. Globally unique.
	PipelineId string `json:"pipeline_id,omitempty"`
	// The name of the pipeline. Not unique.
	PipelineName string `json:"pipeline_name,omitempty"`
	// The cloud region.
	Region string `json:"region,omitempty"`
	// The id of the request that caused an update.
	RequestId string `json:"request_id,omitempty"`
	// The id of a (delta) table. Globally unique.
	TableId string `json:"table_id,omitempty"`
	// The Unity Catalog id of the MV or ST being updated.
	UcResourceId string `json:"uc_resource_id,omitempty"`
	// The id of an execution. Globally unique.
	UpdateId string `json:"update_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (Origin) MarshalJSON ¶ added in v0.23.0
func (s Origin) MarshalJSON() ([]byte, error)
func (*Origin) UnmarshalJSON ¶ added in v0.23.0
func (s *Origin) UnmarshalJSON(b []byte) error
type PipelineAccessControlRequest ¶ added in v0.15.0
type PipelineAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"`
	// application ID of a service principal
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineAccessControlRequest) MarshalJSON ¶ added in v0.23.0
func (s PipelineAccessControlRequest) MarshalJSON() ([]byte, error)
func (*PipelineAccessControlRequest) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineAccessControlRequest) UnmarshalJSON(b []byte) error
type PipelineAccessControlResponse ¶ added in v0.15.0
type PipelineAccessControlResponse struct {
	// All permissions.
	AllPermissions []PipelinePermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineAccessControlResponse) MarshalJSON ¶ added in v0.23.0
func (s PipelineAccessControlResponse) MarshalJSON() ([]byte, error)
func (*PipelineAccessControlResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineAccessControlResponse) UnmarshalJSON(b []byte) error
type PipelineCluster ¶
type PipelineCluster struct {
	// Note: This field won't be persisted. Only API users will check this field.
	ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
	// Parameters needed in order to automatically scale clusters up and down based on load. Note: autoscaling works best with DB runtime versions 3.0 or later.
	Autoscale *PipelineClusterAutoscale `json:"autoscale,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not specified at cluster creation, a set of default values will be used.
	AwsAttributes *compute.AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation, a set of default values will be used.
	AzureAttributes *compute.AzureAttributes `json:"azure_attributes,omitempty"`
	// The configuration for delivering spark logs to a long-term storage destination. Only dbfs destinations are supported. Only one destination can be specified for one cluster. If the conf is given, the logs will be delivered to the destination every `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while the destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *compute.ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The optional ID of the instance pool to which the driver of the cluster belongs. If the driver pool is not assigned, the cluster uses the instance pool with id (instance_pool_id) for the driver as well.
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if unset, the driver node type will be set to the same value as `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Whether to enable local disk encryption for the cluster.
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not specified at cluster creation, a set of default values will be used.
	GcpAttributes *compute.GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []compute.InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// A label for the cluster specification, either `default` to configure the default cluster, or `maintenance` to configure the maintenance cluster. This field is optional. The default value is `default`.
	Label string `json:"label,omitempty"`
	// This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute intensive workloads. A list of available node types can be retrieved by using the :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the desired number of workers rather than the actual current number of workers. For instance, if a cluster is resized from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10 workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// An object containing a set of optional, user-specified Spark configuration key-value pairs. See :method:clusters/create for more details.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// An object containing a set of optional, user-specified environment variable key-value pairs. Please note that a key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default databricks managed environmental variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineCluster) MarshalJSON ¶ added in v0.23.0
func (s PipelineCluster) MarshalJSON() ([]byte, error)
func (*PipelineCluster) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineCluster) UnmarshalJSON(b []byte) error
type PipelineClusterAutoscale ¶ added in v0.31.0
type PipelineClusterAutoscale struct {
	// The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`.
	MaxWorkers int `json:"max_workers"`
	// The minimum number of workers the cluster can scale down to when underutilized. It is also the initial number of workers the cluster will have after creation.
	MinWorkers int `json:"min_workers"`
	// Databricks Enhanced Autoscaling optimizes cluster utilization by automatically allocating cluster resources based on workload volume, with minimal impact to the data processing latency of your pipelines. Enhanced Autoscaling is available for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` clusters.
	Mode PipelineClusterAutoscaleMode `json:"mode,omitempty"`
}
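A sketch of a default cluster that scales between one and five workers with Enhanced Autoscaling:

	cluster := pipelines.PipelineCluster{
		Label: "default",
		Autoscale: &pipelines.PipelineClusterAutoscale{
			MinWorkers: 1,
			MaxWorkers: 5, // must be strictly greater than MinWorkers
			Mode:       pipelines.PipelineClusterAutoscaleModeEnhanced,
		},
	}
	_ = cluster // pass via CreatePipeline.Clusters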
type PipelineClusterAutoscaleMode ¶ added in v0.31.0
type PipelineClusterAutoscaleMode string
Databricks Enhanced Autoscaling optimizes cluster utilization by automatically allocating cluster resources based on workload volume, with minimal impact to the data processing latency of your pipelines. Enhanced Autoscaling is available for `updates` clusters only. The legacy autoscaling feature is used for `maintenance` clusters.
const PipelineClusterAutoscaleModeEnhanced PipelineClusterAutoscaleMode = `ENHANCED`
const PipelineClusterAutoscaleModeLegacy PipelineClusterAutoscaleMode = `LEGACY`
func (*PipelineClusterAutoscaleMode) Set ¶ added in v0.31.0
func (f *PipelineClusterAutoscaleMode) Set(v string) error
Set raw string value and validate it against allowed values
func (*PipelineClusterAutoscaleMode) String ¶ added in v0.31.0
func (f *PipelineClusterAutoscaleMode) String() string
String representation for fmt.Print
func (*PipelineClusterAutoscaleMode) Type ¶ added in v0.31.0
func (f *PipelineClusterAutoscaleMode) Type() string
Type always returns PipelineClusterAutoscaleMode to satisfy [pflag.Value] interface
type PipelineDeployment ¶ added in v0.39.0
type PipelineDeployment struct {
	// The deployment method that manages the pipeline.
	Kind DeploymentKind `json:"kind,omitempty"`
	// The path to the file containing metadata about the deployment.
	MetadataFilePath string `json:"metadata_file_path,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineDeployment) MarshalJSON ¶ added in v0.39.0
func (s PipelineDeployment) MarshalJSON() ([]byte, error)
func (*PipelineDeployment) UnmarshalJSON ¶ added in v0.39.0
func (s *PipelineDeployment) UnmarshalJSON(b []byte) error
type PipelineEvent ¶ added in v0.5.0
type PipelineEvent struct {
	// Information about an error captured by the event.
	Error *ErrorDetail `json:"error,omitempty"`
	// The event type. Should always correspond to the details.
	EventType string `json:"event_type,omitempty"`
	// A time-based, globally unique id.
	Id string `json:"id,omitempty"`
	// The severity level of the event.
	Level EventLevel `json:"level,omitempty"`
	// Maturity level for event_type.
	MaturityLevel MaturityLevel `json:"maturity_level,omitempty"`
	// The display message associated with the event.
	Message string `json:"message,omitempty"`
	// Describes where the event originates from.
	Origin *Origin `json:"origin,omitempty"`
	// A sequencing object to identify and order events.
	Sequence *Sequencing `json:"sequence,omitempty"`
	// The time of the event.
	Timestamp string `json:"timestamp,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineEvent) MarshalJSON ¶ added in v0.23.0
func (s PipelineEvent) MarshalJSON() ([]byte, error)
func (*PipelineEvent) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineEvent) UnmarshalJSON(b []byte) error
type PipelineLibrary ¶
type PipelineLibrary struct {
	// The path to a file that defines a pipeline and is stored in Databricks Repos.
	File *FileLibrary `json:"file,omitempty"`
	// URI of the jar to be installed. Currently only DBFS is supported.
	Jar string `json:"jar,omitempty"`
	// Specification of a maven library to be installed.
	Maven *compute.MavenLibrary `json:"maven,omitempty"`
	// The path to a notebook that defines a pipeline and is stored in the Databricks workspace.
	Notebook *NotebookLibrary `json:"notebook,omitempty"`
	// URI of the whl to be installed.
	Whl string `json:"whl,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineLibrary) MarshalJSON ¶ added in v0.23.0
func (s PipelineLibrary) MarshalJSON() ([]byte, error)
func (*PipelineLibrary) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineLibrary) UnmarshalJSON(b []byte) error
type PipelinePermission ¶ added in v0.15.0
type PipelinePermission struct {
	Inherited bool `json:"inherited,omitempty"`

	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelinePermission) MarshalJSON ¶ added in v0.23.0
func (s PipelinePermission) MarshalJSON() ([]byte, error)
func (*PipelinePermission) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelinePermission) UnmarshalJSON(b []byte) error
type PipelinePermissionLevel ¶ added in v0.15.0
type PipelinePermissionLevel string
Permission level
const PipelinePermissionLevelCanManage PipelinePermissionLevel = `CAN_MANAGE`
const PipelinePermissionLevelCanRun PipelinePermissionLevel = `CAN_RUN`
const PipelinePermissionLevelCanView PipelinePermissionLevel = `CAN_VIEW`
const PipelinePermissionLevelIsOwner PipelinePermissionLevel = `IS_OWNER`
func (*PipelinePermissionLevel) Set ¶ added in v0.15.0
func (f *PipelinePermissionLevel) Set(v string) error
Set raw string value and validate it against allowed values
func (*PipelinePermissionLevel) String ¶ added in v0.15.0
func (f *PipelinePermissionLevel) String() string
String representation for fmt.Print
func (*PipelinePermissionLevel) Type ¶ added in v0.15.0
func (f *PipelinePermissionLevel) Type() string
Type always returns PipelinePermissionLevel to satisfy [pflag.Value] interface
type PipelinePermissions ¶ added in v0.15.0
type PipelinePermissions struct {
	AccessControlList []PipelineAccessControlResponse `json:"access_control_list,omitempty"`

	ObjectId string `json:"object_id,omitempty"`

	ObjectType string `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelinePermissions) MarshalJSON ¶ added in v0.23.0
func (s PipelinePermissions) MarshalJSON() ([]byte, error)
func (*PipelinePermissions) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelinePermissions) UnmarshalJSON(b []byte) error
type PipelinePermissionsDescription ¶ added in v0.15.0
type PipelinePermissionsDescription struct {
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelinePermissionsDescription) MarshalJSON ¶ added in v0.23.0
func (s PipelinePermissionsDescription) MarshalJSON() ([]byte, error)
func (*PipelinePermissionsDescription) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelinePermissionsDescription) UnmarshalJSON(b []byte) error
type PipelinePermissionsRequest ¶ added in v0.15.0
type PipelinePermissionsRequest struct {
	AccessControlList []PipelineAccessControlRequest `json:"access_control_list,omitempty"`
	// The pipeline for which to get or manage permissions.
	PipelineId string `json:"-" url:"-"`
}
type PipelineSpec ¶
type PipelineSpec struct {
	// Budget policy of this pipeline.
	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
	// A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, `catalog`.`target`.`table`). If `target` is not specified, no data is published to Unity Catalog.
	Catalog string `json:"catalog,omitempty"`
	// DLT Release Channel that specifies which version to use.
	Channel string `json:"channel,omitempty"`
	// Cluster settings for this pipeline deployment.
	Clusters []PipelineCluster `json:"clusters,omitempty"`
	// String-String configuration for this pipeline execution.
	Configuration map[string]string `json:"configuration,omitempty"`
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous bool `json:"continuous,omitempty"`
	// Deployment type of this pipeline.
	Deployment *PipelineDeployment `json:"deployment,omitempty"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development bool `json:"development,omitempty"`
	// Pipeline product edition.
	Edition string `json:"edition,omitempty"`
	// Filters on which Pipeline packages to include in the deployed graph.
	Filters *Filters `json:"filters,omitempty"`
	// The definition of a gateway pipeline to support CDC.
	GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"`
	// Unique identifier for this pipeline.
	Id string `json:"id,omitempty"`
	// The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.
	IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `json:"libraries,omitempty"`
	// Friendly identifier for this pipeline.
	Name string `json:"name,omitempty"`
	// List of notification settings for this pipeline.
	Notifications []Notifications `json:"notifications,omitempty"`
	// Whether Photon is enabled for this pipeline.
	Photon bool `json:"photon,omitempty"`
	// The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.
	Schema string `json:"schema,omitempty"`
	// Whether serverless compute is enabled for this pipeline.
	Serverless bool `json:"serverless,omitempty"`
	// DBFS root directory for storing checkpoints and tables.
	Storage string `json:"storage,omitempty"`
	// Target schema (database) to add tables in this pipeline to. If not specified, no data is published to the Hive metastore or Unity Catalog. To publish to Unity Catalog, also specify `catalog`.
	Target string `json:"target,omitempty"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
	Trigger *PipelineTrigger `json:"trigger,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineSpec) MarshalJSON ¶ added in v0.23.0
func (s PipelineSpec) MarshalJSON() ([]byte, error)
func (*PipelineSpec) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineSpec) UnmarshalJSON(b []byte) error
type PipelineState ¶
type PipelineState string
The pipeline state.
const PipelineStateDeleted PipelineState = `DELETED`
const PipelineStateDeploying PipelineState = `DEPLOYING`
const PipelineStateFailed PipelineState = `FAILED`
const PipelineStateIdle PipelineState = `IDLE`
const PipelineStateRecovering PipelineState = `RECOVERING`
const PipelineStateResetting PipelineState = `RESETTING`
const PipelineStateRunning PipelineState = `RUNNING`
const PipelineStateStarting PipelineState = `STARTING`
const PipelineStateStopping PipelineState = `STOPPING`
func (*PipelineState) Set ¶ added in v0.2.0
func (f *PipelineState) Set(v string) error
Set raw string value and validate it against allowed values
func (*PipelineState) String ¶ added in v0.2.0
func (f *PipelineState) String() string
String representation for fmt.Print
func (*PipelineState) Type ¶ added in v0.2.0
func (f *PipelineState) Type() string
Type always returns PipelineState to satisfy [pflag.Value] interface
type PipelineStateInfo ¶
type PipelineStateInfo struct {
	// The unique identifier of the cluster running the pipeline.
	ClusterId string `json:"cluster_id,omitempty"`
	// The username of the pipeline creator.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// The health of a pipeline.
	Health PipelineStateInfoHealth `json:"health,omitempty"`
	// Status of the latest updates for the pipeline. Ordered with the newest update first.
	LatestUpdates []UpdateStateInfo `json:"latest_updates,omitempty"`
	// The user-friendly name of the pipeline.
	Name string `json:"name,omitempty"`
	// The unique identifier of the pipeline.
	PipelineId string `json:"pipeline_id,omitempty"`
	// The username that the pipeline runs as. This is a read only value derived from the pipeline owner.
	RunAsUserName string `json:"run_as_user_name,omitempty"`
	// The pipeline state.
	State PipelineState `json:"state,omitempty"`

	ForceSendFields []string `json:"-"`
}
func (PipelineStateInfo) MarshalJSON ¶ added in v0.23.0
func (s PipelineStateInfo) MarshalJSON() ([]byte, error)
func (*PipelineStateInfo) UnmarshalJSON ¶ added in v0.23.0
func (s *PipelineStateInfo) UnmarshalJSON(b []byte) error
type PipelineStateInfoHealth ¶ added in v0.44.0
type PipelineStateInfoHealth string
The health of a pipeline.
const PipelineStateInfoHealthHealthy PipelineStateInfoHealth = `HEALTHY`
const PipelineStateInfoHealthUnhealthy PipelineStateInfoHealth = `UNHEALTHY`
func (*PipelineStateInfoHealth) Set ¶ added in v0.44.0
func (f *PipelineStateInfoHealth) Set(v string) error
Set raw string value and validate it against allowed values
func (*PipelineStateInfoHealth) String ¶ added in v0.44.0
func (f *PipelineStateInfoHealth) String() string
String representation for fmt.Print
func (*PipelineStateInfoHealth) Type ¶ added in v0.44.0
func (f *PipelineStateInfoHealth) Type() string
Type always returns PipelineStateInfoHealth to satisfy [pflag.Value] interface
type PipelineTrigger ¶
type PipelineTrigger struct {
	Cron *CronTrigger `json:"cron,omitempty"`

	Manual *ManualTrigger `json:"manual,omitempty"`
}
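The trigger is deprecated in favor of `continuous` (see CreatePipeline), but for completeness, a sketch of a cron trigger using a Quartz expression (the schedule below is illustrative):

	trigger := &pipelines.PipelineTrigger{
		Cron: &pipelines.CronTrigger{
			QuartzCronSchedule: "0 0 * * * ?", // top of every hour
			TimezoneId:         "UTC",
		},
	}
	_ = trigger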
type PipelinesAPI ¶
type PipelinesAPI struct {
// contains filtered or unexported fields
}
The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines.
Delta Live Tables is a framework for building reliable, maintainable, and testable data processing pipelines. You define the transformations to perform on your data, and Delta Live Tables manages task orchestration, cluster management, monitoring, data quality, and error handling.
Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta Live Tables manages how your data is transformed based on a target schema you define for each processing step. You can also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected data quality and specify how to handle records that fail those expectations.
func NewPipelines ¶
func NewPipelines(client *client.DatabricksClient) *PipelinesAPI
func (*PipelinesAPI) Create ¶ added in v0.2.0
func (a *PipelinesAPI) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)
Example (Pipelines) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

notebookPath := func() string {
	me, err := w.CurrentUser.Me(ctx)
	if err != nil {
		panic(err)
	}
	return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano()))
}()

created, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{
	Continuous: false,
	Name:       fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Libraries: []pipelines.PipelineLibrary{{
		Notebook: &pipelines.NotebookLibrary{
			Path: notebookPath,
		},
	}},
	Clusters: []pipelines.PipelineCluster{{
		InstancePoolId: os.Getenv("TEST_INSTANCE_POOL_ID"),
		Label:          "default",
		NumWorkers:     1,
		CustomTags:     map[string]string{"cluster_type": "default"},
	}},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup
err = w.Pipelines.DeleteByPipelineId(ctx, created.PipelineId)
if err != nil {
	panic(err)
}
Output:
func (*PipelinesAPI) Delete ¶ added in v0.2.0
func (a *PipelinesAPI) Delete(ctx context.Context, request DeletePipelineRequest) error
func (*PipelinesAPI) DeleteByPipelineId ¶ added in v0.2.0
func (a *PipelinesAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error
Delete a pipeline.
Deletes a pipeline.
func (*PipelinesAPI) Get ¶ added in v0.2.0
func (a *PipelinesAPI) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error)
Example (Pipelines) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

notebookPath := func() string {
	me, err := w.CurrentUser.Me(ctx)
	if err != nil {
		panic(err)
	}
	return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano()))
}()

created, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{
	Continuous: false,
	Name:       fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Libraries: []pipelines.PipelineLibrary{{
		Notebook: &pipelines.NotebookLibrary{
			Path: notebookPath,
		},
	}},
	Clusters: []pipelines.PipelineCluster{{
		InstancePoolId: os.Getenv("TEST_INSTANCE_POOL_ID"),
		Label:          "default",
		NumWorkers:     1,
		CustomTags:     map[string]string{"cluster_type": "default"},
	}},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := w.Pipelines.GetByPipelineId(ctx, created.PipelineId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup
err = w.Pipelines.DeleteByPipelineId(ctx, created.PipelineId)
if err != nil {
	panic(err)
}
Output:
func (*PipelinesAPI) GetByName ¶
func (a *PipelinesAPI) GetByName(ctx context.Context, name string) (*PipelineStateInfo, error)
GetByName calls PipelinesAPI.PipelineStateInfoNameToPipelineIdMap and returns a single PipelineStateInfo.
Returns an error if there's more than one PipelineStateInfo with the same .Name.
Note: All PipelineStateInfo instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
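A minimal sketch of a by-name lookup; the pipeline name below is a placeholder, not a pipeline that exists in your workspace:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// "my-pipeline" is a hypothetical name. GetByName lists every pipeline
// into memory and returns an error if more than one shares the name.
info, err := w.Pipelines.GetByName(ctx, "my-pipeline")
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", info)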
func (*PipelinesAPI) GetByPipelineId ¶ added in v0.2.0
func (a *PipelinesAPI) GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error)
Get a pipeline.
func (*PipelinesAPI) GetPermissionLevels ¶ added in v0.19.0
func (a *PipelinesAPI) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error)
func (*PipelinesAPI) GetPermissionLevelsByPipelineId ¶ added in v0.19.0
func (a *PipelinesAPI) GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error)
Get pipeline permission levels.
Gets the permission levels that a user can have on an object.
func (*PipelinesAPI) GetPermissions ¶ added in v0.19.0
func (a *PipelinesAPI) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error)
func (*PipelinesAPI) GetPermissionsByPipelineId ¶ added in v0.19.0
func (a *PipelinesAPI) GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error)
Get pipeline permissions.
Gets the permissions of a pipeline. Pipelines can inherit permissions from their root object.
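A minimal sketch, assuming a placeholder pipeline ID; in practice use the ID returned by Create:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// The pipeline ID is a placeholder.
permissions, err := w.Pipelines.GetPermissionsByPipelineId(ctx, "1234-567890-abcde123")
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", permissions)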
func (*PipelinesAPI) GetUpdate ¶
func (a *PipelinesAPI) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error)
func (*PipelinesAPI) GetUpdateByPipelineIdAndUpdateId ¶
func (a *PipelinesAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error)
Get a pipeline update.
Gets an update from an active pipeline.
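A minimal sketch using placeholder IDs; an update ID typically comes from StartUpdate or ListUpdates:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// Both IDs are placeholders.
update, err := w.Pipelines.GetUpdateByPipelineIdAndUpdateId(ctx, "1234-567890-abcde123", "f1e2d3c4")
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", update)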
func (*PipelinesAPI) ListPipelineEvents ¶ added in v0.24.0
func (a *PipelinesAPI) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent]
List pipeline events.
Retrieves events for a pipeline.
This method is generated by Databricks SDK Code Generator.
Example (Pipelines) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

notebookPath := func() string {
    me, err := w.CurrentUser.Me(ctx)
    if err != nil {
        panic(err)
    }
    return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano()))
}()

created, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{
    Continuous: false,
    Name:       fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
    Libraries: []pipelines.PipelineLibrary{pipelines.PipelineLibrary{
        Notebook: &pipelines.NotebookLibrary{
            Path: notebookPath,
        },
    }},
    Clusters: []pipelines.PipelineCluster{pipelines.PipelineCluster{
        InstancePoolId: os.Getenv("TEST_INSTANCE_POOL_ID"),
        Label:          "default",
        NumWorkers:     1,
        CustomTags:     map[string]string{"cluster_type": "default"},
    }},
})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", created)

events, err := w.Pipelines.ListPipelineEventsAll(ctx, pipelines.ListPipelineEventsRequest{
    PipelineId: created.PipelineId,
})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", events)

// cleanup
err = w.Pipelines.DeleteByPipelineId(ctx, created.PipelineId)
if err != nil {
    panic(err)
}
Output:
func (*PipelinesAPI) ListPipelineEventsAll ¶ added in v0.5.0
func (a *PipelinesAPI) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error)
List pipeline events.
Retrieves events for a pipeline.
This method is generated by Databricks SDK Code Generator.
func (*PipelinesAPI) ListPipelineEventsByPipelineId ¶ added in v0.5.0
func (a *PipelinesAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error)
List pipeline events.
Retrieves events for a pipeline.
func (*PipelinesAPI) ListPipelines ¶ added in v0.24.0
func (a *PipelinesAPI) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]
List pipelines.
Lists pipelines defined in the Delta Live Tables system.
This method is generated by Databricks SDK Code Generator.
Example (Pipelines) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

all, err := w.Pipelines.ListPipelinesAll(ctx, pipelines.ListPipelinesRequest{})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:
func (*PipelinesAPI) ListPipelinesAll ¶
func (a *PipelinesAPI) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error)
List pipelines.
Lists pipelines defined in the Delta Live Tables system.
This method is generated by Databricks SDK Code Generator.
func (*PipelinesAPI) ListUpdates ¶
func (a *PipelinesAPI) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error)
func (*PipelinesAPI) ListUpdatesByPipelineId ¶
func (a *PipelinesAPI) ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error)
List pipeline updates.
List updates for an active pipeline.
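A minimal sketch with a placeholder pipeline ID:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// The pipeline ID is a placeholder.
updates, err := w.Pipelines.ListUpdatesByPipelineId(ctx, "1234-567890-abcde123")
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", updates)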
func (*PipelinesAPI) PipelineStateInfoNameToPipelineIdMap ¶
func (a *PipelinesAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error)
PipelineStateInfoNameToPipelineIdMap calls PipelinesAPI.ListPipelinesAll and creates a map of results with PipelineStateInfo.Name as key and PipelineStateInfo.PipelineId as value.
Returns an error if there's more than one PipelineStateInfo with the same .Name.
Note: All PipelineStateInfo instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
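A minimal sketch of building the name-to-ID map; note that the full listing is held in memory before the map is returned:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

byName, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{})
if err != nil {
    panic(err)
}
for name, id := range byName {
    logger.Infof(ctx, "%s -> %s", name, id)
}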
func (*PipelinesAPI) SetPermissions ¶ added in v0.19.0
func (a *PipelinesAPI) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
Set pipeline permissions.
Sets permissions on a pipeline. Pipelines can inherit permissions from their root object.
func (*PipelinesAPI) StartUpdate ¶
func (a *PipelinesAPI) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)
Start a pipeline.
Starts a new update for the pipeline. If there is already an active update for the pipeline, the request will fail and the active update will remain running.
func (*PipelinesAPI) Stop ¶ added in v0.2.0
func (a *PipelinesAPI) Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetPipelineIdle[struct{}], error)
Stop a pipeline.
Stops the pipeline by canceling the active update. If there is no active update for the pipeline, this request is a no-op.
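A minimal sketch of stopping a pipeline and blocking on the returned waiter; the pipeline ID is a placeholder, and Get uses the default 20-minute timeout:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{
    PipelineId: "1234-567890-abcde123",
})
if err != nil {
    panic(err)
}

// Get blocks until the pipeline reaches IDLE or the timeout elapses.
stopped, err := wait.Get()
if err != nil {
    panic(err)
}
logger.Infof(ctx, "stopped: %v", stopped)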
func (*PipelinesAPI) StopAndWait ¶ deprecated added in v0.2.0
func (a *PipelinesAPI) StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetPipelineResponse]) (*GetPipelineResponse, error)
Calls PipelinesAPI.Stop and waits to reach IDLE state
You can override the default timeout of 20 minutes by adding the retries.Timeout[GetPipelineResponse](60*time.Minute) functional option.
Deprecated: use PipelinesAPI.Stop.Get() or PipelinesAPI.WaitGetPipelineIdle
func (*PipelinesAPI) Update ¶ added in v0.2.0
func (a *PipelinesAPI) Update(ctx context.Context, request EditPipeline) error
Edit a pipeline.
Updates a pipeline with the supplied configuration.
Example (Pipelines) ¶
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

notebookPath := func() string {
    me, err := w.CurrentUser.Me(ctx)
    if err != nil {
        panic(err)
    }
    return filepath.Join("/Users", me.UserName, fmt.Sprintf("sdk-%x", time.Now().UnixNano()))
}()

created, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{
    Continuous: false,
    Name:       fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
    Libraries: []pipelines.PipelineLibrary{pipelines.PipelineLibrary{
        Notebook: &pipelines.NotebookLibrary{
            Path: notebookPath,
        },
    }},
    Clusters: []pipelines.PipelineCluster{pipelines.PipelineCluster{
        InstancePoolId: os.Getenv("TEST_INSTANCE_POOL_ID"),
        Label:          "default",
        NumWorkers:     1,
        CustomTags:     map[string]string{"cluster_type": "default"},
    }},
})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Pipelines.Update(ctx, pipelines.EditPipeline{
    PipelineId: created.PipelineId,
    Name:       fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
    Libraries: []pipelines.PipelineLibrary{pipelines.PipelineLibrary{
        Notebook: &pipelines.NotebookLibrary{
            Path: notebookPath,
        },
    }},
    Clusters: []pipelines.PipelineCluster{pipelines.PipelineCluster{
        InstancePoolId: os.Getenv("TEST_INSTANCE_POOL_ID"),
        Label:          "default",
        NumWorkers:     1,
        CustomTags:     map[string]string{"cluster_type": "default"},
    }},
})
if err != nil {
    panic(err)
}

// cleanup
err = w.Pipelines.DeleteByPipelineId(ctx, created.PipelineId)
if err != nil {
    panic(err)
}
Output:
func (*PipelinesAPI) UpdatePermissions ¶ added in v0.19.0
func (a *PipelinesAPI) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
Update pipeline permissions.
Updates the permissions on a pipeline. Pipelines can inherit permissions from their root object.
func (*PipelinesAPI) WaitGetPipelineIdle ¶ added in v0.10.0
func (a *PipelinesAPI) WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error)
WaitGetPipelineIdle repeatedly calls PipelinesAPI.Get and waits to reach IDLE state
func (*PipelinesAPI) WaitGetPipelineRunning ¶ added in v0.10.0
func (a *PipelinesAPI) WaitGetPipelineRunning(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error)
WaitGetPipelineRunning repeatedly calls PipelinesAPI.Get and waits to reach RUNNING state
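A minimal sketch that starts an update and then waits for the RUNNING state, logging each poll; the pipeline ID is a placeholder:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// The pipeline ID is a placeholder.
pipelineId := "1234-567890-abcde123"
_, err = w.Pipelines.StartUpdate(ctx, pipelines.StartUpdate{
    PipelineId: pipelineId,
})
if err != nil {
    panic(err)
}

// Poll Get until the pipeline reaches RUNNING or the timeout expires.
running, err := w.Pipelines.WaitGetPipelineRunning(ctx, pipelineId, 20*time.Minute,
    func(resp *pipelines.GetPipelineResponse) {
        logger.Infof(ctx, "current: %v", resp)
    })
if err != nil {
    panic(err)
}
logger.Infof(ctx, "running: %v", running)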
type PipelinesInterface ¶ added in v0.29.0
type PipelinesInterface interface {
    // WaitGetPipelineRunning repeatedly calls [PipelinesAPI.Get] and waits to reach RUNNING state
    WaitGetPipelineRunning(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error)

    // WaitGetPipelineIdle repeatedly calls [PipelinesAPI.Get] and waits to reach IDLE state
    WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error)

    // Create a pipeline.
    //
    // Creates a new data processing pipeline based on the requested configuration.
    // If successful, this method returns the ID of the new pipeline.
    Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)

    // Delete a pipeline.
    //
    // Deletes a pipeline.
    Delete(ctx context.Context, request DeletePipelineRequest) error

    // Delete a pipeline.
    //
    // Deletes a pipeline.
    DeleteByPipelineId(ctx context.Context, pipelineId string) error

    // Get a pipeline.
    Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error)

    // Get a pipeline.
    GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error)

    // Get pipeline permission levels.
    //
    // Gets the permission levels that a user can have on an object.
    GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error)

    // Get pipeline permission levels.
    //
    // Gets the permission levels that a user can have on an object.
    GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error)

    // Get pipeline permissions.
    //
    // Gets the permissions of a pipeline. Pipelines can inherit permissions from
    // their root object.
    GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error)

    // Get pipeline permissions.
    //
    // Gets the permissions of a pipeline. Pipelines can inherit permissions from
    // their root object.
    GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error)

    // Get a pipeline update.
    //
    // Gets an update from an active pipeline.
    GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error)

    // Get a pipeline update.
    //
    // Gets an update from an active pipeline.
    GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error)

    // List pipeline events.
    //
    // Retrieves events for a pipeline.
    //
    // This method is generated by Databricks SDK Code Generator.
    ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent]

    // List pipeline events.
    //
    // Retrieves events for a pipeline.
    //
    // This method is generated by Databricks SDK Code Generator.
    ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error)

    // List pipeline events.
    //
    // Retrieves events for a pipeline.
    ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error)

    // List pipelines.
    //
    // Lists pipelines defined in the Delta Live Tables system.
    //
    // This method is generated by Databricks SDK Code Generator.
    ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo]

    // List pipelines.
    //
    // Lists pipelines defined in the Delta Live Tables system.
    //
    // This method is generated by Databricks SDK Code Generator.
    ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error)

    // PipelineStateInfoNameToPipelineIdMap calls [PipelinesAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value.
    //
    // Returns an error if there's more than one [PipelineStateInfo] with the same .Name.
    //
    // Note: All [PipelineStateInfo] instances are loaded into memory before creating a map.
    //
    // This method is generated by Databricks SDK Code Generator.
    PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error)

    // GetByName calls [PipelinesAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo].
    //
    // Returns an error if there's more than one [PipelineStateInfo] with the same .Name.
    //
    // Note: All [PipelineStateInfo] instances are loaded into memory before returning matching by name.
    //
    // This method is generated by Databricks SDK Code Generator.
    GetByName(ctx context.Context, name string) (*PipelineStateInfo, error)

    // List pipeline updates.
    //
    // List updates for an active pipeline.
    ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error)

    // List pipeline updates.
    //
    // List updates for an active pipeline.
    ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error)

    // Set pipeline permissions.
    //
    // Sets permissions on a pipeline. Pipelines can inherit permissions from their
    // root object.
    SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)

    // Start a pipeline.
    //
    // Starts a new update for the pipeline. If there is already an active update
    // for the pipeline, the request will fail and the active update will remain
    // running.
    StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)

    // Stop a pipeline.
    //
    // Stops the pipeline by canceling the active update. If there is no active
    // update for the pipeline, this request is a no-op.
    Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetPipelineIdle[struct{}], error)

    // Calls [PipelinesAPIInterface.Stop] and waits to reach IDLE state
    //
    // You can override the default timeout of 20 minutes by adding the
    // retries.Timeout[GetPipelineResponse](60*time.Minute) functional option.
    //
    // Deprecated: use [PipelinesAPIInterface.Stop].Get() or [PipelinesAPIInterface.WaitGetPipelineIdle]
    StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetPipelineResponse]) (*GetPipelineResponse, error)

    // Edit a pipeline.
    //
    // Updates a pipeline with the supplied configuration.
    Update(ctx context.Context, request EditPipeline) error

    // Update pipeline permissions.
    //
    // Updates the permissions on a pipeline. Pipelines can inherit permissions from
    // their root object.
    UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
}
type PipelinesService ¶
type PipelinesService interface {
    // Create a pipeline.
    //
    // Creates a new data processing pipeline based on the requested
    // configuration. If successful, this method returns the ID of the new
    // pipeline.
    Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error)

    // Delete a pipeline.
    //
    // Deletes a pipeline.
    Delete(ctx context.Context, request DeletePipelineRequest) error

    // Get a pipeline.
    Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error)

    // Get pipeline permission levels.
    //
    // Gets the permission levels that a user can have on an object.
    GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error)

    // Get pipeline permissions.
    //
    // Gets the permissions of a pipeline. Pipelines can inherit permissions
    // from their root object.
    GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error)

    // Get a pipeline update.
    //
    // Gets an update from an active pipeline.
    GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error)

    // List pipeline events.
    //
    // Retrieves events for a pipeline.
    //
    // Use ListPipelineEventsAll() to get all PipelineEvent instances, which will iterate over every result page.
    ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error)

    // List pipelines.
    //
    // Lists pipelines defined in the Delta Live Tables system.
    //
    // Use ListPipelinesAll() to get all PipelineStateInfo instances, which will iterate over every result page.
    ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error)

    // List pipeline updates.
    //
    // List updates for an active pipeline.
    ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error)

    // Set pipeline permissions.
    //
    // Sets permissions on a pipeline. Pipelines can inherit permissions from
    // their root object.
    SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)

    // Start a pipeline.
    //
    // Starts a new update for the pipeline. If there is already an active
    // update for the pipeline, the request will fail and the active update will
    // remain running.
    StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error)

    // Stop a pipeline.
    //
    // Stops the pipeline by canceling the active update. If there is no active
    // update for the pipeline, this request is a no-op.
    Stop(ctx context.Context, request StopRequest) error

    // Edit a pipeline.
    //
    // Updates a pipeline with the supplied configuration.
    Update(ctx context.Context, request EditPipeline) error

    // Update pipeline permissions.
    //
    // Updates the permissions on a pipeline. Pipelines can inherit permissions
    // from their root object.
    UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error)
}
type ReportSpec ¶ added in v0.49.0
type ReportSpec struct {
    // Required. Destination catalog to store table.
    DestinationCatalog string `json:"destination_catalog,omitempty"`
    // Required. Destination schema to store table.
    DestinationSchema string `json:"destination_schema,omitempty"`
    // Required. Destination table name. The pipeline fails if a table with that
    // name already exists.
    DestinationTable string `json:"destination_table,omitempty"`
    // Required. Report URL in the source system.
    SourceUrl string `json:"source_url,omitempty"`
    // Configuration settings to control the ingestion of tables. These settings
    // override the table_configuration defined in the
    // IngestionPipelineDefinition object.
    TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (ReportSpec) MarshalJSON ¶ added in v0.49.0
func (s ReportSpec) MarshalJSON() ([]byte, error)
func (*ReportSpec) UnmarshalJSON ¶ added in v0.49.0
func (s *ReportSpec) UnmarshalJSON(b []byte) error
type SchemaSpec ¶ added in v0.39.0
type SchemaSpec struct {
    // Required. Destination catalog to store tables.
    DestinationCatalog string `json:"destination_catalog,omitempty"`
    // Required. Destination schema to store tables in. Tables with the same
    // name as the source tables are created in this destination schema. The
    // pipeline fails if a table with the same name already exists.
    DestinationSchema string `json:"destination_schema,omitempty"`
    // The source catalog name. Might be optional depending on the type of
    // source.
    SourceCatalog string `json:"source_catalog,omitempty"`
    // Required. Schema name in the source database.
    SourceSchema string `json:"source_schema,omitempty"`
    // Configuration settings to control the ingestion of tables. These settings
    // are applied to all tables in this schema and override the
    // table_configuration defined in the IngestionPipelineDefinition object.
    TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (SchemaSpec) MarshalJSON ¶ added in v0.39.0
func (s SchemaSpec) MarshalJSON() ([]byte, error)
func (*SchemaSpec) UnmarshalJSON ¶ added in v0.39.0
func (s *SchemaSpec) UnmarshalJSON(b []byte) error
type Sequencing ¶ added in v0.5.0
type Sequencing struct {
    // A sequence number, unique and increasing within the control plane.
    ControlPlaneSeqNo int `json:"control_plane_seq_no,omitempty"`
    // The ID assigned by the data plane.
    DataPlaneId *DataPlaneId `json:"data_plane_id,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (Sequencing) MarshalJSON ¶ added in v0.23.0
func (s Sequencing) MarshalJSON() ([]byte, error)
func (*Sequencing) UnmarshalJSON ¶ added in v0.23.0
func (s *Sequencing) UnmarshalJSON(b []byte) error
type SerializedException ¶ added in v0.5.0
type SerializedException struct {
    // Runtime class of the exception
    ClassName string `json:"class_name,omitempty"`
    // Exception message
    Message string `json:"message,omitempty"`
    // Stack trace consisting of a list of stack frames
    Stack []StackFrame `json:"stack,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (SerializedException) MarshalJSON ¶ added in v0.23.0
func (s SerializedException) MarshalJSON() ([]byte, error)
func (*SerializedException) UnmarshalJSON ¶ added in v0.23.0
func (s *SerializedException) UnmarshalJSON(b []byte) error
type StackFrame ¶ added in v0.5.0
type StackFrame struct {
    // Class from which the method call originated
    DeclaringClass string `json:"declaring_class,omitempty"`
    // File where the method is defined
    FileName string `json:"file_name,omitempty"`
    // Line from which the method was called
    LineNumber int `json:"line_number,omitempty"`
    // Name of the method which was called
    MethodName string `json:"method_name,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (StackFrame) MarshalJSON ¶ added in v0.23.0
func (s StackFrame) MarshalJSON() ([]byte, error)
func (*StackFrame) UnmarshalJSON ¶ added in v0.23.0
func (s *StackFrame) UnmarshalJSON(b []byte) error
type StartUpdate ¶
type StartUpdate struct {
    Cause StartUpdateCause `json:"cause,omitempty"`
    // If true, this update will reset all tables before running.
    FullRefresh bool `json:"full_refresh,omitempty"`
    // A list of tables to update with fullRefresh. If both refresh_selection
    // and full_refresh_selection are empty, this is a full graph update. Full
    // Refresh on a table means that the states of the table will be reset
    // before the refresh.
    FullRefreshSelection []string `json:"full_refresh_selection,omitempty"`

    PipelineId string `json:"-" url:"-"`
    // A list of tables to update without fullRefresh. If both refresh_selection
    // and full_refresh_selection are empty, this is a full graph update. Full
    // Refresh on a table means that the states of the table will be reset
    // before the refresh.
    RefreshSelection []string `json:"refresh_selection,omitempty"`
    // If true, this update only validates the correctness of pipeline source
    // code but does not materialize or publish any datasets.
    ValidateOnly bool `json:"validate_only,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (StartUpdate) MarshalJSON ¶ added in v0.23.0
func (s StartUpdate) MarshalJSON() ([]byte, error)
func (*StartUpdate) UnmarshalJSON ¶ added in v0.23.0
func (s *StartUpdate) UnmarshalJSON(b []byte) error
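A minimal sketch of a selective refresh using the fields above; the pipeline ID and table names are placeholders:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

// Refresh two tables incrementally and fully reset a third; with both
// selections empty this would instead be a full graph update.
resp, err := w.Pipelines.StartUpdate(ctx, pipelines.StartUpdate{
    PipelineId:           "1234-567890-abcde123",
    RefreshSelection:     []string{"sales.orders", "sales.customers"},
    FullRefreshSelection: []string{"sales.daily_summary"},
})
if err != nil {
    panic(err)
}
logger.Infof(ctx, "started update %s", resp.UpdateId)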
type StartUpdateCause ¶
type StartUpdateCause string
const StartUpdateCauseApiCall StartUpdateCause = `API_CALL`
const StartUpdateCauseJobTask StartUpdateCause = `JOB_TASK`
const StartUpdateCauseRetryOnFailure StartUpdateCause = `RETRY_ON_FAILURE`
const StartUpdateCauseSchemaChange StartUpdateCause = `SCHEMA_CHANGE`
const StartUpdateCauseServiceUpgrade StartUpdateCause = `SERVICE_UPGRADE`
const StartUpdateCauseUserAction StartUpdateCause = `USER_ACTION`
func (*StartUpdateCause) Set ¶ added in v0.2.0
func (f *StartUpdateCause) Set(v string) error
Set raw string value and validate it against allowed values
func (*StartUpdateCause) String ¶ added in v0.2.0
func (f *StartUpdateCause) String() string
String representation for fmt.Print
func (*StartUpdateCause) Type ¶ added in v0.2.0
func (f *StartUpdateCause) Type() string
Type always returns StartUpdateCause to satisfy [pflag.Value] interface
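Because StartUpdateCause satisfies the [pflag.Value] interface, it can be parsed from a raw string with validation. A minimal sketch, assuming String returns the raw enum value:
// Set validates the raw string against the allowed constants above.
var cause pipelines.StartUpdateCause
if err := cause.Set("API_CALL"); err != nil {
    panic(err)
}
fmt.Println(cause.String())

// An unrecognized value is rejected with an error instead of being stored.
if err := cause.Set("NOT_A_CAUSE"); err != nil {
    fmt.Println("rejected:", err)
}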
type StartUpdateResponse ¶
type StartUpdateResponse struct {
    UpdateId string `json:"update_id,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (StartUpdateResponse) MarshalJSON ¶ added in v0.23.0
func (s StartUpdateResponse) MarshalJSON() ([]byte, error)
func (*StartUpdateResponse) UnmarshalJSON ¶ added in v0.23.0
func (s *StartUpdateResponse) UnmarshalJSON(b []byte) error
type StopPipelineResponse ¶ added in v0.34.0
type StopPipelineResponse struct { }
type StopRequest ¶ added in v0.8.0
type StopRequest struct {
PipelineId string `json:"-" url:"-"`
}
Stop a pipeline
type TableSpec ¶ added in v0.39.0
type TableSpec struct {
    // Required. Destination catalog to store table.
    DestinationCatalog string `json:"destination_catalog,omitempty"`
    // Required. Destination schema to store table.
    DestinationSchema string `json:"destination_schema,omitempty"`
    // Optional. Destination table name. The pipeline fails if a table with that
    // name already exists. If not set, the source table name is used.
    DestinationTable string `json:"destination_table,omitempty"`
    // Source catalog name. Might be optional depending on the type of source.
    SourceCatalog string `json:"source_catalog,omitempty"`
    // Schema name in the source database. Might be optional depending on the
    // type of source.
    SourceSchema string `json:"source_schema,omitempty"`
    // Required. Table name in the source database.
    SourceTable string `json:"source_table,omitempty"`
    // Configuration settings to control the ingestion of tables. These settings
    // override the table_configuration defined in the
    // IngestionPipelineDefinition object and the SchemaSpec.
    TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (TableSpec) MarshalJSON ¶ added in v0.39.0
func (s TableSpec) MarshalJSON() ([]byte, error)
func (*TableSpec) UnmarshalJSON ¶ added in v0.39.0
func (s *TableSpec) UnmarshalJSON(b []byte) error
type TableSpecificConfig ¶ added in v0.41.0
type TableSpecificConfig struct {
    // The primary key of the table used to apply changes.
    PrimaryKeys []string `json:"primary_keys,omitempty"`
    // If true, formula fields defined in the table are included in the
    // ingestion. This setting is only valid for the Salesforce connector.
    SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"`
    // The SCD type to use to ingest the table.
    ScdType TableSpecificConfigScdType `json:"scd_type,omitempty"`
    // The column names specifying the logical order of events in the source
    // data. Delta Live Tables uses this sequencing to handle change events that
    // arrive out of order.
    SequenceBy []string `json:"sequence_by,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (TableSpecificConfig) MarshalJSON ¶ added in v0.41.0
func (s TableSpecificConfig) MarshalJSON() ([]byte, error)
func (*TableSpecificConfig) UnmarshalJSON ¶ added in v0.41.0
func (s *TableSpecificConfig) UnmarshalJSON(b []byte) error
type TableSpecificConfigScdType ¶ added in v0.41.0
type TableSpecificConfigScdType string
The SCD type to use to ingest the table.
const TableSpecificConfigScdTypeScdType1 TableSpecificConfigScdType = `SCD_TYPE_1`
const TableSpecificConfigScdTypeScdType2 TableSpecificConfigScdType = `SCD_TYPE_2`
func (*TableSpecificConfigScdType) Set ¶ added in v0.41.0
func (f *TableSpecificConfigScdType) Set(v string) error
Set raw string value and validate it against allowed values
func (*TableSpecificConfigScdType) String ¶ added in v0.41.0
func (f *TableSpecificConfigScdType) String() string
String representation for fmt.Print
func (*TableSpecificConfigScdType) Type ¶ added in v0.41.0
func (f *TableSpecificConfigScdType) Type() string
Type always returns TableSpecificConfigScdType to satisfy [pflag.Value] interface
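A minimal sketch of a per-table ingestion configuration using SCD Type 2; the catalog, schema, and column names are placeholders:
cfg := pipelines.TableSpecificConfig{
    // Match change events on this key and keep full history (SCD Type 2).
    PrimaryKeys: []string{"order_id"},
    ScdType:     pipelines.TableSpecificConfigScdTypeScdType2,
    // Resolve out-of-order change events using this ordering column.
    SequenceBy: []string{"updated_at"},
}

// Attach it to a single table; it overrides configuration set at the
// IngestionPipelineDefinition or SchemaSpec level.
spec := pipelines.TableSpec{
    DestinationCatalog: "main",
    DestinationSchema:  "bronze",
    SourceTable:        "orders",
    TableConfiguration: &cfg,
}
_ = spec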
type UpdateInfo ¶
type UpdateInfo struct {
    // What triggered this update.
    Cause UpdateInfoCause `json:"cause,omitempty"`
    // The ID of the cluster that the update is running on.
    ClusterId string `json:"cluster_id,omitempty"`
    // The pipeline configuration with system defaults applied where unspecified
    // by the user. Not returned by ListUpdates.
    Config *PipelineSpec `json:"config,omitempty"`
    // The time when this update was created.
    CreationTime int64 `json:"creation_time,omitempty"`
    // If true, this update will reset all tables before running.
    FullRefresh bool `json:"full_refresh,omitempty"`
    // A list of tables to update with fullRefresh. If both refresh_selection
    // and full_refresh_selection are empty, this is a full graph update. Full
    // Refresh on a table means that the states of the table will be reset
    // before the refresh.
    FullRefreshSelection []string `json:"full_refresh_selection,omitempty"`
    // The ID of the pipeline.
    PipelineId string `json:"pipeline_id,omitempty"`
    // A list of tables to update without fullRefresh. If both refresh_selection
    // and full_refresh_selection are empty, this is a full graph update. Full
    // Refresh on a table means that the states of the table will be reset
    // before the refresh.
    RefreshSelection []string `json:"refresh_selection,omitempty"`
    // The update state.
    State UpdateInfoState `json:"state,omitempty"`
    // The ID of this update.
    UpdateId string `json:"update_id,omitempty"`
    // If true, this update only validates the correctness of pipeline source
    // code but does not materialize or publish any datasets.
    ValidateOnly bool `json:"validate_only,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (UpdateInfo) MarshalJSON ¶ added in v0.23.0
func (s UpdateInfo) MarshalJSON() ([]byte, error)
func (*UpdateInfo) UnmarshalJSON ¶ added in v0.23.0
func (s *UpdateInfo) UnmarshalJSON(b []byte) error
type UpdateInfoCause ¶
type UpdateInfoCause string
What triggered this update.
const UpdateInfoCauseApiCall UpdateInfoCause = `API_CALL`
const UpdateInfoCauseJobTask UpdateInfoCause = `JOB_TASK`
const UpdateInfoCauseRetryOnFailure UpdateInfoCause = `RETRY_ON_FAILURE`
const UpdateInfoCauseSchemaChange UpdateInfoCause = `SCHEMA_CHANGE`
const UpdateInfoCauseServiceUpgrade UpdateInfoCause = `SERVICE_UPGRADE`
const UpdateInfoCauseUserAction UpdateInfoCause = `USER_ACTION`
func (*UpdateInfoCause) Set ¶ added in v0.2.0
func (f *UpdateInfoCause) Set(v string) error
Set raw string value and validate it against allowed values
func (*UpdateInfoCause) String ¶ added in v0.2.0
func (f *UpdateInfoCause) String() string
String representation for fmt.Print
func (*UpdateInfoCause) Type ¶ added in v0.2.0
func (f *UpdateInfoCause) Type() string
Type always returns UpdateInfoCause to satisfy [pflag.Value] interface
type UpdateInfoState ¶
type UpdateInfoState string
The update state.
const UpdateInfoStateCanceled UpdateInfoState = `CANCELED`
const UpdateInfoStateCompleted UpdateInfoState = `COMPLETED`
const UpdateInfoStateCreated UpdateInfoState = `CREATED`
const UpdateInfoStateFailed UpdateInfoState = `FAILED`
const UpdateInfoStateInitializing UpdateInfoState = `INITIALIZING`
const UpdateInfoStateQueued UpdateInfoState = `QUEUED`
const UpdateInfoStateResetting UpdateInfoState = `RESETTING`
const UpdateInfoStateRunning UpdateInfoState = `RUNNING`
const UpdateInfoStateSettingUpTables UpdateInfoState = `SETTING_UP_TABLES`
const UpdateInfoStateStopping UpdateInfoState = `STOPPING`
const UpdateInfoStateWaitingForResources UpdateInfoState = `WAITING_FOR_RESOURCES`
func (*UpdateInfoState) Set ¶ added in v0.2.0
func (f *UpdateInfoState) Set(v string) error
Set raw string value and validate it against allowed values
func (*UpdateInfoState) String ¶ added in v0.2.0
func (f *UpdateInfoState) String() string
String representation for fmt.Print
func (*UpdateInfoState) Type ¶ added in v0.2.0
func (f *UpdateInfoState) Type() string
Type always returns UpdateInfoState to satisfy [pflag.Value] interface
type UpdateStateInfo ¶
type UpdateStateInfo struct {
    CreationTime string `json:"creation_time,omitempty"`

    State UpdateStateInfoState `json:"state,omitempty"`

    UpdateId string `json:"update_id,omitempty"`

    ForceSendFields []string `json:"-"`
}
func (UpdateStateInfo) MarshalJSON ¶ added in v0.23.0
func (s UpdateStateInfo) MarshalJSON() ([]byte, error)
func (*UpdateStateInfo) UnmarshalJSON ¶ added in v0.23.0
func (s *UpdateStateInfo) UnmarshalJSON(b []byte) error
type UpdateStateInfoState ¶
type UpdateStateInfoState string
const UpdateStateInfoStateCanceled UpdateStateInfoState = `CANCELED`
const UpdateStateInfoStateCompleted UpdateStateInfoState = `COMPLETED`
const UpdateStateInfoStateCreated UpdateStateInfoState = `CREATED`
const UpdateStateInfoStateFailed UpdateStateInfoState = `FAILED`
const UpdateStateInfoStateInitializing UpdateStateInfoState = `INITIALIZING`
const UpdateStateInfoStateQueued UpdateStateInfoState = `QUEUED`
const UpdateStateInfoStateResetting UpdateStateInfoState = `RESETTING`
const UpdateStateInfoStateRunning UpdateStateInfoState = `RUNNING`
const UpdateStateInfoStateSettingUpTables UpdateStateInfoState = `SETTING_UP_TABLES`
const UpdateStateInfoStateStopping UpdateStateInfoState = `STOPPING`
const UpdateStateInfoStateWaitingForResources UpdateStateInfoState = `WAITING_FOR_RESOURCES`
func (*UpdateStateInfoState) Set ¶ added in v0.2.0
func (f *UpdateStateInfoState) Set(v string) error
Set raw string value and validate it against allowed values
func (*UpdateStateInfoState) String ¶ added in v0.2.0
func (f *UpdateStateInfoState) String() string
String representation for fmt.Print
func (*UpdateStateInfoState) Type ¶ added in v0.2.0
func (f *UpdateStateInfoState) Type() string
Type always returns UpdateStateInfoState to satisfy [pflag.Value] interface
type WaitGetPipelineIdle ¶ added in v0.10.0
type WaitGetPipelineIdle[R any] struct {
    Response *R

    PipelineId string `json:"pipeline_id"`

    Poll func(time.Duration, func(*GetPipelineResponse)) (*GetPipelineResponse, error)
    // contains filtered or unexported fields
}
WaitGetPipelineIdle is a wrapper that calls PipelinesAPI.WaitGetPipelineIdle and waits to reach IDLE state.
func (*WaitGetPipelineIdle[R]) Get ¶ added in v0.10.0
func (w *WaitGetPipelineIdle[R]) Get() (*GetPipelineResponse, error)
Get the GetPipelineResponse with the default timeout of 20 minutes.
func (*WaitGetPipelineIdle[R]) GetWithTimeout ¶ added in v0.10.0
func (w *WaitGetPipelineIdle[R]) GetWithTimeout(timeout time.Duration) (*GetPipelineResponse, error)
Get the GetPipelineResponse with custom timeout.
func (*WaitGetPipelineIdle[R]) OnProgress ¶ added in v0.10.0
func (w *WaitGetPipelineIdle[R]) OnProgress(callback func(*GetPipelineResponse)) *WaitGetPipelineIdle[R]
OnProgress invokes a callback every time it polls for the status update.
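A minimal sketch that chains OnProgress with a custom timeout on the waiter returned by Stop; the pipeline ID is a placeholder:
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
    panic(err)
}

wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{
    PipelineId: "1234-567890-abcde123",
})
if err != nil {
    panic(err)
}

// Log every poll, then block for up to 45 minutes instead of the
// default 20.
resp, err := wait.OnProgress(func(r *pipelines.GetPipelineResponse) {
    logger.Infof(ctx, "still waiting: %v", r)
}).GetWithTimeout(45 * time.Minute)
if err != nil {
    panic(err)
}
logger.Infof(ctx, "idle: %v", resp)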
type WaitGetPipelineRunning ¶ added in v0.10.0
type WaitGetPipelineRunning[R any] struct {
    Response *R

    PipelineId string `json:"pipeline_id"`

    Poll func(time.Duration, func(*GetPipelineResponse)) (*GetPipelineResponse, error)
    // contains filtered or unexported fields
}
WaitGetPipelineRunning is a wrapper that calls PipelinesAPI.WaitGetPipelineRunning and waits to reach RUNNING state.
func (*WaitGetPipelineRunning[R]) Get ¶ added in v0.10.0
func (w *WaitGetPipelineRunning[R]) Get() (*GetPipelineResponse, error)
Get the GetPipelineResponse with the default timeout of 20 minutes.
func (*WaitGetPipelineRunning[R]) GetWithTimeout ¶ added in v0.10.0
func (w *WaitGetPipelineRunning[R]) GetWithTimeout(timeout time.Duration) (*GetPipelineResponse, error)
Get the GetPipelineResponse with custom timeout.
func (*WaitGetPipelineRunning[R]) OnProgress ¶ added in v0.10.0
func (w *WaitGetPipelineRunning[R]) OnProgress(callback func(*GetPipelineResponse)) *WaitGetPipelineRunning[R]
OnProgress invokes a callback every time it polls for the status update.