package compute

v0.42.0
Published: Jun 3, 2024 License: Apache-2.0 Imports: 18 Imported by: 26

Documentation

Overview

These APIs allow you to manage Cluster Policies, Clusters, Command Execution, Global Init Scripts, Instance Pools, Instance Profiles, Libraries, Policy Families, etc.

Constants

This section is empty.

Variables

This section is empty.

Functions

func TrimLeadingWhitespace

func TrimLeadingWhitespace(commandStr string) (newCommand string)

TrimLeadingWhitespace removes leading whitespace so that Python code blocks embedded in Go source can still be interpreted properly.
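
For illustration, a minimal sketch of how TrimLeadingWhitespace might be used to clean up a Python snippet embedded in Go source before handing it to command execution; the snippet and variable names are hypothetical, and the usual imports (fmt and this compute package) are assumed:

pythonSnippet := `
		import json
		print(json.dumps({"hello": "world"}))
`
// Strip the indentation picked up from the Go source file so the Python
// interpreter sees properly aligned code.
cleaned := compute.TrimLeadingWhitespace(pythonSnippet)
fmt.Println(cleaned)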

Types

type AddInstanceProfile

type AddInstanceProfile struct {
	// The AWS IAM role ARN of the role associated with the instance profile.
	// This field is required if your role name and instance profile name do not
	// match and you want to use the instance profile with [Databricks SQL
	// Serverless].
	//
	// Otherwise, this field is optional.
	//
	// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
	IamRoleArn string `json:"iam_role_arn,omitempty"`
	// The AWS ARN of the instance profile to register with Databricks. This
	// field is required.
	InstanceProfileArn string `json:"instance_profile_arn"`
	// Boolean flag indicating whether the instance profile should only be used
	// in credential passthrough scenarios. If true, it means the instance
	// profile contains a meta IAM role which could assume a wide range of
	// roles. Therefore, it should always be used with authorization. This field
	// is optional; the default value is `false`.
	IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"`
	// By default, Databricks validates that it has sufficient permissions to
	// launch instances with the instance profile. This validation uses AWS
	// dry-run mode for the RunInstances API. If validation fails with an error
	// message that does not indicate an IAM related permission issue, (e.g.
	// “Your requested instance type is not supported in your requested
	// availability zone”), you can pass this flag to skip the validation and
	// forcibly add the instance profile.
	SkipValidation bool `json:"skip_validation,omitempty"`

	ForceSendFields []string `json:"-"`
}
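
A hedged sketch of constructing an AddInstanceProfile request and inspecting its wire format; the ARN is a placeholder, and encoding/json plus fmt are assumed to be imported:

request := compute.AddInstanceProfile{
	InstanceProfileArn: "arn:aws:iam::123456789012:instance-profile/my-profile", // placeholder
	// Skip the AWS dry-run validation when the reported failure is unrelated to IAM.
	SkipValidation: true,
}
payload, err := json.Marshal(request)
if err != nil {
	panic(err)
}
fmt.Println(string(payload))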

func (AddInstanceProfile) MarshalJSON added in v0.23.0

func (s AddInstanceProfile) MarshalJSON() ([]byte, error)

func (*AddInstanceProfile) UnmarshalJSON added in v0.23.0

func (s *AddInstanceProfile) UnmarshalJSON(b []byte) error

type AddResponse added in v0.34.0

type AddResponse struct {
}

type Adlsgen2Info added in v0.31.0

type Adlsgen2Info struct {
	// abfss destination, e.g.
	// `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`.
	Destination string `json:"destination"`
}

type AutoScale

type AutoScale struct {
	// The maximum number of workers to which the cluster can scale up when
	// overloaded. Note that `max_workers` must be strictly greater than
	// `min_workers`.
	MaxWorkers int `json:"max_workers,omitempty"`
	// The minimum number of workers to which the cluster can scale down when
	// underutilized. It is also the initial number of workers the cluster will
	// have after creation.
	MinWorkers int `json:"min_workers,omitempty"`

	ForceSendFields []string `json:"-"`
}
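
A short sketch of an autoscaling range; MaxWorkers must be strictly greater than MinWorkers, and MinWorkers is also the initial cluster size:

autoscale := &compute.AutoScale{
	MinWorkers: 2, // initial number of workers after creation
	MaxWorkers: 8, // upper bound when the cluster is overloaded
}
_ = autoscale // typically assigned to a cluster spec's Autoscale field (see ClusterDetails)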

func (AutoScale) MarshalJSON added in v0.31.0

func (s AutoScale) MarshalJSON() ([]byte, error)

func (*AutoScale) UnmarshalJSON added in v0.31.0

func (s *AutoScale) UnmarshalJSON(b []byte) error

type AwsAttributes

type AwsAttributes struct {
	// Availability type used for all subsequent nodes past the
	// `first_on_demand` ones.
	//
	// Note: If `first_on_demand` is zero, this availability type will be used
	// for the entire cluster.
	Availability AwsAvailability `json:"availability,omitempty"`
	// The number of volumes launched for each instance. Users can choose up to
	// 10 volumes. This feature is only enabled for supported node types. Legacy
	// node types cannot specify custom EBS volumes. For node types with no
	// instance store, at least one EBS volume needs to be specified; otherwise,
	// cluster creation will fail.
	//
	// These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc. Instance
	// store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.
	//
	// If EBS volumes are attached, Databricks will configure Spark to use only
	// the EBS volumes for scratch storage because heterogeneously sized scratch
	// devices can lead to inefficient disk utilization. If no EBS volumes are
	// attached, Databricks will configure Spark to use instance store volumes.
	//
	// Please note that if EBS volumes are specified, then the Spark
	// configuration `spark.local.dir` will be overridden.
	EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
	// If using gp3 volumes, what IOPS to use for the disk. If this is not set,
	// the maximum performance of a gp2 volume with the same volume size will be
	// used.
	EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
	// The size of each EBS volume (in GiB) launched for each instance. For
	// general purpose SSD, this value must be within the range 100 - 4096. For
	// throughput optimized HDD, this value must be within the range 500 - 4096.
	EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
	// If using gp3 volumes, what throughput to use for the disk. If this is not
	// set, the maximum performance of a gp2 volume with the same volume size
	// will be used.
	EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
	// The type of EBS volumes that will be launched with this cluster.
	EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"`
	// The first `first_on_demand` nodes of the cluster will be placed on
	// on-demand instances. If this value is greater than 0, the cluster driver
	// node in particular will be placed on an on-demand instance. If this value
	// is greater than or equal to the current cluster size, all nodes will be
	// placed on on-demand instances. If this value is less than the current
	// cluster size, `first_on_demand` nodes will be placed on on-demand
	// instances and the remainder will be placed on `availability` instances.
	// Note that this value does not affect cluster size and cannot currently be
	// mutated over the lifetime of a cluster.
	FirstOnDemand int `json:"first_on_demand,omitempty"`
	// Nodes for this cluster will only be placed on AWS instances with this
	// instance profile. If omitted, nodes will be placed on instances without
	// an IAM instance profile. The instance profile must have previously been
	// added to the Databricks environment by an account administrator.
	//
	// This feature may only be available to certain customer plans.
	//
	// If this field is omitted, we will pull in the default from the conf if
	// it exists.
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// The bid price for AWS spot instances, as a percentage of the
	// corresponding instance type's on-demand price. For example, if this field
	// is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then
	// the bid price is half of the price of on-demand `r3.xlarge` instances.
	// Similarly, if this field is set to 200, the bid price is twice the price
	// of on-demand `r3.xlarge` instances. If not specified, the default value
	// is 100. When spot instances are requested for this cluster, only spot
	// instances whose bid price percentage matches this field will be
	// considered. Note that, for safety, we enforce this field to be no more
	// than 10000.
	//
	// The default value and documentation here should be kept consistent with
	// CommonConf.defaultSpotBidPricePercent and
	// CommonConf.maxSpotBidPricePercent.
	SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"`
	// Identifier for the availability zone/datacenter in which the cluster
	// resides. This string will be of a form like "us-west-2a". The provided
	// availability zone must be in the same region as the Databricks
	// deployment. For example, "us-west-2a" is not a valid zone id if the
	// Databricks deployment resides in the "us-east-1" region. This is an
	// optional field at cluster creation, and if not specified, a default zone
	// will be used. If the zone specified is "auto", Databricks will try to
	// place the cluster in a zone with high availability, and will retry
	// placement in a different AZ if there is not enough capacity. The list of
	// available zones as well
	// as the default value can be found by using the `List Zones` method.
	ZoneId string `json:"zone_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
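
A hedged sketch of AwsAttributes for a mostly-spot cluster: the driver stays on an on-demand instance, workers bid at 100% of the on-demand price, and two EBS volumes are attached per node; all values are illustrative:

awsAttrs := &compute.AwsAttributes{
	Availability:        compute.AwsAvailabilitySpotWithFallback,
	FirstOnDemand:       1,   // keep the driver on an on-demand instance
	SpotBidPricePercent: 100, // the documented default
	ZoneId:              "auto",
	EbsVolumeCount:      2,
	EbsVolumeSize:       100, // GiB per volume
}
_ = awsAttrs // typically assigned to ClusterAttributes.AwsAttributes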

func (AwsAttributes) MarshalJSON added in v0.23.0

func (s AwsAttributes) MarshalJSON() ([]byte, error)

func (*AwsAttributes) UnmarshalJSON added in v0.23.0

func (s *AwsAttributes) UnmarshalJSON(b []byte) error

type AwsAvailability

type AwsAvailability string

Availability type used for all subsequent nodes past the `first_on_demand` ones.

Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster.

const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND`
const AwsAvailabilitySpot AwsAvailability = `SPOT`
const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK`

func (*AwsAvailability) Set

func (f *AwsAvailability) Set(v string) error

Set raw string value and validate it against allowed values

func (*AwsAvailability) String

func (f *AwsAvailability) String() string

String representation for fmt.Print

func (*AwsAvailability) Type

func (f *AwsAvailability) Type() string

Type always returns AwsAvailability to satisfy [pflag.Value] interface
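
Because AwsAvailability satisfies the pflag.Value interface, a raw string can be parsed and validated into it; a small sketch, assuming fmt is imported:

var availability compute.AwsAvailability
if err := availability.Set("SPOT_WITH_FALLBACK"); err != nil {
	panic(err) // returned for strings outside the allowed values
}
fmt.Println(availability.String(), availability.Type()) // prints the raw value and the type name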

type AzureAttributes

type AzureAttributes struct {
	// Availability type used for all subsequent nodes past the
	// `first_on_demand` ones. Note: If `first_on_demand` is zero (which only
	// happens on pool clusters), this availability type will be used for the
	// entire cluster.
	Availability AzureAvailability `json:"availability,omitempty"`
	// The first `first_on_demand` nodes of the cluster will be placed on
	// on-demand instances. This value should be greater than 0, to make sure
	// the cluster driver node is placed on an on-demand instance. If this value
	// is greater than or equal to the current cluster size, all nodes will be
	// placed on on-demand instances. If this value is less than the current
	// cluster size, `first_on_demand` nodes will be placed on on-demand
	// instances and the remainder will be placed on `availability` instances.
	// Note that this value does not affect cluster size and cannot currently be
	// mutated over the lifetime of a cluster.
	FirstOnDemand int `json:"first_on_demand,omitempty"`
	// Defines values necessary to configure and run Azure Log Analytics agent
	LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"`
	// The max bid price to be used for Azure spot instances. The Max price for
	// the bid cannot be higher than the on-demand price of the instance. If not
	// specified, the default value is -1, which specifies that the instance
	// cannot be evicted on the basis of price, and only on the basis of
	// availability. Further, the value should be > 0 or -1.
	SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"`

	ForceSendFields []string `json:"-"`
}
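
An illustrative AzureAttributes block for a spot-heavy Azure cluster; as described above, -1 disables price-based eviction:

azureAttrs := &compute.AzureAttributes{
	Availability:    compute.AzureAvailabilitySpotWithFallbackAzure,
	FirstOnDemand:   1,  // keep the driver on an on-demand instance
	SpotBidMaxPrice: -1, // evict only on capacity, never on price
}
_ = azureAttrs // typically assigned to ClusterAttributes.AzureAttributes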

func (AzureAttributes) MarshalJSON added in v0.23.0

func (s AzureAttributes) MarshalJSON() ([]byte, error)

func (*AzureAttributes) UnmarshalJSON added in v0.23.0

func (s *AzureAttributes) UnmarshalJSON(b []byte) error

type AzureAvailability

type AzureAvailability string

Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If `first_on_demand` is zero (which only happens on pool clusters), this availability type will be used for the entire cluster.

const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE`
const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE`
const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE`

func (*AzureAvailability) Set

func (f *AzureAvailability) Set(v string) error

Set raw string value and validate it against allowed values

func (*AzureAvailability) String

func (f *AzureAvailability) String() string

String representation for fmt.Print

func (*AzureAvailability) Type

func (f *AzureAvailability) Type() string

Type always returns AzureAvailability to satisfy [pflag.Value] interface

type CancelCommand

type CancelCommand struct {
	ClusterId string `json:"clusterId,omitempty"`

	CommandId string `json:"commandId,omitempty"`

	ContextId string `json:"contextId,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CancelCommand) MarshalJSON added in v0.23.0

func (s CancelCommand) MarshalJSON() ([]byte, error)

func (*CancelCommand) UnmarshalJSON added in v0.23.0

func (s *CancelCommand) UnmarshalJSON(b []byte) error

type CancelResponse added in v0.34.0

type CancelResponse struct {
}

type ChangeClusterOwner

type ChangeClusterOwner struct {
	// <needs content added>
	ClusterId string `json:"cluster_id"`
	// New owner of the cluster_id after this RPC.
	OwnerUsername string `json:"owner_username"`
}

type ChangeClusterOwnerResponse added in v0.34.0

type ChangeClusterOwnerResponse struct {
}

type ClientsTypes

type ClientsTypes struct {
	// With jobs set, the cluster can be used for jobs
	Jobs bool `json:"jobs,omitempty"`
	// With notebooks set, this cluster can be used for notebooks
	Notebooks bool `json:"notebooks,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClientsTypes) MarshalJSON added in v0.23.0

func (s ClientsTypes) MarshalJSON() ([]byte, error)

func (*ClientsTypes) UnmarshalJSON added in v0.23.0

func (s *ClientsTypes) UnmarshalJSON(b []byte) error

type CloneCluster added in v0.38.0

type CloneCluster struct {
	// The cluster that is being cloned.
	SourceClusterId string `json:"source_cluster_id"`
}

type CloudProviderNodeInfo

type CloudProviderNodeInfo struct {
	Status []CloudProviderNodeStatus `json:"status,omitempty"`
}

type CloudProviderNodeStatus

type CloudProviderNodeStatus string
const CloudProviderNodeStatusNotAvailableInRegion CloudProviderNodeStatus = `NotAvailableInRegion`
const CloudProviderNodeStatusNotEnabledOnSubscription CloudProviderNodeStatus = `NotEnabledOnSubscription`

func (*CloudProviderNodeStatus) Set

func (f *CloudProviderNodeStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*CloudProviderNodeStatus) String

func (f *CloudProviderNodeStatus) String() string

String representation for fmt.Print

func (*CloudProviderNodeStatus) Type

func (f *CloudProviderNodeStatus) Type() string

Type always returns CloudProviderNodeStatus to satisfy [pflag.Value] interface

type ClusterAccessControlRequest added in v0.15.0

type ClusterAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"`
	// application ID of a service principal
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterAccessControlRequest) MarshalJSON added in v0.23.0

func (s ClusterAccessControlRequest) MarshalJSON() ([]byte, error)

func (*ClusterAccessControlRequest) UnmarshalJSON added in v0.23.0

func (s *ClusterAccessControlRequest) UnmarshalJSON(b []byte) error

type ClusterAccessControlResponse added in v0.15.0

type ClusterAccessControlResponse struct {
	// All permissions.
	AllPermissions []ClusterPermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterAccessControlResponse) MarshalJSON added in v0.23.0

func (s ClusterAccessControlResponse) MarshalJSON() ([]byte, error)

func (*ClusterAccessControlResponse) UnmarshalJSON added in v0.23.0

func (s *ClusterAccessControlResponse) UnmarshalJSON(b []byte) error

type ClusterAttributes

type ClusterAttributes struct {
	// Automatically terminates the cluster after it is inactive for this time
	// in minutes. If not set, this cluster will not be automatically
	// terminated. If specified, the threshold must be between 10 and 10000
	// minutes. Users can also set this value to 0 to explicitly disable
	// automatic termination.
	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
	// one destination can be specified for one cluster. If the conf is given,
	// the logs will be delivered to the destination every `5 mins`. The
	// destination of driver logs is `$destination/$clusterId/driver`, while the
	// destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Cluster name requested by the user. This doesn't have to be unique. If
	// not specified at creation, the cluster name will be an empty string.
	ClusterName string `json:"cluster_name,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a
	// subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Data security mode decides what data governance model to use when
	// accessing data from a cluster.
	//
	// * `NONE`: No security isolation for multiple users sharing the cluster.
	// Data governance features are not available in this mode. * `SINGLE_USER`:
	// A secure cluster that can only be exclusively used by a single user
	// specified in `single_user_name`. Most programming languages, cluster
	// features and data governance features are available in this mode. *
	// `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
	// Cluster users are fully isolated so that they cannot see each other's
	// data and credentials. Most data governance features are supported in this
	// mode. But programming languages and cluster features might be limited.
	//
	// The following modes are deprecated starting with Databricks Runtime 15.0
	// and will be removed for future Databricks Runtime versions:
	//
	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
	// ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
	// from legacy Passthrough on high concurrency clusters. *
	// `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
	// Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
	// mode provides a way that has neither UC nor passthrough enabled.
	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`

	DockerImage *DockerImage `json:"docker_image,omitempty"`
	// The optional ID of the instance pool to which the cluster driver
	// belongs. If the driver pool is not assigned, the driver uses the
	// instance pool with id (instance_pool_id).
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if
	// unset, the driver node type will be set as the same value as
	// `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Autoscaling Local Storage: when enabled, this cluster will dynamically
	// acquire additional disk space when its Spark workers are running low on
	// disk space. This feature requires specific AWS permissions to function
	// correctly - refer to the User Guide for more details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Whether to enable LUKS on cluster VMs' local disks
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
	// to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// Decides which runtime engine to use, e.g. Standard vs. Photon. If
	// unspecified, the runtime engine is inferred from spark_version.
	RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
	// Single user name if data_security_mode is `SINGLE_USER`
	SingleUserName string `json:"single_user_name,omitempty"`
	// An object containing a set of optional, user-specified Spark
	// configuration key-value pairs. Users can also pass in a string of extra
	// JVM options to the driver and the executors via
	// `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
	// respectively.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// An object containing a set of optional, user-specified environment
	// variable key-value pairs. Please note that a key-value pair of the form
	// (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
	// driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
	// recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
	// example below. This ensures that all default Databricks-managed
	// environment variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
	// "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
	// "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
	// available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	SparkVersion string `json:"spark_version"`
	// SSH public key contents that will be added to each Spark node in this
	// cluster. The corresponding private keys can be used to login with the
	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`

	WorkloadType *WorkloadType `json:"workload_type,omitempty"`

	ForceSendFields []string `json:"-"`
}
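
To make the field interactions concrete, here is a hedged sketch of a minimal ClusterAttributes value; the cluster name, node type, and Spark version are placeholders, and the SPARK_DAEMON_JAVA_OPTS entry follows the append pattern recommended above:

attrs := compute.ClusterAttributes{
	ClusterName:            "analytics-shared", // placeholder
	SparkVersion:           "13.3.x-scala2.12", // placeholder; list via :method:clusters/sparkVersions
	NodeTypeId:             "i3.xlarge",        // placeholder; list via :method:clusters/listNodeTypes
	AutoterminationMinutes: 60,
	SparkConf: map[string]string{
		"spark.databricks.delta.preview.enabled": "true",
	},
	SparkEnvVars: map[string]string{
		// Append to the existing value so default Databricks-managed variables are kept.
		"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true",
	},
}
_ = attrs // these attributes are shared by cluster create and edit requests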

func (ClusterAttributes) MarshalJSON added in v0.23.0

func (s ClusterAttributes) MarshalJSON() ([]byte, error)

func (*ClusterAttributes) UnmarshalJSON added in v0.23.0

func (s *ClusterAttributes) UnmarshalJSON(b []byte) error

type ClusterDetails added in v0.11.0

type ClusterDetails struct {
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// Automatically terminates the cluster after it is inactive for this time
	// in minutes. If not set, this cluster will not be automatically
	// terminated. If specified, the threshold must be between 10 and 10000
	// minutes. Users can also set this value to 0 to explicitly disable
	// automatic termination.
	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
	// Number of CPU cores available for this cluster. Note that this can be
	// fractional, e.g. 7.5 cores, since certain node types are configured to
	// share cores between Spark nodes on the same instance.
	ClusterCores float64 `json:"cluster_cores,omitempty"`
	// Canonical identifier for the cluster. This id is retained during cluster
	// restarts and resizes, while each new cluster has a globally unique id.
	ClusterId string `json:"cluster_id,omitempty"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
	// one destination can be specified for one cluster. If the conf is given,
	// the logs will be delivered to the destination every `5 mins`. The
	// destination of driver logs is `$destination/$clusterId/driver`, while the
	// destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Cluster log delivery status.
	ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"`
	// Total amount of cluster memory, in megabytes
	ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"`
	// Cluster name requested by the user. This doesn't have to be unique. If
	// not specified at creation, the cluster name will be an empty string.
	ClusterName string `json:"cluster_name,omitempty"`
	// Determines whether the cluster was created by a user through the UI,
	// created by the Databricks Jobs Scheduler, or through an API request. This
	// is the same as cluster_creator, but read only.
	ClusterSource ClusterSource `json:"cluster_source,omitempty"`
	// Creator user name. The field won't be included in the response if the
	// user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a
	// subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Data security mode decides what data governance model to use when
	// accessing data from a cluster.
	//
	// * `NONE`: No security isolation for multiple users sharing the cluster.
	// Data governance features are not available in this mode. * `SINGLE_USER`:
	// A secure cluster that can only be exclusively used by a single user
	// specified in `single_user_name`. Most programming languages, cluster
	// features and data governance features are available in this mode. *
	// `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
	// Cluster users are fully isolated so that they cannot see each other's
	// data and credentials. Most data governance features are supported in this
	// mode. But programming languages and cluster features might be limited.
	//
	// The following modes are deprecated starting with Databricks Runtime 15.0
	// and will be removed for future Databricks Runtime versions:
	//
	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
	// ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
	// from legacy Passthrough on high concurrency clusters. *
	// `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
	// Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
	// mode provides a way that has neither UC nor passthrough enabled.
	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
	// Tags that are added by Databricks regardless of any `custom_tags`,
	// including:
	//
	// - Vendor: Databricks
	//
	// - Creator: <username_of_creator>
	//
	// - ClusterName: <name_of_cluster>
	//
	// - ClusterId: <id_of_cluster>
	//
	// - Name: <Databricks internal use>
	DefaultTags map[string]string `json:"default_tags,omitempty"`

	DockerImage *DockerImage `json:"docker_image,omitempty"`
	// Node on which the Spark driver resides. The driver node contains the
	// Spark master and the <Databricks> application that manages the
	// per-notebook Spark REPLs.
	Driver *SparkNode `json:"driver,omitempty"`
	// The optional ID of the instance pool to which the cluster driver
	// belongs. If the driver pool is not assigned, the driver uses the
	// instance pool with id (instance_pool_id).
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if
	// unset, the driver node type will be set as the same value as
	// `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Autoscaling Local Storage: when enabled, this cluster will dynamically
	// acquire additional disk space when its Spark workers are running low on
	// disk space. This feature requires specific AWS permissions to function
	// correctly - refer to the User Guide for more details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Whether to enable LUKS on cluster VMs' local disks
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Nodes on which the Spark executors reside.
	Executors []SparkNode `json:"executors,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
	// to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// Port on which Spark JDBC server is listening in the driver node. No
	// service will be listening on this port in executor nodes.
	JdbcPort int `json:"jdbc_port,omitempty"`
	// the timestamp that the cluster was started/restarted
	LastRestartedTime int64 `json:"last_restarted_time,omitempty"`
	// Time when the cluster driver last lost its state (due to a restart or
	// driver failure).
	LastStateLossTime int64 `json:"last_state_loss_time,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// Decides which runtime engine to use, e.g. Standard vs. Photon. If
	// unspecified, the runtime engine is inferred from spark_version.
	RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
	// Single user name if data_security_mode is `SINGLE_USER`
	SingleUserName string `json:"single_user_name,omitempty"`
	// An object containing a set of optional, user-specified Spark
	// configuration key-value pairs. Users can also pass in a string of extra
	// JVM options to the driver and the executors via
	// `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
	// respectively.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// A canonical SparkContext identifier. This value *does* change when the
	// Spark driver restarts. The pair `(cluster_id, spark_context_id)` is a
	// globally unique identifier over all Spark contexts.
	SparkContextId int64 `json:"spark_context_id,omitempty"`
	// An object containing a set of optional, user-specified environment
	// variable key-value pairs. Please note that a key-value pair of the form
	// (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
	// driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
	// recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
	// example below. This ensures that all default Databricks-managed
	// environment variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
	// "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
	// "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
	// available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	SparkVersion string `json:"spark_version,omitempty"`
	// `spec` contains a snapshot of the field values that were used to create
	// or edit this cluster. The contents of `spec` can be used in the body of a
	// create cluster request. This field might not be populated for older
	// clusters. Note: not included in the response of the ListClusters API.
	Spec *ClusterSpec `json:"spec,omitempty"`
	// SSH public key contents that will be added to each Spark node in this
	// cluster. The corresponding private keys can be used to login with the
	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
	// Time (in epoch milliseconds) when the cluster creation request was
	// received (when the cluster entered a `PENDING` state).
	StartTime int64 `json:"start_time,omitempty"`
	// Current state of the cluster.
	State State `json:"state,omitempty"`
	// A message associated with the most recent state transition (e.g., the
	// reason why the cluster entered a `TERMINATED` state).
	StateMessage string `json:"state_message,omitempty"`
	// Time (in epoch milliseconds) when the cluster was terminated, if
	// applicable.
	TerminatedTime int64 `json:"terminated_time,omitempty"`
	// Information about why the cluster was terminated. This field only appears
	// when the cluster is in a `TERMINATING` or `TERMINATED` state.
	TerminationReason *TerminationReason `json:"termination_reason,omitempty"`

	WorkloadType *WorkloadType `json:"workload_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (*ClusterDetails) IsRunningOrResizing added in v0.11.0

func (c *ClusterDetails) IsRunningOrResizing() bool
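
A small sketch of branching on a cluster's lifecycle state, assuming `details` is a *ClusterDetails previously fetched from the Clusters API and `ctx`/`logger` are set up as in the other examples:

if details.IsRunningOrResizing() {
	// The cluster is running or resizing, so it can accept work.
	logger.Infof(ctx, "cluster %s is usable", details.ClusterId)
} else {
	// Otherwise inspect State and StateMessage to decide what to do next.
	logger.Infof(ctx, "cluster %s is %s: %s", details.ClusterId, details.State, details.StateMessage)
}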

func (ClusterDetails) MarshalJSON added in v0.23.0

func (s ClusterDetails) MarshalJSON() ([]byte, error)

func (*ClusterDetails) UnmarshalJSON added in v0.23.0

func (s *ClusterDetails) UnmarshalJSON(b []byte) error

type ClusterEvent

type ClusterEvent struct {
	// <needs content added>
	ClusterId string `json:"cluster_id"`
	// <needs content added>
	DataPlaneEventDetails *DataPlaneEventDetails `json:"data_plane_event_details,omitempty"`
	// <needs content added>
	Details *EventDetails `json:"details,omitempty"`
	// The timestamp when the event occurred, stored as the number of
	// milliseconds since the Unix epoch. If not provided, this will be assigned
	// by the Timeline service.
	Timestamp int64 `json:"timestamp,omitempty"`

	Type EventType `json:"type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterEvent) MarshalJSON added in v0.23.0

func (s ClusterEvent) MarshalJSON() ([]byte, error)

func (*ClusterEvent) UnmarshalJSON added in v0.23.0

func (s *ClusterEvent) UnmarshalJSON(b []byte) error

type ClusterLibraryStatuses

type ClusterLibraryStatuses struct {
	// Unique identifier for the cluster.
	ClusterId string `json:"cluster_id,omitempty"`
	// Status of all libraries on the cluster.
	LibraryStatuses []LibraryFullStatus `json:"library_statuses,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterLibraryStatuses) IsRetryNeeded

func (cls ClusterLibraryStatuses) IsRetryNeeded(w Wait) (bool, error)

IsRetryNeeded returns true as its first value if a retry is needed; in that case, the error explains why. If no retry is needed and the error is non-nil, the operation failed.

func (ClusterLibraryStatuses) MarshalJSON added in v0.23.0

func (s ClusterLibraryStatuses) MarshalJSON() ([]byte, error)

func (ClusterLibraryStatuses) ToLibraryList

func (cls ClusterLibraryStatuses) ToLibraryList() InstallLibraries

ToLibraryList converts to an entity for convenient comparison

func (*ClusterLibraryStatuses) UnmarshalJSON added in v0.23.0

func (s *ClusterLibraryStatuses) UnmarshalJSON(b []byte) error

type ClusterLogConf

type ClusterLogConf struct {
	// destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
	// "dbfs:/home/cluster_log" } }`
	Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
	// destination and either the region or endpoint need to be provided. e.g.
	// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
	// "us-west-2" } }` Cluster iam role is used to access s3, please make sure
	// the cluster iam role in `instance_profile_arn` has permission to write
	// data to the s3 destination.
	S3 *S3StorageInfo `json:"s3,omitempty"`
}
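
A sketch of the two supported log destinations (only one may be set per cluster); the DbfsStorageInfo and S3StorageInfo field names are assumed to mirror the JSON shown in the comments above:

// Deliver logs to DBFS...
dbfsLogs := &compute.ClusterLogConf{
	Dbfs: &compute.DbfsStorageInfo{Destination: "dbfs:/home/cluster_log"},
}

// ...or to S3 (the cluster's instance profile must be able to write there).
s3Logs := &compute.ClusterLogConf{
	S3: &compute.S3StorageInfo{
		Destination: "s3://cluster_log_bucket/prefix",
		Region:      "us-west-2",
	},
}
_, _ = dbfsLogs, s3Logs // attach one of them to a cluster's ClusterLogConf field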

type ClusterPermission added in v0.15.0

type ClusterPermission struct {
	Inherited bool `json:"inherited,omitempty"`

	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPermission) MarshalJSON added in v0.23.0

func (s ClusterPermission) MarshalJSON() ([]byte, error)

func (*ClusterPermission) UnmarshalJSON added in v0.23.0

func (s *ClusterPermission) UnmarshalJSON(b []byte) error

type ClusterPermissionLevel added in v0.15.0

type ClusterPermissionLevel string

Permission level

const ClusterPermissionLevelCanAttachTo ClusterPermissionLevel = `CAN_ATTACH_TO`
const ClusterPermissionLevelCanManage ClusterPermissionLevel = `CAN_MANAGE`
const ClusterPermissionLevelCanRestart ClusterPermissionLevel = `CAN_RESTART`

func (*ClusterPermissionLevel) Set added in v0.15.0

func (f *ClusterPermissionLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*ClusterPermissionLevel) String added in v0.15.0

func (f *ClusterPermissionLevel) String() string

String representation for fmt.Print

func (*ClusterPermissionLevel) Type added in v0.15.0

func (f *ClusterPermissionLevel) Type() string

Type always returns ClusterPermissionLevel to satisfy [pflag.Value] interface

type ClusterPermissions added in v0.15.0

type ClusterPermissions struct {
	AccessControlList []ClusterAccessControlResponse `json:"access_control_list,omitempty"`

	ObjectId string `json:"object_id,omitempty"`

	ObjectType string `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPermissions) MarshalJSON added in v0.23.0

func (s ClusterPermissions) MarshalJSON() ([]byte, error)

func (*ClusterPermissions) UnmarshalJSON added in v0.23.0

func (s *ClusterPermissions) UnmarshalJSON(b []byte) error

type ClusterPermissionsDescription added in v0.15.0

type ClusterPermissionsDescription struct {
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPermissionsDescription) MarshalJSON added in v0.23.0

func (s ClusterPermissionsDescription) MarshalJSON() ([]byte, error)

func (*ClusterPermissionsDescription) UnmarshalJSON added in v0.23.0

func (s *ClusterPermissionsDescription) UnmarshalJSON(b []byte) error

type ClusterPermissionsRequest added in v0.15.0

type ClusterPermissionsRequest struct {
	AccessControlList []ClusterAccessControlRequest `json:"access_control_list,omitempty"`
	// The cluster for which to get or manage permissions.
	ClusterId string `json:"-" url:"-"`
}
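
A hedged sketch of granting a group restart access on a cluster; the group name and cluster ID are placeholders, and the request would typically be passed to the cluster permissions Set/Update calls:

permissionsRequest := compute.ClusterPermissionsRequest{
	ClusterId: "1234-567890-abcde123", // placeholder cluster ID
	AccessControlList: []compute.ClusterAccessControlRequest{
		{
			GroupName:       "data-engineers", // placeholder group
			PermissionLevel: compute.ClusterPermissionLevelCanRestart,
		},
	},
}
_ = permissionsRequest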

type ClusterPoliciesAPI

type ClusterPoliciesAPI struct {
	// contains filtered or unexported fields
}

You can use cluster policies to control users' ability to configure clusters based on a set of rules. These rules specify which attributes or attribute values can be used during cluster creation. Cluster policies have ACLs that limit their use to specific users and groups.

With cluster policies, you can: - Auto-install cluster libraries on the next restart by listing them in the policy's "libraries" field (Public Preview). - Limit users to creating clusters with the prescribed settings. - Simplify the user interface, enabling more users to create clusters, by fixing and hiding some fields. - Manage costs by setting limits on attributes that impact the hourly rate.

Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user creates a cluster: - A user who has unrestricted cluster create permission can select the Unrestricted policy and create fully-configurable clusters. - A user who has both unrestricted cluster create permission and access to cluster policies can select the Unrestricted policy and policies they have access to. - A user who has access only to cluster policies can select the policies they have access to.

If no policies exist in the workspace, the Policy drop-down doesn't appear. Only admin users can create, edit, and delete policies. Admin users also have access to all policies.

func NewClusterPolicies

func NewClusterPolicies(client *client.DatabricksClient) *ClusterPoliciesAPI

func (*ClusterPoliciesAPI) Create

func (a *ClusterPoliciesAPI) Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error)

Create a new policy.

Creates a new policy with prescribed settings.

Example (ClusterPolicies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.ClusterPolicies.Create(ctx, compute.CreatePolicy{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Definition: `{
            "spark_conf.spark.databricks.delta.preview.enabled": {
                "type": "fixed",
                "value": true
            }
        }
`,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.ClusterPolicies.DeleteByPolicyId(ctx, created.PolicyId)
if err != nil {
	panic(err)
}
Output:

func (*ClusterPoliciesAPI) Delete

func (a *ClusterPoliciesAPI) Delete(ctx context.Context, request DeletePolicy) error

Delete a cluster policy.

Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited.

func (*ClusterPoliciesAPI) DeleteByPolicyId

func (a *ClusterPoliciesAPI) DeleteByPolicyId(ctx context.Context, policyId string) error

Delete a cluster policy.

Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited.

func (*ClusterPoliciesAPI) Edit

func (a *ClusterPoliciesAPI) Edit(ctx context.Context, request EditPolicy) error

Update a cluster policy.

Update an existing policy for a cluster. This operation may make some clusters governed by the previous policy invalid.

Example (ClusterPolicies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.ClusterPolicies.Create(ctx, compute.CreatePolicy{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Definition: `{
            "spark_conf.spark.databricks.delta.preview.enabled": {
                "type": "fixed",
                "value": true
            }
        }
`,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

policy, err := w.ClusterPolicies.GetByPolicyId(ctx, created.PolicyId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", policy)

err = w.ClusterPolicies.Edit(ctx, compute.EditPolicy{
	PolicyId: policy.PolicyId,
	Name:     policy.Name,
	Definition: `{
            "spark_conf.spark.databricks.delta.preview.enabled": {
                "type": "fixed",
                "value": false
            }
        }
`,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.ClusterPolicies.DeleteByPolicyId(ctx, created.PolicyId)
if err != nil {
	panic(err)
}
Output:

func (*ClusterPoliciesAPI) Get

func (a *ClusterPoliciesAPI) Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error)

Get a cluster policy.

Get a cluster policy entity. Creation and editing is available to admins only.

Example (ClusterPolicies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.ClusterPolicies.Create(ctx, compute.CreatePolicy{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Definition: `{
            "spark_conf.spark.databricks.delta.preview.enabled": {
                "type": "fixed",
                "value": true
            }
        }
`,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

policy, err := w.ClusterPolicies.GetByPolicyId(ctx, created.PolicyId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", policy)

// cleanup

err = w.ClusterPolicies.DeleteByPolicyId(ctx, created.PolicyId)
if err != nil {
	panic(err)
}
Output:

func (*ClusterPoliciesAPI) GetByName

func (a *ClusterPoliciesAPI) GetByName(ctx context.Context, name string) (*Policy, error)

GetByName calls ClusterPoliciesAPI.PolicyNameToPolicyIdMap and returns a single Policy.

Returns an error if there's more than one Policy with the same .Name.

Note: All Policy instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.
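
A short sketch of looking up a policy by its display name with GetByName, assuming `w`, `ctx`, and `logger` are set up as in the examples above; the policy name is a placeholder, and the whole policy list is loaded into memory for the lookup:

policy, err := w.ClusterPolicies.GetByName(ctx, "Personal Compute")
if err != nil {
	panic(err) // also returned when several policies share the same name
}
logger.Infof(ctx, "found policy id %s", policy.PolicyId)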

func (*ClusterPoliciesAPI) GetByPolicyId

func (a *ClusterPoliciesAPI) GetByPolicyId(ctx context.Context, policyId string) (*Policy, error)

Get a cluster policy.

Get a cluster policy entity. Creation and editing is available to admins only.

func (*ClusterPoliciesAPI) GetPermissionLevels added in v0.19.0

func (a *ClusterPoliciesAPI) GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error)

Get cluster policy permission levels.

Gets the permission levels that a user can have on an object.

func (*ClusterPoliciesAPI) GetPermissionLevelsByClusterPolicyId added in v0.19.0

func (a *ClusterPoliciesAPI) GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error)

Get cluster policy permission levels.

Gets the permission levels that a user can have on an object.

func (*ClusterPoliciesAPI) GetPermissions added in v0.19.0

func (a *ClusterPoliciesAPI) GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

Get cluster policy permissions.

Gets the permissions of a cluster policy. Cluster policies can inherit permissions from their root object.

func (*ClusterPoliciesAPI) GetPermissionsByClusterPolicyId added in v0.19.0

func (a *ClusterPoliciesAPI) GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error)

Get cluster policy permissions.

Gets the permissions of a cluster policy. Cluster policies can inherit permissions from their root object.

func (*ClusterPoliciesAPI) Impl

func (a *ClusterPoliciesAPI) Impl() ClusterPoliciesService

Impl returns low-level ClusterPolicies API implementation.

Deprecated: use MockClusterPoliciesInterface instead.

func (*ClusterPoliciesAPI) List added in v0.24.0

func (a *ClusterPoliciesAPI) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy]

List cluster policies.

Returns a list of policies accessible by the requesting user.

This method is generated by Databricks SDK Code Generator.

func (*ClusterPoliciesAPI) ListAll

func (a *ClusterPoliciesAPI) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error)

List cluster policies.

Returns a list of policies accessible by the requesting user.

This method is generated by Databricks SDK Code Generator.

Example (ClusterPolicies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.ClusterPolicies.ListAll(ctx, compute.ListClusterPoliciesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*ClusterPoliciesAPI) PolicyNameToPolicyIdMap

func (a *ClusterPoliciesAPI) PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error)

PolicyNameToPolicyIdMap calls ClusterPoliciesAPI.ListAll and creates a map of results with Policy.Name as key and Policy.PolicyId as value.

Returns an error if there's more than one Policy with the same .Name.

Note: All Policy instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*ClusterPoliciesAPI) SetPermissions added in v0.19.0

func (a *ClusterPoliciesAPI) SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

Set cluster policy permissions.

Sets permissions on a cluster policy. Cluster policies can inherit permissions from their root object.

func (*ClusterPoliciesAPI) UpdatePermissions added in v0.19.0

func (a *ClusterPoliciesAPI) UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

Update cluster policy permissions.

Updates the permissions on a cluster policy. Cluster policies can inherit permissions from their root object.

func (*ClusterPoliciesAPI) WithImpl

func (a *ClusterPoliciesAPI) WithImpl(impl ClusterPoliciesService) ClusterPoliciesInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

Deprecated: use MockClusterPoliciesInterface instead.

type ClusterPoliciesInterface added in v0.29.0

type ClusterPoliciesInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockClusterPoliciesInterface instead.
	WithImpl(impl ClusterPoliciesService) ClusterPoliciesInterface

	// Impl returns low-level ClusterPolicies API implementation
	// Deprecated: use MockClusterPoliciesInterface instead.
	Impl() ClusterPoliciesService

	// Create a new policy.
	//
	// Creates a new policy with prescribed settings.
	Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error)

	// Delete a cluster policy.
	//
	// Delete a policy for a cluster. Clusters governed by this policy can still
	// run, but cannot be edited.
	Delete(ctx context.Context, request DeletePolicy) error

	// Delete a cluster policy.
	//
	// Delete a policy for a cluster. Clusters governed by this policy can still
	// run, but cannot be edited.
	DeleteByPolicyId(ctx context.Context, policyId string) error

	// Update a cluster policy.
	//
	// Update an existing policy for a cluster. This operation may make some clusters
	// governed by the previous policy invalid.
	Edit(ctx context.Context, request EditPolicy) error

	// Get a cluster policy.
	//
	// Get a cluster policy entity. Creation and editing is available to admins
	// only.
	Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error)

	// Get a cluster policy.
	//
	// Get a cluster policy entity. Creation and editing is available to admins
	// only.
	GetByPolicyId(ctx context.Context, policyId string) (*Policy, error)

	// Get cluster policy permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error)

	// Get cluster policy permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error)

	// Get cluster policy permissions.
	//
	// Gets the permissions of a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

	// Get cluster policy permissions.
	//
	// Gets the permissions of a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error)

	// List cluster policies.
	//
	// Returns a list of policies accessible by the requesting user.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy]

	// List cluster policies.
	//
	// Returns a list of policies accessible by the requesting user.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error)

	// PolicyNameToPolicyIdMap calls [ClusterPoliciesAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value.
	//
	// Returns an error if there's more than one [Policy] with the same .Name.
	//
	// Note: All [Policy] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error)

	// GetByName calls [ClusterPoliciesAPI.PolicyNameToPolicyIdMap] and returns a single [Policy].
	//
	// Returns an error if there's more than one [Policy] with the same .Name.
	//
	// Note: All [Policy] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*Policy, error)

	// Set cluster policy permissions.
	//
	// Sets permissions on a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

	// Update cluster policy permissions.
	//
	// Updates the permissions on a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)
}

type ClusterPoliciesService

type ClusterPoliciesService interface {

	// Create a new policy.
	//
	// Creates a new policy with prescribed settings.
	Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error)

	// Delete a cluster policy.
	//
	// Delete a policy for a cluster. Clusters governed by this policy can still
	// run, but cannot be edited.
	Delete(ctx context.Context, request DeletePolicy) error

	// Update a cluster policy.
	//
	// Update an existing policy for a cluster. This operation may make some
	// clusters governed by the previous policy invalid.
	Edit(ctx context.Context, request EditPolicy) error

	// Get a cluster policy.
	//
	// Get a cluster policy entity. Creation and editing is available to admins
	// only.
	Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error)

	// Get cluster policy permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error)

	// Get cluster policy permissions.
	//
	// Gets the permissions of a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

	// List cluster policies.
	//
	// Returns a list of policies accessible by the requesting user.
	//
	// Use ListAll() to get all Policy instances
	List(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error)

	// Set cluster policy permissions.
	//
	// Sets permissions on a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)

	// Update cluster policy permissions.
	//
	// Updates the permissions on a cluster policy. Cluster policies can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error)
}

You can use cluster policies to control users' ability to configure clusters based on a set of rules. These rules specify which attributes or attribute values can be used during cluster creation. Cluster policies have ACLs that limit their use to specific users and groups.

With cluster policies, you can:

- Auto-install cluster libraries on the next restart by listing them in the policy's "libraries" field (Public Preview).

- Limit users to creating clusters with the prescribed settings.

- Simplify the user interface, enabling more users to create clusters, by fixing and hiding some fields.

- Manage costs by setting limits on attributes that impact the hourly rate.

Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user creates a cluster:

- A user who has unrestricted cluster create permission can select the Unrestricted policy and create fully-configurable clusters.

- A user who has both unrestricted cluster create permission and access to cluster policies can select the Unrestricted policy and the policies they have access to.

- A user who has access only to cluster policies can select the policies they have access to.

If no policies exist in the workspace, the Policy drop-down doesn't appear. Only admin users can create, edit, and delete policies. Admin users also have access to all policies.
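
As a sketch of the create/delete lifecycle described above, following the style of the other examples on this page and assuming CreatePolicy exposes Name and Definition and DeletePolicy exposes PolicyId; the policy definition shown is an illustrative placeholder, not a recommended policy:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Create a policy that pins autotermination_minutes to a fixed value.
created, err := w.ClusterPolicies.Create(ctx, compute.CreatePolicy{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Definition: `{
		"autotermination_minutes": {"type": "fixed", "value": 15}
	}`,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "created %v", created.PolicyId)

// cleanup
err = w.ClusterPolicies.Delete(ctx, compute.DeletePolicy{
	PolicyId: created.PolicyId,
})
if err != nil {
	panic(err)
}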

type ClusterPolicyAccessControlRequest added in v0.15.0

type ClusterPolicyAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"`
	// application ID of a service principal
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPolicyAccessControlRequest) MarshalJSON added in v0.23.0

func (s ClusterPolicyAccessControlRequest) MarshalJSON() ([]byte, error)

func (*ClusterPolicyAccessControlRequest) UnmarshalJSON added in v0.23.0

func (s *ClusterPolicyAccessControlRequest) UnmarshalJSON(b []byte) error

type ClusterPolicyAccessControlResponse added in v0.15.0

type ClusterPolicyAccessControlResponse struct {
	// All permissions.
	AllPermissions []ClusterPolicyPermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPolicyAccessControlResponse) MarshalJSON added in v0.23.0

func (s ClusterPolicyAccessControlResponse) MarshalJSON() ([]byte, error)

func (*ClusterPolicyAccessControlResponse) UnmarshalJSON added in v0.23.0

func (s *ClusterPolicyAccessControlResponse) UnmarshalJSON(b []byte) error

type ClusterPolicyPermission added in v0.15.0

type ClusterPolicyPermission struct {
	Inherited bool `json:"inherited,omitempty"`

	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPolicyPermission) MarshalJSON added in v0.23.0

func (s ClusterPolicyPermission) MarshalJSON() ([]byte, error)

func (*ClusterPolicyPermission) UnmarshalJSON added in v0.23.0

func (s *ClusterPolicyPermission) UnmarshalJSON(b []byte) error

type ClusterPolicyPermissionLevel added in v0.15.0

type ClusterPolicyPermissionLevel string

Permission level

const ClusterPolicyPermissionLevelCanUse ClusterPolicyPermissionLevel = `CAN_USE`

func (*ClusterPolicyPermissionLevel) Set added in v0.15.0

Set raw string value and validate it against allowed values

func (*ClusterPolicyPermissionLevel) String added in v0.15.0

String representation for fmt.Print

func (*ClusterPolicyPermissionLevel) Type added in v0.15.0

Type always returns ClusterPolicyPermissionLevel to satisfy [pflag.Value] interface

type ClusterPolicyPermissions added in v0.15.0

type ClusterPolicyPermissions struct {
	AccessControlList []ClusterPolicyAccessControlResponse `json:"access_control_list,omitempty"`

	ObjectId string `json:"object_id,omitempty"`

	ObjectType string `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPolicyPermissions) MarshalJSON added in v0.23.0

func (s ClusterPolicyPermissions) MarshalJSON() ([]byte, error)

func (*ClusterPolicyPermissions) UnmarshalJSON added in v0.23.0

func (s *ClusterPolicyPermissions) UnmarshalJSON(b []byte) error

type ClusterPolicyPermissionsDescription added in v0.15.0

type ClusterPolicyPermissionsDescription struct {
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterPolicyPermissionsDescription) MarshalJSON added in v0.23.0

func (s ClusterPolicyPermissionsDescription) MarshalJSON() ([]byte, error)

func (*ClusterPolicyPermissionsDescription) UnmarshalJSON added in v0.23.0

func (s *ClusterPolicyPermissionsDescription) UnmarshalJSON(b []byte) error

type ClusterPolicyPermissionsRequest added in v0.15.0

type ClusterPolicyPermissionsRequest struct {
	AccessControlList []ClusterPolicyAccessControlRequest `json:"access_control_list,omitempty"`
	// The cluster policy for which to get or manage permissions.
	ClusterPolicyId string `json:"-" url:"-"`
}
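
A minimal sketch of granting the CAN_USE level using the request types above; the policy ID and group name are placeholders for values from your workspace:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Grant CAN_USE on an existing policy to a group.
permissions, err := w.ClusterPolicies.SetPermissions(ctx, compute.ClusterPolicyPermissionsRequest{
	ClusterPolicyId: "<policy-id>", // placeholder
	AccessControlList: []compute.ClusterPolicyAccessControlRequest{
		{
			GroupName:       "data-engineers", // placeholder group
			PermissionLevel: compute.ClusterPolicyPermissionLevelCanUse,
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", permissions)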

type ClusterSize

type ClusterSize struct {
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterSize) MarshalJSON added in v0.23.0

func (s ClusterSize) MarshalJSON() ([]byte, error)

func (*ClusterSize) UnmarshalJSON added in v0.23.0

func (s *ClusterSize) UnmarshalJSON(b []byte) error
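
Autoscaling is requested by setting Autoscale instead of NumWorkers when creating a cluster. A minimal sketch, assuming CreateCluster carries the same Autoscale field as ClusterSpec below:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}

smallest, err := w.Clusters.SelectNodeType(ctx, compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}

// Scale between 1 and 3 workers based on load instead of a fixed size.
clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	SparkVersion:           latest,
	NodeTypeId:             smallest,
	AutoterminationMinutes: 15,
	Autoscale: &compute.AutoScale{
		MinWorkers: 1,
		MaxWorkers: 3,
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

// cleanup
err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}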

type ClusterSource

type ClusterSource string

Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only.

const ClusterSourceApi ClusterSource = `API`
const ClusterSourceJob ClusterSource = `JOB`
const ClusterSourceModels ClusterSource = `MODELS`
const ClusterSourcePipeline ClusterSource = `PIPELINE`
const ClusterSourcePipelineMaintenance ClusterSource = `PIPELINE_MAINTENANCE`
const ClusterSourceSql ClusterSource = `SQL`
const ClusterSourceUi ClusterSource = `UI`

func (*ClusterSource) Set

func (f *ClusterSource) Set(v string) error

Set raw string value and validate it against allowed values

func (*ClusterSource) String

func (f *ClusterSource) String() string

String representation for fmt.Print

func (*ClusterSource) Type

func (f *ClusterSource) Type() string

Type always returns ClusterSource to satisfy [pflag.Value] interface

type ClusterSpec added in v0.11.0

type ClusterSpec struct {
	// When set to true, fixed and default values from the policy will be used
	// for fields that are omitted. When set to false, only fixed values from
	// the policy will be applied.
	ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// Automatically terminates the cluster after it is inactive for this time
	// in minutes. If not set, this cluster will not be automatically
	// terminated. If specified, the threshold must be between 10 and 10000
	// minutes. Users can also set this value to 0 to explicitly disable
	// automatic termination.
	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
	// one destination can be specified for one cluster. If the conf is given,
	// the logs will be delivered to the destination every `5 mins`. The
	// destination of driver logs is `$destination/$clusterId/driver`, while the
	// destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Cluster name requested by the user. This doesn't have to be unique. If
	// not specified at creation, the cluster name will be an empty string.
	ClusterName string `json:"cluster_name,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a
	// subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Data security mode decides what data governance model to use when
	// accessing data from a cluster.
	//
	// * `NONE`: No security isolation for multiple users sharing the cluster.
	// Data governance features are not available in this mode.
	//
	// * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
	// single user specified in `single_user_name`. Most programming languages,
	// cluster features and data governance features are available in this mode.
	//
	// * `USER_ISOLATION`: A secure cluster that can be shared by multiple
	// users. Cluster users are fully isolated so that they cannot see each
	// other's data and credentials. Most data governance features are supported
	// in this mode. But programming languages and cluster features might be
	// limited.
	//
	// The following modes are deprecated starting with Databricks Runtime 15.0
	// and will be removed for future Databricks Runtime versions:
	//
	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
	// ACL clusters.
	//
	// * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy
	// Passthrough on high concurrency clusters.
	//
	// * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
	// Passthrough on standard clusters.
	//
	// * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t
	// have UC nor passthrough enabled.
	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`

	DockerImage *DockerImage `json:"docker_image,omitempty"`
	// The optional ID of the instance pool to which the cluster's driver
	// belongs. If the driver pool is not assigned, the cluster uses the
	// instance pool with id (instance_pool_id) for the driver as well.
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if
	// unset, the driver node type will be set as the same value as
	// `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Autoscaling Local Storage: when enabled, this cluster will dynamically
	// acquire additional disk space when its Spark workers are running low on
	// disk space. This feature requires specific AWS permissions to function
	// correctly - refer to the User Guide for more details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Whether to enable LUKS on cluster VMs' local disks
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
	// to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// Decides which runtime engine to use, e.g. Standard vs. Photon. If
	// unspecified, the runtime engine is inferred from spark_version.
	RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
	// Single user name if data_security_mode is `SINGLE_USER`
	SingleUserName string `json:"single_user_name,omitempty"`
	// An object containing a set of optional, user-specified Spark
	// configuration key-value pairs. Users can also pass in a string of extra
	// JVM options to the driver and the executors via
	// `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
	// respectively.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// An object containing a set of optional, user-specified environment
	// variable key-value pairs. Please note that key-value pair of the form
	// (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
	// driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
	// recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
	// example below. This ensures that all default databricks managed
	// environmental variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
	// "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
	// "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
	// available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	SparkVersion string `json:"spark_version,omitempty"`
	// SSH public key contents that will be added to each Spark node in this
	// cluster. The corresponding private keys can be used to login with the
	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`

	WorkloadType *WorkloadType `json:"workload_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ClusterSpec) MarshalJSON added in v0.23.0

func (s ClusterSpec) MarshalJSON() ([]byte, error)

func (*ClusterSpec) UnmarshalJSON added in v0.23.0

func (s *ClusterSpec) UnmarshalJSON(b []byte) error
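
A ClusterSpec is typically embedded into other requests (for example, job cluster definitions) rather than sent on its own. A minimal sketch of a spec literal; the Spark version, node type, and Spark setting are placeholders, and the SPARK_DAEMON_JAVA_OPTS value follows the append pattern recommended in the field comment above:

spec := compute.ClusterSpec{
	SparkVersion:           "3.3.x-scala2.11", // placeholder; list versions via SparkVersions
	NodeTypeId:             "i3.xlarge",       // placeholder; list types via ListNodeTypes
	NumWorkers:             2,
	AutoterminationMinutes: 60,
	SparkConf: map[string]string{
		"spark.speculation": "true", // placeholder Spark setting
	},
	SparkEnvVars: map[string]string{
		// Append to the default so Databricks-managed options are preserved.
		"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true",
	},
}
_ = spec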

type ClusterStatus added in v0.39.0

type ClusterStatus struct {
	// Unique identifier of the cluster whose status should be retrieved.
	ClusterId string `json:"-" url:"cluster_id"`
}

Get status

type ClustersAPI

type ClustersAPI struct {
	// contains filtered or unexported fields
}

The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.

Databricks maps cluster node instance types to compute units known as DBUs. See the instance type pricing page for a list of the supported instance types and their corresponding DBUs.

A Databricks cluster is a set of computation resources and configurations on which you run data engineering, data science, and data analytics workloads, such as production ETL pipelines, streaming analytics, ad-hoc analytics, and machine learning.

You run these workloads as a set of commands in a notebook or as an automated job. Databricks makes a distinction between all-purpose clusters and job clusters. You use all-purpose clusters to analyze data collaboratively using interactive notebooks. You use job clusters to run fast and robust automated jobs.

You can create an all-purpose cluster using the UI, CLI, or REST API. You can manually terminate and restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive analysis.

IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose clusters terminated in the last 30 days and up to 30 job clusters recently terminated by the job scheduler. To keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an administrator can pin a cluster to the cluster list.

Example
w := databricks.Must(databricks.NewWorkspaceClient())
ctx := context.Background()

// Fetch list of spark runtime versions
sparkVersions, err := w.Clusters.SparkVersions(ctx)
if err != nil {
	panic(err)
}

// Select the latest LTS version
latestLTS, err := sparkVersions.Select(compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}

// Fetch list of available node types
nodeTypes, err := w.Clusters.ListNodeTypes(ctx)
if err != nil {
	panic(err)
}

// Select the smallest node type id
smallestWithDisk, err := nodeTypes.Smallest(compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}

// Create cluster and wait until it's ready to use
runningCluster, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            "Test cluster from SDK",
	SparkVersion:           latestLTS,
	NodeTypeId:             smallestWithDisk,
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}

fmt.Printf("Cluster is ready: %s#setting/clusters/%s/configuration\n",
	w.Config.Host, runningCluster.ClusterId)
Output:

func NewClusters

func NewClusters(client *client.DatabricksClient) *ClustersAPI

func (*ClustersAPI) ChangeOwner

func (a *ClustersAPI) ChangeOwner(ctx context.Context, request ChangeClusterOwner) error

Change cluster owner.

Change the owner of the cluster. You must be an admin and the cluster must be terminated to perform this operation. The service principal application ID can be supplied as an argument to `owner_username`.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

otherOwner, err := w.Users.Create(ctx, iam.User{
	UserName: fmt.Sprintf("sdk-%x@example.com", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", otherOwner)

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

err = w.Clusters.ChangeOwner(ctx, compute.ChangeClusterOwner{
	ClusterId:     clstr.ClusterId,
	OwnerUsername: otherOwner.UserName,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Users.DeleteById(ctx, otherOwner.Id)
if err != nil {
	panic(err)
}
err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) ClusterDetailsClusterNameToClusterIdMap added in v0.11.0

func (a *ClustersAPI) ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error)

ClusterDetailsClusterNameToClusterIdMap calls ClustersAPI.ListAll and creates a map of results with ClusterDetails.ClusterName as key and ClusterDetails.ClusterId as value.

Returns an error if there's more than one ClusterDetails with the same .ClusterName.

Note: All ClusterDetails instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*ClustersAPI) Create

Create new cluster.

Creates a new Spark cluster. This method will acquire new instances from the cloud provider if necessary. Note: Databricks may not be able to acquire some of the requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient network issues.

If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed. Otherwise the cluster will terminate with an informative error message.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) CreateAndWait deprecated

func (a *ClustersAPI) CreateAndWait(ctx context.Context, createCluster CreateCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Create and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Create.Get() or ClustersAPI.WaitGetClusterRunning
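
A minimal sketch of the timeout override mentioned above, written from outside the compute package (so the type parameter is compute.ClusterDetails); the retries import path is assumed to be github.com/databricks/databricks-sdk-go/retries:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}

// Wait up to 60 minutes for the cluster to reach RUNNING instead of
// the default 20 minutes.
clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
}, retries.Timeout[compute.ClusterDetails](60*time.Minute))
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

// cleanup
err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}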

func (*ClustersAPI) Delete

func (a *ClustersAPI) Delete(ctx context.Context, deleteCluster DeleteCluster) (*WaitGetClusterTerminated[struct{}], error)

Terminate cluster.

Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the termination has completed, the cluster will be in a `TERMINATED` state. If the cluster is already in a `TERMINATING` or `TERMINATED` state, nothing will happen.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

_, err = w.Clusters.DeleteByClusterIdAndWait(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) DeleteAndWait deprecated

func (a *ClustersAPI) DeleteAndWait(ctx context.Context, deleteCluster DeleteCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Delete and waits to reach TERMINATED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Delete.Get() or ClustersAPI.WaitGetClusterTerminated

func (*ClustersAPI) DeleteByClusterId

func (a *ClustersAPI) DeleteByClusterId(ctx context.Context, clusterId string) error

Terminate cluster.

Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the termination has completed, the cluster will be in a `TERMINATED` state. If the cluster is already in a `TERMINATING` or `TERMINATED` state, nothing will happen.

func (*ClustersAPI) DeleteByClusterIdAndWait

func (a *ClustersAPI) DeleteByClusterIdAndWait(ctx context.Context, clusterId string, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

func (*ClustersAPI) Edit

func (a *ClustersAPI) Edit(ctx context.Context, editCluster EditCluster) (*WaitGetClusterRunning[struct{}], error)

Update cluster configuration.

Updates the configuration of a cluster to match the provided attributes and size. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state.

If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes can take effect.

If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`. The next time it is started using the `clusters/start` API, the new attributes will take effect. Any attempt to update a cluster in any other state will be rejected with an `INVALID_STATE` error code.

Clusters created by the Databricks Jobs service cannot be edited.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

_, err = w.Clusters.EditAndWait(ctx, compute.EditCluster{
	ClusterId:              clstr.ClusterId,
	SparkVersion:           latest,
	ClusterName:            clusterName,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 10,
	NumWorkers:             2,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) EditAndWait deprecated

func (a *ClustersAPI) EditAndWait(ctx context.Context, editCluster EditCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Edit and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Edit.Get() or ClustersAPI.WaitGetClusterRunning

func (*ClustersAPI) EnsureClusterIsRunning added in v0.9.0

func (a *ClustersAPI) EnsureClusterIsRunning(ctx context.Context, clusterId string) error
Example (CommandsDirectUsage)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

context, err := w.CommandExecution.CreateAndWait(ctx, compute.CreateContext{
	ClusterId: clusterId,
	Language:  compute.LanguagePython,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", context)

err = w.Clusters.EnsureClusterIsRunning(ctx, clusterId)
if err != nil {
	panic(err)
}

// cleanup

err = w.CommandExecution.Destroy(ctx, compute.DestroyContext{
	ClusterId: clusterId,
	ContextId: context.Id,
})
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) Events added in v0.24.0

List cluster activity events.

Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more events to read, the response includes all the parameters necessary to request the next page of events.

This method is generated by Databricks SDK Code Generator.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

events, err := w.Clusters.EventsAll(ctx, compute.GetEvents{
	ClusterId: clstr.ClusterId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", events)

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) EventsAll

func (a *ClustersAPI) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error)

List cluster activity events.

Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more events to read, the response includes all the parameters necessary to request the next page of events.

This method is generated by Databricks SDK Code Generator.

func (*ClustersAPI) Get

Get cluster info.

Retrieves the information for a cluster given its identifier. Clusters can be described while they are running, or up to 60 days after they are terminated.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

byId, err := w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) GetByClusterId

func (a *ClustersAPI) GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error)

Get cluster info.

Retrieves the information for a cluster given its identifier. Clusters can be described while they are running, or up to 60 days after they are terminated.

func (*ClustersAPI) GetByClusterName

func (a *ClustersAPI) GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error)

GetByClusterName calls ClustersAPI.ClusterDetailsClusterNameToClusterIdMap and returns a single ClusterDetails.

Returns an error if there's more than one ClusterDetails with the same .ClusterName.

Note: All ClusterDetails instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*ClustersAPI) GetOrCreateRunningCluster

func (a *ClustersAPI) GetOrCreateRunningCluster(ctx context.Context, name string, custom ...CreateCluster) (c *ClusterDetails, err error)

GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist
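
A minimal sketch; the cluster name is a hypothetical placeholder, and the call either returns a running cluster with that name or creates an autoterminating one:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Reuse a shared development cluster by name, creating it only if missing.
clstr, err := w.Clusters.GetOrCreateRunningCluster(ctx, "sdk-shared-dev")
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr.ClusterId)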

func (*ClustersAPI) GetPermissionLevels added in v0.19.0

Get cluster permission levels.

Gets the permission levels that a user can have on an object.

func (*ClustersAPI) GetPermissionLevelsByClusterId added in v0.19.0

func (a *ClustersAPI) GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error)

Get cluster permission levels.

Gets the permission levels that a user can have on an object.

func (*ClustersAPI) GetPermissions added in v0.19.0

Get cluster permissions.

Gets the permissions of a cluster. Clusters can inherit permissions from their root object.

func (*ClustersAPI) GetPermissionsByClusterId added in v0.19.0

func (a *ClustersAPI) GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error)

Get cluster permissions.

Gets the permissions of a cluster. Clusters can inherit permissions from their root object.
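
A minimal sketch of inspecting permissions on an existing cluster, reusing the TEST_DEFAULT_CLUSTER_ID environment variable from the other integration examples on this page:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

permissions, err := w.Clusters.GetPermissionsByClusterId(ctx, clusterId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", permissions)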

func (*ClustersAPI) Impl

func (a *ClustersAPI) Impl() ClustersService

Impl returns the low-level Clusters API implementation. Deprecated: use MockClustersInterface instead.

func (*ClustersAPI) List added in v0.24.0

List all clusters.

Return information about all pinned clusters, active clusters, up to 200 of the most recently terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days.

For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1 pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently terminated job clusters.

This method is generated by Databricks SDK Code Generator.

func (*ClustersAPI) ListAll

func (a *ClustersAPI) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error)

List all clusters.

Return information about all pinned clusters, active clusters, up to 200 of the most recently terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days.

For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1 pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently terminated job clusters.

This method is generated by Databricks SDK Code Generator.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*ClustersAPI) ListByCanUseClient

func (a *ClustersAPI) ListByCanUseClient(ctx context.Context, canUseClient string) (*ListClustersResponse, error)

List all clusters.

Return information about all pinned clusters, active clusters, up to 200 of the most recently terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days.

For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1 pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently terminated job clusters.

func (*ClustersAPI) ListNodeTypes

func (a *ClustersAPI) ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error)

List node types.

Returns a list of supported Spark node types. These node types can be used to launch a cluster.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

nodes, err := w.Clusters.ListNodeTypes(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", nodes)
Output:

func (*ClustersAPI) ListZones

List availability zones.

Returns a list of availability zones where clusters can be created (for example, us-west-2a). These zones can be used to launch a cluster.
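
A minimal sketch, assuming ListZones takes only a context like the other zero-argument helpers on this page (SparkVersions, ListNodeTypes):

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

zones, err := w.Clusters.ListZones(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", zones)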

func (*ClustersAPI) PermanentDelete

func (a *ClustersAPI) PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error

Permanently delete cluster.

Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously removed.

In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer perform any action on permanently deleted clusters.

func (*ClustersAPI) PermanentDeleteByClusterId

func (a *ClustersAPI) PermanentDeleteByClusterId(ctx context.Context, clusterId string) error

Permanently delete cluster.

Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously removed.

In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer perform any action on permanently deleted clusters.

func (*ClustersAPI) Pin

func (a *ClustersAPI) Pin(ctx context.Context, request PinCluster) error

Pin cluster.

Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a cluster that is already pinned will have no effect. This API can only be called by workspace admins.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

err = w.Clusters.PinByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) PinByClusterId

func (a *ClustersAPI) PinByClusterId(ctx context.Context, clusterId string) error

Pin cluster.

Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a cluster that is already pinned will have no effect. This API can only be called by workspace admins.

func (*ClustersAPI) Resize

func (a *ClustersAPI) Resize(ctx context.Context, resizeCluster ResizeCluster) (*WaitGetClusterRunning[struct{}], error)

Resize cluster.

Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a `RUNNING` state.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

byId, err := w.Clusters.ResizeAndWait(ctx, compute.ResizeCluster{
	ClusterId:  clstr.ClusterId,
	NumWorkers: 1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) ResizeAndWait deprecated

func (a *ClustersAPI) ResizeAndWait(ctx context.Context, resizeCluster ResizeCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Resize and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Resize.Get() or ClustersAPI.WaitGetClusterRunning

func (*ClustersAPI) Restart

func (a *ClustersAPI) Restart(ctx context.Context, restartCluster RestartCluster) (*WaitGetClusterRunning[struct{}], error)

Restart cluster.

Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state, nothing will happen.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

_, err = w.Clusters.RestartAndWait(ctx, compute.RestartCluster{
	ClusterId: clstr.ClusterId,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) RestartAndWait deprecated

func (a *ClustersAPI) RestartAndWait(ctx context.Context, restartCluster RestartCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Restart and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Restart.Get() or ClustersAPI.WaitGetClusterRunning

func (*ClustersAPI) SelectNodeType added in v0.9.0

func (a *ClustersAPI) SelectNodeType(ctx context.Context, r NodeTypeRequest) (string, error)
Example (InstancePools)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

smallest, err := w.Clusters.SelectNodeType(ctx, compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", smallest)
Output:

func (*ClustersAPI) SelectSparkVersion added in v0.9.0

func (a *ClustersAPI) SelectSparkVersion(ctx context.Context, r SparkVersionRequest) (string, error)
Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)
Output:

func (*ClustersAPI) SetPermissions added in v0.19.0

func (a *ClustersAPI) SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)

Set cluster permissions.

Sets permissions on a cluster. Clusters can inherit permissions from their root object.

func (*ClustersAPI) SparkVersions

func (a *ClustersAPI) SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error)

List available Spark versions.

Returns the list of available Spark versions. These versions can be used to launch a cluster.

func (*ClustersAPI) Start

func (a *ClustersAPI) Start(ctx context.Context, startCluster StartCluster) (*WaitGetClusterRunning[struct{}], error)

Start terminated cluster.

Starts a terminated Spark cluster with the supplied ID. This works similarly to `createCluster` except:

* The previous cluster id and attributes are preserved.

* The cluster starts with the last specified cluster size.

* If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes.

* If the cluster is not currently in a `TERMINATED` state, nothing will happen.

* Clusters launched to run a job cannot be started.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

_, err = w.Clusters.StartByClusterIdAndWait(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) StartAndWait deprecated

func (a *ClustersAPI) StartAndWait(ctx context.Context, startCluster StartCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

Calls ClustersAPI.Start and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[ClusterDetails](60*time.Minute) functional option.

Deprecated: use ClustersAPI.Start.Get() or ClustersAPI.WaitGetClusterRunning

func (*ClustersAPI) StartByClusterId

func (a *ClustersAPI) StartByClusterId(ctx context.Context, clusterId string) error

Start terminated cluster.

Starts a terminated Spark cluster with the supplied ID. This works similarly to `createCluster` except:

* The previous cluster id and attributes are preserved.

* The cluster starts with the last specified cluster size.

* If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes.

* If the cluster is not currently in a `TERMINATED` state, nothing will happen.

* Clusters launched to run a job cannot be started.

func (*ClustersAPI) StartByClusterIdAndWait

func (a *ClustersAPI) StartByClusterIdAndWait(ctx context.Context, clusterId string, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

func (*ClustersAPI) Unpin

func (a *ClustersAPI) Unpin(ctx context.Context, request UnpinCluster) error

Unpin cluster.

Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. Unpinning a cluster that is not pinned will have no effect. This API can only be called by workspace admins.

Example (ClustersApiIntegration)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", latest)

clusterName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
	ClusterName:            clusterName,
	SparkVersion:           latest,
	InstancePoolId:         os.Getenv("TEST_INSTANCE_POOL_ID"),
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", clstr)

err = w.Clusters.UnpinByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}

// cleanup

err = w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
if err != nil {
	panic(err)
}
Output:

func (*ClustersAPI) UnpinByClusterId

func (a *ClustersAPI) UnpinByClusterId(ctx context.Context, clusterId string) error

Unpin cluster.

Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. Unpinning a cluster that is not pinned will have no effect. This API can only be called by workspace admins.

func (*ClustersAPI) UpdatePermissions added in v0.19.0

func (a *ClustersAPI) UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)

Update cluster permissions.

Updates the permissions on a cluster. Clusters can inherit permissions from their root object.

func (*ClustersAPI) WaitGetClusterRunning added in v0.10.0

func (a *ClustersAPI) WaitGetClusterRunning(ctx context.Context, clusterId string,
	timeout time.Duration, callback func(*ClusterDetails)) (*ClusterDetails, error)

WaitGetClusterRunning repeatedly calls ClustersAPI.Get and waits to reach RUNNING state

func (*ClustersAPI) WaitGetClusterTerminated added in v0.10.0

func (a *ClustersAPI) WaitGetClusterTerminated(ctx context.Context, clusterId string,
	timeout time.Duration, callback func(*ClusterDetails)) (*ClusterDetails, error)

WaitGetClusterTerminated repeatedly calls ClustersAPI.Get and waits to reach TERMINATED state
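
A minimal sketch of the wait helpers with a progress callback; TEST_DEFAULT_CLUSTER_ID follows the other integration examples here, and the ClusterDetails State field logged in the callback is an assumption:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

// Starting a cluster that is not in a TERMINATED state is a no-op.
err = w.Clusters.StartByClusterId(ctx, clusterId)
if err != nil {
	panic(err)
}

// Poll until the cluster reaches RUNNING, reporting progress along the way.
running, err := w.Clusters.WaitGetClusterRunning(ctx, clusterId, 30*time.Minute,
	func(c *compute.ClusterDetails) {
		logger.Infof(ctx, "cluster %s is %s", c.ClusterId, c.State) // State field assumed
	})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", running)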

func (*ClustersAPI) WithImpl

func (a *ClustersAPI) WithImpl(impl ClustersService) ClustersInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockClustersInterface instead.

type ClustersInterface added in v0.29.0

type ClustersInterface interface {

	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockClustersInterface instead.
	WithImpl(impl ClustersService) ClustersInterface

	// Impl returns the low-level Clusters API implementation.
	// Deprecated: use MockClustersInterface instead.
	Impl() ClustersService

	// WaitGetClusterRunning repeatedly calls [ClustersAPI.Get] and waits to reach RUNNING state
	WaitGetClusterRunning(ctx context.Context, clusterId string,
		timeout time.Duration, callback func(*ClusterDetails)) (*ClusterDetails, error)

	// WaitGetClusterTerminated repeatedly calls [ClustersAPI.Get] and waits to reach TERMINATED state
	WaitGetClusterTerminated(ctx context.Context, clusterId string,
		timeout time.Duration, callback func(*ClusterDetails)) (*ClusterDetails, error)

	// Change cluster owner.
	//
	// Change the owner of the cluster. You must be an admin and the cluster must be
	// terminated to perform this operation. The service principal application ID
	// can be supplied as an argument to `owner_username`.
	ChangeOwner(ctx context.Context, request ChangeClusterOwner) error

	// Create new cluster.
	//
	// Creates a new Spark cluster. This method will acquire new instances from the
	// cloud provider if necessary. Note: Databricks may not be able to acquire some
	// of the requested nodes, due to cloud provider limitations (account limits,
	// spot price, etc.) or transient network issues.
	//
	// If Databricks acquires at least 85% of the requested on-demand nodes, cluster
	// creation will succeed. Otherwise the cluster will terminate with an
	// informative error message.
	Create(ctx context.Context, createCluster CreateCluster) (*WaitGetClusterRunning[CreateClusterResponse], error)

	// Calls [ClustersAPIInterface.Create] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Create].Get() or [ClustersAPIInterface.WaitGetClusterRunning]
	CreateAndWait(ctx context.Context, createCluster CreateCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Terminate cluster.
	//
	// Terminates the Spark cluster with the specified ID. The cluster is removed
	// asynchronously. Once the termination has completed, the cluster will be in a
	// `TERMINATED` state. If the cluster is already in a `TERMINATING` or
	// `TERMINATED` state, nothing will happen.
	Delete(ctx context.Context, deleteCluster DeleteCluster) (*WaitGetClusterTerminated[struct{}], error)

	// Calls [ClustersAPIInterface.Delete] and waits to reach TERMINATED state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Delete].Get() or [ClustersAPIInterface.WaitGetClusterTerminated]
	DeleteAndWait(ctx context.Context, deleteCluster DeleteCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Terminate cluster.
	//
	// Terminates the Spark cluster with the specified ID. The cluster is removed
	// asynchronously. Once the termination has completed, the cluster will be in a
	// `TERMINATED` state. If the cluster is already in a `TERMINATING` or
	// `TERMINATED` state, nothing will happen.
	DeleteByClusterId(ctx context.Context, clusterId string) error

	DeleteByClusterIdAndWait(ctx context.Context, clusterId string, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Update cluster configuration.
	//
	// Updates the configuration of a cluster to match the provided attributes and
	// size. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state.
	//
	// If a cluster is updated while in a `RUNNING` state, it will be restarted so
	// that the new attributes can take effect.
	//
	// If a cluster is updated while in a `TERMINATED` state, it will remain
	// `TERMINATED`. The next time it is started using the `clusters/start` API, the
	// new attributes will take effect. Any attempt to update a cluster in any other
	// state will be rejected with an `INVALID_STATE` error code.
	//
	// Clusters created by the Databricks Jobs service cannot be edited.
	Edit(ctx context.Context, editCluster EditCluster) (*WaitGetClusterRunning[struct{}], error)

	// Calls [ClustersAPIInterface.Edit] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Edit].Get() or [ClustersAPIInterface.WaitGetClusterRunning]
	EditAndWait(ctx context.Context, editCluster EditCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// List cluster activity events.
	//
	// Retrieves a list of events about the activity of a cluster. This API is
	// paginated. If there are more events to read, the response includes all the
	// parameters necessary to request the next page of events.
	//
	// This method is generated by Databricks SDK Code Generator.
	Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent]

	// List cluster activity events.
	//
	// Retrieves a list of events about the activity of a cluster. This API is
	// paginated. If there are more events to read, the response includes all the
	// parameters necessary to request the next page of events.
	//
	// This method is generated by Databricks SDK Code Generator.
	EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error)

	// Get cluster info.
	//
	// Retrieves the information for a cluster given its identifier. Clusters can be
	// described while they are running, or up to 60 days after they are terminated.
	Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)

	// Get cluster info.
	//
	// Retrieves the information for a cluster given its identifier. Clusters can be
	// described while they are running, or up to 60 days after they are terminated.
	GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error)

	// Get cluster permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error)

	// Get cluster permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error)

	// Get cluster permissions.
	//
	// Gets the permissions of a cluster. Clusters can inherit permissions from
	// their root object.
	GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error)

	// Get cluster permissions.
	//
	// Gets the permissions of a cluster. Clusters can inherit permissions from
	// their root object.
	GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error)

	// List all clusters.
	//
	// Return information about all pinned clusters, active clusters, up to 200 of
	// the most recently terminated all-purpose clusters in the past 30 days, and up
	// to 30 of the most recently terminated job clusters in the past 30 days.
	//
	// For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
	// all-purpose clusters in the past 30 days, and 50 terminated job clusters in
	// the past 30 days, then this API returns the 1 pinned cluster, 4 active
	// clusters, all 45 terminated all-purpose clusters, and the 30 most recently
	// terminated job clusters.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails]

	// List all clusters.
	//
	// Return information about all pinned clusters, active clusters, up to 200 of
	// the most recently terminated all-purpose clusters in the past 30 days, and up
	// to 30 of the most recently terminated job clusters in the past 30 days.
	//
	// For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
	// all-purpose clusters in the past 30 days, and 50 terminated job clusters in
	// the past 30 days, then this API returns the 1 pinned cluster, 4 active
	// clusters, all 45 terminated all-purpose clusters, and the 30 most recently
	// terminated job clusters.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error)

	// ClusterDetailsClusterNameToClusterIdMap calls [ClustersAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value.
	//
	// Returns an error if there's more than one [ClusterDetails] with the same .ClusterName.
	//
	// Note: All [ClusterDetails] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error)

	// GetByClusterName calls [ClustersAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails].
	//
	// Returns an error if there's more than one [ClusterDetails] with the same .ClusterName.
	//
	// Note: All [ClusterDetails] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error)

	// List all clusters.
	//
	// Return information about all pinned clusters, active clusters, up to 200 of
	// the most recently terminated all-purpose clusters in the past 30 days, and up
	// to 30 of the most recently terminated job clusters in the past 30 days.
	//
	// For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
	// all-purpose clusters in the past 30 days, and 50 terminated job clusters in
	// the past 30 days, then this API returns the 1 pinned cluster, 4 active
	// clusters, all 45 terminated all-purpose clusters, and the 30 most recently
	// terminated job clusters.
	ListByCanUseClient(ctx context.Context, canUseClient string) (*ListClustersResponse, error)

	// List node types.
	//
	// Returns a list of supported Spark node types. These node types can be used to
	// launch a cluster.
	ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error)

	// List availability zones.
	//
	// Returns a list of availability zones where clusters can be created (for
	// example, us-west-2a). These zones can be used to launch a cluster.
	ListZones(ctx context.Context) (*ListAvailableZonesResponse, error)

	// Permanently delete cluster.
	//
	// Permanently deletes a Spark cluster. This cluster is terminated and resources
	// are asynchronously removed.
	//
	// In addition, users will no longer see permanently deleted clusters in the
	// cluster list, and API users can no longer perform any action on permanently
	// deleted clusters.
	PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error

	// Permanently delete cluster.
	//
	// Permanently deletes a Spark cluster. This cluster is terminated and resources
	// are asynchronously removed.
	//
	// In addition, users will no longer see permanently deleted clusters in the
	// cluster list, and API users can no longer perform any action on permanently
	// deleted clusters.
	PermanentDeleteByClusterId(ctx context.Context, clusterId string) error

	// Pin cluster.
	//
	// Pinning a cluster ensures that the cluster will always be returned by the
	// ListClusters API. Pinning a cluster that is already pinned will have no
	// effect. This API can only be called by workspace admins.
	Pin(ctx context.Context, request PinCluster) error

	// Pin cluster.
	//
	// Pinning a cluster ensures that the cluster will always be returned by the
	// ListClusters API. Pinning a cluster that is already pinned will have no
	// effect. This API can only be called by workspace admins.
	PinByClusterId(ctx context.Context, clusterId string) error

	// Resize cluster.
	//
	// Resizes a cluster to have a desired number of workers. This will fail unless
	// the cluster is in a `RUNNING` state.
	Resize(ctx context.Context, resizeCluster ResizeCluster) (*WaitGetClusterRunning[struct{}], error)

	// Calls [ClustersAPIInterface.Resize] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Resize].Get() or [ClustersAPIInterface.WaitGetClusterRunning]
	ResizeAndWait(ctx context.Context, resizeCluster ResizeCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Restart cluster.
	//
	// Restarts a Spark cluster with the supplied ID. If the cluster is not
	// currently in a `RUNNING` state, nothing will happen.
	Restart(ctx context.Context, restartCluster RestartCluster) (*WaitGetClusterRunning[struct{}], error)

	// Calls [ClustersAPIInterface.Restart] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Restart].Get() or [ClustersAPIInterface.WaitGetClusterRunning]
	RestartAndWait(ctx context.Context, restartCluster RestartCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Set cluster permissions.
	//
	// Sets permissions on a cluster. Clusters can inherit permissions from their
	// root object.
	SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)

	// List available Spark versions.
	//
	// Returns the list of available Spark versions. These versions can be used to
	// launch a cluster.
	SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error)

	// Start terminated cluster.
	//
	// Starts a terminated Spark cluster with the supplied ID. This works
	// similarly to `createCluster` except:
	//
	// * The previous cluster id and attributes are preserved.
	// * The cluster starts with the last specified cluster size.
	// * If the previous cluster was an autoscaling cluster, the current cluster
	// starts with the minimum number of nodes.
	// * If the cluster is not currently in a `TERMINATED` state, nothing will
	// happen.
	// * Clusters launched to run a job cannot be started.
	Start(ctx context.Context, startCluster StartCluster) (*WaitGetClusterRunning[struct{}], error)

	// Calls [ClustersAPIInterface.Start] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[ClusterDetails](60*time.Minute) functional option.
	//
	// Deprecated: use [ClustersAPIInterface.Start].Get() or [ClustersAPIInterface.WaitGetClusterRunning]
	StartAndWait(ctx context.Context, startCluster StartCluster, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Start terminated cluster.
	//
	// Starts a terminated Spark cluster with the supplied ID. This works
	// similarly to `createCluster` except:
	//
	// * The previous cluster id and attributes are preserved.
	// * The cluster starts with the last specified cluster size.
	// * If the previous cluster was an autoscaling cluster, the current cluster
	// starts with the minimum number of nodes.
	// * If the cluster is not currently in a `TERMINATED` state, nothing will
	// happen.
	// * Clusters launched to run a job cannot be started.
	StartByClusterId(ctx context.Context, clusterId string) error

	StartByClusterIdAndWait(ctx context.Context, clusterId string, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error)

	// Unpin cluster.
	//
	// Unpinning a cluster will allow the cluster to eventually be removed from the
	// ListClusters API. Unpinning a cluster that is not pinned will have no effect.
	// This API can only be called by workspace admins.
	Unpin(ctx context.Context, request UnpinCluster) error

	// Unpin cluster.
	//
	// Unpinning a cluster will allow the cluster to eventually be removed from the
	// ListClusters API. Unpinning a cluster that is not pinned will have no effect.
	// This API can only be called by workspace admins.
	UnpinByClusterId(ctx context.Context, clusterId string) error

	// Update cluster permissions.
	//
	// Updates the permissions on a cluster. Clusters can inherit permissions from
	// their root object.
	UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)
	// contains filtered or unexported methods
}

type ClustersService

type ClustersService interface {

	// Change cluster owner.
	//
	// Change the owner of the cluster. You must be an admin and the cluster
	// must be terminated to perform this operation. The service principal
	// application ID can be supplied as an argument to `owner_username`.
	ChangeOwner(ctx context.Context, request ChangeClusterOwner) error

	// Create new cluster.
	//
	// Creates a new Spark cluster. This method will acquire new instances from
	// the cloud provider if necessary. Note: Databricks may not be able to
	// acquire some of the requested nodes, due to cloud provider limitations
	// (account limits, spot price, etc.) or transient network issues.
	//
	// If Databricks acquires at least 85% of the requested on-demand nodes,
	// cluster creation will succeed. Otherwise the cluster will terminate with
	// an informative error message.
	Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error)

	// Terminate cluster.
	//
	// Terminates the Spark cluster with the specified ID. The cluster is
	// removed asynchronously. Once the termination has completed, the cluster
	// will be in a `TERMINATED` state. If the cluster is already in a
	// `TERMINATING` or `TERMINATED` state, nothing will happen.
	Delete(ctx context.Context, request DeleteCluster) error

	// Update cluster configuration.
	//
	// Updates the configuration of a cluster to match the provided attributes
	// and size. A cluster can be updated if it is in a `RUNNING` or
	// `TERMINATED` state.
	//
	// If a cluster is updated while in a `RUNNING` state, it will be restarted
	// so that the new attributes can take effect.
	//
	// If a cluster is updated while in a `TERMINATED` state, it will remain
	// `TERMINATED`. The next time it is started using the `clusters/start` API,
	// the new attributes will take effect. Any attempt to update a cluster in
	// any other state will be rejected with an `INVALID_STATE` error code.
	//
	// Clusters created by the Databricks Jobs service cannot be edited.
	Edit(ctx context.Context, request EditCluster) error

	// List cluster activity events.
	//
	// Retrieves a list of events about the activity of a cluster. This API is
	// paginated. If there are more events to read, the response includes all
	// the parameters necessary to request the next page of events.
	//
	// Use EventsAll() to get all ClusterEvent instances, which will iterate over every result page.
	Events(ctx context.Context, request GetEvents) (*GetEventsResponse, error)

	// Get cluster info.
	//
	// Retrieves the information for a cluster given its identifier. Clusters
	// can be described while they are running, or up to 60 days after they are
	// terminated.
	Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error)

	// Get cluster permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error)

	// Get cluster permissions.
	//
	// Gets the permissions of a cluster. Clusters can inherit permissions from
	// their root object.
	GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error)

	// List all clusters.
	//
	// Return information about all pinned clusters, active clusters, up to 200
	// of the most recently terminated all-purpose clusters in the past 30 days,
	// and up to 30 of the most recently terminated job clusters in the past 30
	// days.
	//
	// For example, if there is 1 pinned cluster, 4 active clusters, 45
	// terminated all-purpose clusters in the past 30 days, and 50 terminated
	// job clusters in the past 30 days, then this API returns the 1 pinned
	// cluster, 4 active clusters, all 45 terminated all-purpose clusters, and
	// the 30 most recently terminated job clusters.
	//
	// Use ListAll() to get all ClusterDetails instances
	List(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error)

	// List node types.
	//
	// Returns a list of supported Spark node types. These node types can be
	// used to launch a cluster.
	ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error)

	// List availability zones.
	//
	// Returns a list of availability zones where clusters can be created (for
	// example, us-west-2a). These zones can be used to launch a cluster.
	ListZones(ctx context.Context) (*ListAvailableZonesResponse, error)

	// Permanently delete cluster.
	//
	// Permanently deletes a Spark cluster. This cluster is terminated and
	// resources are asynchronously removed.
	//
	// In addition, users will no longer see permanently deleted clusters in the
	// cluster list, and API users can no longer perform any action on
	// permanently deleted clusters.
	PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error

	// Pin cluster.
	//
	// Pinning a cluster ensures that the cluster will always be returned by the
	// ListClusters API. Pinning a cluster that is already pinned will have no
	// effect. This API can only be called by workspace admins.
	Pin(ctx context.Context, request PinCluster) error

	// Resize cluster.
	//
	// Resizes a cluster to have a desired number of workers. This will fail
	// unless the cluster is in a `RUNNING` state.
	Resize(ctx context.Context, request ResizeCluster) error

	// Restart cluster.
	//
	// Restarts a Spark cluster with the supplied ID. If the cluster is not
	// currently in a `RUNNING` state, nothing will happen.
	Restart(ctx context.Context, request RestartCluster) error

	// Set cluster permissions.
	//
	// Sets permissions on a cluster. Clusters can inherit permissions from
	// their root object.
	SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)

	// List available Spark versions.
	//
	// Returns the list of available Spark versions. These versions can be used
	// to launch a cluster.
	SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error)

	// Start terminated cluster.
	//
	// Starts a terminated Spark cluster with the supplied ID. This works
	// similarly to `createCluster` except:
	//
	// * The previous cluster id and attributes are preserved.
	// * The cluster starts with the last specified cluster size.
	// * If the previous cluster was an autoscaling cluster, the current cluster
	// starts with the minimum number of nodes.
	// * If the cluster is not currently in a `TERMINATED` state, nothing will
	// happen.
	// * Clusters launched to run a job cannot be started.
	Start(ctx context.Context, request StartCluster) error

	// Unpin cluster.
	//
	// Unpinning a cluster will allow the cluster to eventually be removed from
	// the ListClusters API. Unpinning a cluster that is not pinned will have no
	// effect. This API can only be called by workspace admins.
	Unpin(ctx context.Context, request UnpinCluster) error

	// Update cluster permissions.
	//
	// Updates the permissions on a cluster. Clusters can inherit permissions
	// from their root object.
	UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error)
}

The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.

Databricks maps cluster node instance types to compute units known as DBUs. See the instance type pricing page for a list of the supported instance types and their corresponding DBUs.

A Databricks cluster is a set of computation resources and configurations on which you run data engineering, data science, and data analytics workloads, such as production ETL pipelines, streaming analytics, ad-hoc analytics, and machine learning.

You run these workloads as a set of commands in a notebook or as an automated job. Databricks makes a distinction between all-purpose clusters and job clusters. You use all-purpose clusters to analyze data collaboratively using interactive notebooks. You use job clusters to run fast and robust automated jobs.

You can create an all-purpose cluster using the UI, CLI, or REST API. You can manually terminate and restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive analysis.

IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose clusters terminated in the last 30 days and up to 30 job clusters recently terminated by the job scheduler. To keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an administrator can pin a cluster to the cluster list.
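
A rough usage sketch, in the style of the examples further down this page: list the clusters visible to the caller and pin one so its configuration outlives the 30-day retention window. It assumes the PinCluster request type carries the cluster ID in a ClusterId field.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// List every cluster visible to the caller (pinned, active, and recently terminated).
all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{})
if err != nil {
	panic(err)
}

// Pin the first cluster so it is always returned by the ListClusters API.
if len(all) > 0 {
	err = w.Clusters.Pin(ctx, compute.PinCluster{ClusterId: all[0].ClusterId})
	if err != nil {
		panic(err)
	}
}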

type Command

type Command struct {
	// Running cluster id
	ClusterId string `json:"clusterId,omitempty"`
	// Executable code
	Command string `json:"command,omitempty"`
	// Running context id
	ContextId string `json:"contextId,omitempty"`

	Language Language `json:"language,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Command) MarshalJSON added in v0.23.0

func (s Command) MarshalJSON() ([]byte, error)

func (*Command) UnmarshalJSON added in v0.23.0

func (s *Command) UnmarshalJSON(b []byte) error

type CommandExecutionAPI

type CommandExecutionAPI struct {
	// contains filtered or unexported fields
}

This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters.

func NewCommandExecution

func NewCommandExecution(client *client.DatabricksClient) *CommandExecutionAPI

func (*CommandExecutionAPI) Cancel

Cancel a command.

Cancels a currently running command within an execution context.

The command ID is obtained from a prior successful call to __execute__.

func (*CommandExecutionAPI) CancelAndWait deprecated

func (a *CommandExecutionAPI) CancelAndWait(ctx context.Context, cancelCommand CancelCommand, options ...retries.Option[CommandStatusResponse]) (*CommandStatusResponse, error)

Calls CommandExecutionAPI.Cancel and waits to reach Cancelled state

You can override the default timeout of 20 minutes by passing the retries.Timeout[CommandStatusResponse](60*time.Minute) functional option.

Deprecated: use CommandExecutionAPI.Cancel.Get() or CommandExecutionAPI.WaitCommandStatusCommandExecutionCancelled

func (*CommandExecutionAPI) CommandStatus

Get command info.

Gets the status of and, if available, the results from a currently executing command.

The command ID is obtained from a prior successful call to __execute__.
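
A minimal polling sketch, assuming w, ctx, clusterId, contextId and commandId were obtained as in the examples elsewhere on this page (the request fields match CommandStatusRequest documented below):

status, err := w.CommandExecution.CommandStatus(ctx, compute.CommandStatusRequest{
	ClusterId: clusterId,
	ContextId: contextId,
	CommandId: commandId,
})
if err != nil {
	panic(err)
}
// status.Status reports Queued/Running/Cancelling/Cancelled/Finished/Error;
// status.Results is populated once output is available.
logger.Infof(ctx, "command %s is %s", status.Id, status.Status)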

func (*CommandExecutionAPI) ContextStatus

Get status.

Gets the status for an execution context.

func (*CommandExecutionAPI) Create

Create an execution context.

Creates an execution context for running cluster commands.

If successful, this method returns the ID of the new execution context.

Example (CommandsDirectUsage)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

context, err := w.CommandExecution.CreateAndWait(ctx, compute.CreateContext{
	ClusterId: clusterId,
	Language:  compute.LanguagePython,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", context)

// cleanup

err = w.CommandExecution.Destroy(ctx, compute.DestroyContext{
	ClusterId: clusterId,
	ContextId: context.Id,
})
if err != nil {
	panic(err)
}
Output:

func (*CommandExecutionAPI) CreateAndWait deprecated

func (a *CommandExecutionAPI) CreateAndWait(ctx context.Context, createContext CreateContext, options ...retries.Option[ContextStatusResponse]) (*ContextStatusResponse, error)

Calls CommandExecutionAPI.Create and waits to reach Running state

You can override the default timeout of 20 minutes by passing the retries.Timeout[ContextStatusResponse](60*time.Minute) functional option.

Deprecated: use CommandExecutionAPI.Create.Get() or CommandExecutionAPI.WaitContextStatusCommandExecutionRunning

func (*CommandExecutionAPI) Destroy

func (a *CommandExecutionAPI) Destroy(ctx context.Context, request DestroyContext) error

Delete an execution context.

Deletes an execution context.

func (*CommandExecutionAPI) Execute

Run a command.

Runs a cluster command in the given execution context, using the provided language.

If successful, it returns an ID for tracking the status of the command's execution.

Example (CommandsDirectUsage)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

context, err := w.CommandExecution.CreateAndWait(ctx, compute.CreateContext{
	ClusterId: clusterId,
	Language:  compute.LanguagePython,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", context)

textResults, err := w.CommandExecution.ExecuteAndWait(ctx, compute.Command{
	ClusterId: clusterId,
	ContextId: context.Id,
	Language:  compute.LanguagePython,
	Command:   "print(1)",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", textResults)

// cleanup

err = w.CommandExecution.Destroy(ctx, compute.DestroyContext{
	ClusterId: clusterId,
	ContextId: context.Id,
})
if err != nil {
	panic(err)
}
Output:

func (*CommandExecutionAPI) ExecuteAndWait deprecated

Calls CommandExecutionAPI.Execute and waits to reach Finished or Error state

You can override the default timeout of 20 minutes by passing the retries.Timeout[CommandStatusResponse](60*time.Minute) functional option.

Deprecated: use CommandExecutionAPI.Execute.Get() or CommandExecutionAPI.WaitCommandStatusCommandExecutionFinishedOrError

func (*CommandExecutionAPI) Impl

Impl returns the low-level CommandExecution API implementation. Deprecated: use MockCommandExecutionInterface instead.

func (*CommandExecutionAPI) Start added in v0.10.0

func (a *CommandExecutionAPI) Start(ctx context.Context, clusterID string, language Language) (*CommandExecutorV2, error)

Start the command execution context on a cluster and ensure it transitions to a running state

Example (Commands)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

commandContext, err := w.CommandExecution.Start(ctx, clusterId, compute.LanguagePython)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", commandContext)
Output:

func (*CommandExecutionAPI) WaitCommandStatusCommandExecutionCancelled added in v0.10.0

func (a *CommandExecutionAPI) WaitCommandStatusCommandExecutionCancelled(ctx context.Context, clusterId string, commandId string, contextId string,
	timeout time.Duration, callback func(*CommandStatusResponse)) (*CommandStatusResponse, error)

WaitCommandStatusCommandExecutionCancelled repeatedly calls CommandExecutionAPI.CommandStatus and waits to reach Cancelled state

func (*CommandExecutionAPI) WaitCommandStatusCommandExecutionFinishedOrError added in v0.10.0

func (a *CommandExecutionAPI) WaitCommandStatusCommandExecutionFinishedOrError(ctx context.Context, clusterId string, commandId string, contextId string,
	timeout time.Duration, callback func(*CommandStatusResponse)) (*CommandStatusResponse, error)

WaitCommandStatusCommandExecutionFinishedOrError repeatedly calls CommandExecutionAPI.CommandStatus and waits to reach Finished or Error state

func (*CommandExecutionAPI) WaitContextStatusCommandExecutionRunning added in v0.10.0

func (a *CommandExecutionAPI) WaitContextStatusCommandExecutionRunning(ctx context.Context, clusterId string, contextId string,
	timeout time.Duration, callback func(*ContextStatusResponse)) (*ContextStatusResponse, error)

WaitContextStatusCommandExecutionRunning repeatedly calls CommandExecutionAPI.ContextStatus and waits to reach Running state

func (*CommandExecutionAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockCommandExecutionInterface instead.

type CommandExecutionInterface added in v0.29.0

type CommandExecutionInterface interface {

	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockCommandExecutionInterface instead.
	WithImpl(impl CommandExecutionService) CommandExecutionInterface

	// Impl returns low-level CommandExecution API implementation
	// Deprecated: use MockCommandExecutionInterface instead.
	Impl() CommandExecutionService

	// WaitCommandStatusCommandExecutionCancelled repeatedly calls [CommandExecutionAPI.CommandStatus] and waits to reach Cancelled state
	WaitCommandStatusCommandExecutionCancelled(ctx context.Context, clusterId string, commandId string, contextId string,
		timeout time.Duration, callback func(*CommandStatusResponse)) (*CommandStatusResponse, error)

	// WaitCommandStatusCommandExecutionFinishedOrError repeatedly calls [CommandExecutionAPI.CommandStatus] and waits to reach Finished or Error state
	WaitCommandStatusCommandExecutionFinishedOrError(ctx context.Context, clusterId string, commandId string, contextId string,
		timeout time.Duration, callback func(*CommandStatusResponse)) (*CommandStatusResponse, error)

	// WaitContextStatusCommandExecutionRunning repeatedly calls [CommandExecutionAPI.ContextStatus] and waits to reach Running state
	WaitContextStatusCommandExecutionRunning(ctx context.Context, clusterId string, contextId string,
		timeout time.Duration, callback func(*ContextStatusResponse)) (*ContextStatusResponse, error)

	// Cancel a command.
	//
	// Cancels a currently running command within an execution context.
	//
	// The command ID is obtained from a prior successful call to __execute__.
	Cancel(ctx context.Context, cancelCommand CancelCommand) (*WaitCommandStatusCommandExecutionCancelled[struct{}], error)

	// Calls [CommandExecutionAPIInterface.Cancel] and waits to reach Cancelled state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[CommandStatusResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [CommandExecutionAPIInterface.Cancel].Get() or [CommandExecutionAPIInterface.WaitCommandStatusCommandExecutionCancelled]
	CancelAndWait(ctx context.Context, cancelCommand CancelCommand, options ...retries.Option[CommandStatusResponse]) (*CommandStatusResponse, error)

	// Get command info.
	//
	// Gets the status of and, if available, the results from a currently executing
	// command.
	//
	// The command ID is obtained from a prior successful call to __execute__.
	CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error)

	// Get status.
	//
	// Gets the status for an execution context.
	ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error)

	// Create an execution context.
	//
	// Creates an execution context for running cluster commands.
	//
	// If successful, this method returns the ID of the new execution context.
	Create(ctx context.Context, createContext CreateContext) (*WaitContextStatusCommandExecutionRunning[Created], error)

	// Calls [CommandExecutionAPIInterface.Create] and waits to reach Running state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[ContextStatusResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [CommandExecutionAPIInterface.Create].Get() or [CommandExecutionAPIInterface.WaitContextStatusCommandExecutionRunning]
	CreateAndWait(ctx context.Context, createContext CreateContext, options ...retries.Option[ContextStatusResponse]) (*ContextStatusResponse, error)

	// Delete an execution context.
	//
	// Deletes an execution context.
	Destroy(ctx context.Context, request DestroyContext) error

	// Run a command.
	//
	// Runs a cluster command in the given execution context, using the provided
	// language.
	//
	// If successful, it returns an ID for tracking the status of the command's
	// execution.
	Execute(ctx context.Context, command Command) (*WaitCommandStatusCommandExecutionFinishedOrError[Created], error)

	// Calls [CommandExecutionAPIInterface.Execute] and waits to reach Finished or Error state
	//
	// You can override the default timeout of 20 minutes by passing the
	// retries.Timeout[CommandStatusResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [CommandExecutionAPIInterface.Execute].Get() or [CommandExecutionAPIInterface.WaitCommandStatusCommandExecutionFinishedOrError]
	ExecuteAndWait(ctx context.Context, command Command, options ...retries.Option[CommandStatusResponse]) (*CommandStatusResponse, error)
	// contains filtered or unexported methods
}

type CommandExecutionService

type CommandExecutionService interface {

	// Cancel a command.
	//
	// Cancels a currently running command within an execution context.
	//
	// The command ID is obtained from a prior successful call to __execute__.
	Cancel(ctx context.Context, request CancelCommand) error

	// Get command info.
	//
	// Gets the status of and, if available, the results from a currently
	// executing command.
	//
	// The command ID is obtained from a prior successful call to __execute__.
	CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error)

	// Get status.
	//
	// Gets the status for an execution context.
	ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error)

	// Create an execution context.
	//
	// Creates an execution context for running cluster commands.
	//
	// If successful, this method returns the ID of the new execution context.
	Create(ctx context.Context, request CreateContext) (*Created, error)

	// Delete an execution context.
	//
	// Deletes an execution context.
	Destroy(ctx context.Context, request DestroyContext) error

	// Run a command.
	//
	// Runs a cluster command in the given execution context, using the provided
	// language.
	//
	// If successful, it returns an ID for tracking the status of the command's
	// execution.
	Execute(ctx context.Context, request Command) (*Created, error)
}

This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters.

type CommandExecutor

type CommandExecutor interface {
	Execute(ctx context.Context, clusterID, language, commandStr string) Results
}

CommandExecutor creates a Spark context, executes a command, and then closes the context.

func NewCommandExecutor

func NewCommandExecutor(client *client.DatabricksClient) CommandExecutor

type CommandExecutorV2 added in v0.10.0

type CommandExecutorV2 struct {
	// contains filtered or unexported fields
}

func (*CommandExecutorV2) Destroy added in v0.10.0

func (c *CommandExecutorV2) Destroy(ctx context.Context) error

func (*CommandExecutorV2) Execute added in v0.10.0

func (c *CommandExecutorV2) Execute(ctx context.Context, command string) (*Results, error)

Execute runs the given command in the running cluster and context and waits for the results.
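
A minimal end-to-end sketch in the style of the examples above: start an executor on a cluster, run a command, and destroy the context when done.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := os.Getenv("TEST_DEFAULT_CLUSTER_ID")

executor, err := w.CommandExecution.Start(ctx, clusterId, compute.LanguagePython)
if err != nil {
	panic(err)
}
// Tear down the execution context when finished.
defer executor.Destroy(ctx)

res, err := executor.Execute(ctx, "print(1)")
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", res)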

type CommandMock

type CommandMock func(commandStr string) Results

CommandMock mocks the execution of command

func (CommandMock) Execute

func (m CommandMock) Execute(_ context.Context, _, _, commandStr string) Results
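
Because CommandMock implements CommandExecutor, tests can substitute canned output for a real cluster. A hypothetical sketch (the Results.Data field used here is an assumption about the Results type):

// CommandMock satisfies CommandExecutor, so tests can inject canned results.
var exec compute.CommandExecutor = compute.CommandMock(func(commandStr string) compute.Results {
	return compute.Results{Data: "1"} // canned result, regardless of commandStr
})

res := exec.Execute(context.Background(), "any-cluster-id", "python", "print(1)")
logger.Infof(context.Background(), "mocked result: %v", res)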

type CommandStatus

type CommandStatus string
const CommandStatusCancelled CommandStatus = `Cancelled`
const CommandStatusCancelling CommandStatus = `Cancelling`
const CommandStatusError CommandStatus = `Error`
const CommandStatusFinished CommandStatus = `Finished`
const CommandStatusQueued CommandStatus = `Queued`
const CommandStatusRunning CommandStatus = `Running`

func (*CommandStatus) Set

func (f *CommandStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*CommandStatus) String

func (f *CommandStatus) String() string

String representation for fmt.Print

func (*CommandStatus) Type

func (f *CommandStatus) Type() string

Type always returns CommandStatus to satisfy [pflag.Value] interface
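
Because Set, String and Type together satisfy [pflag.Value], the enum can be bound directly to a command-line flag. A small sketch, assuming github.com/spf13/pflag and fmt are imported:

var status compute.CommandStatus
fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
fs.Var(&status, "status", "one of Queued, Running, Cancelling, Cancelled, Finished, Error")
if err := fs.Parse([]string{"--status", "Finished"}); err != nil {
	panic(err) // invalid values are rejected by Set
}
fmt.Println(status)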

type CommandStatusRequest

type CommandStatusRequest struct {
	ClusterId string `json:"-" url:"clusterId"`

	CommandId string `json:"-" url:"commandId"`

	ContextId string `json:"-" url:"contextId"`
}

Get command info

type CommandStatusResponse

type CommandStatusResponse struct {
	Id string `json:"id,omitempty"`

	Results *Results `json:"results,omitempty"`

	Status CommandStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CommandStatusResponse) MarshalJSON added in v0.23.0

func (s CommandStatusResponse) MarshalJSON() ([]byte, error)

func (*CommandStatusResponse) UnmarshalJSON added in v0.23.0

func (s *CommandStatusResponse) UnmarshalJSON(b []byte) error

type CommandsHighLevelAPI

type CommandsHighLevelAPI struct {
	// contains filtered or unexported fields
}

CommandsHighLevelAPI exposes more friendly wrapper over command execution

func (*CommandsHighLevelAPI) Execute deprecated

func (a *CommandsHighLevelAPI) Execute(ctx context.Context, clusterID, language, commandStr string) Results

Execute creates a Spark context, executes a command, and then closes the context. Any leading whitespace is trimmed.

Deprecated: please switch to CommandExecutorV2

type ContextStatus

type ContextStatus string
const ContextStatusError ContextStatus = `Error`
const ContextStatusPending ContextStatus = `Pending`
const ContextStatusRunning ContextStatus = `Running`

func (*ContextStatus) Set

func (f *ContextStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*ContextStatus) String

func (f *ContextStatus) String() string

String representation for fmt.Print

func (*ContextStatus) Type

func (f *ContextStatus) Type() string

Type always returns ContextStatus to satisfy [pflag.Value] interface

type ContextStatusRequest

type ContextStatusRequest struct {
	ClusterId string `json:"-" url:"clusterId"`

	ContextId string `json:"-" url:"contextId"`
}

Get status

type ContextStatusResponse

type ContextStatusResponse struct {
	Id string `json:"id,omitempty"`

	Status ContextStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ContextStatusResponse) MarshalJSON added in v0.23.0

func (s ContextStatusResponse) MarshalJSON() ([]byte, error)

func (*ContextStatusResponse) UnmarshalJSON added in v0.23.0

func (s *ContextStatusResponse) UnmarshalJSON(b []byte) error

type CreateCluster

type CreateCluster struct {
	// When set to true, fixed and default values from the policy will be used
	// for fields that are omitted. When set to false, only fixed values from
	// the policy will be applied.
	ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// Automatically terminates the cluster after it is inactive for this time
	// in minutes. If not set, this cluster will not be automatically
	// terminated. If specified, the threshold must be between 10 and 10000
	// minutes. Users can also set this value to 0 to explicitly disable
	// automatic termination.
	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
	// When specified, this clones libraries from a source cluster during the
	// creation of a new cluster.
	CloneFrom *CloneCluster `json:"clone_from,omitempty"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
	// one destination can be specified for one cluster. If the conf is given,
	// the logs will be delivered to the destination every `5 mins`. The
	// destination of driver logs is `$destination/$clusterId/driver`, while the
	// destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Cluster name requested by the user. This doesn't have to be unique. If
	// not specified at creation, the cluster name will be an empty string.
	ClusterName string `json:"cluster_name,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a
	// subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Data security mode decides what data governance model to use when
	// accessing data from a cluster.
	//
	// * `NONE`: No security isolation for multiple users sharing the cluster.
	// Data governance features are not available in this mode.
	// * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
	// single user specified in `single_user_name`. Most programming languages,
	// cluster features and data governance features are available in this mode.
	// * `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
	// Cluster users are fully isolated so that they cannot see each other's
	// data and credentials. Most data governance features are supported in this
	// mode. But programming languages and cluster features might be limited.
	//
	// The following modes are deprecated starting with Databricks Runtime 15.0
	// and will be removed for future Databricks Runtime versions:
	//
	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
	// ACL clusters.
	// * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy
	// Passthrough on high concurrency clusters.
	// * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
	// Passthrough on standard clusters.
	// * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that has
	// neither UC nor passthrough enabled.
	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`

	DockerImage *DockerImage `json:"docker_image,omitempty"`
	// The optional ID of the instance pool to which the cluster's driver
	// belongs. If the driver pool is not assigned, the cluster uses the
	// instance pool with id (instance_pool_id) for the driver as well.
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if
	// unset, the driver node type will be set as the same value as
	// `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Autoscaling Local Storage: when enabled, this cluster will dynamically
	// acquire additional disk space when its Spark workers are running low on
	// disk space. This feature requires specific AWS permissions to function
	// correctly - refer to the User Guide for more details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Whether to enable LUKS on cluster VMs' local disks
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
	// to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// Determines which runtime engine to use, e.g. Standard vs. Photon. If
	// unspecified, the runtime engine is inferred from spark_version.
	RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
	// Single user name if data_security_mode is `SINGLE_USER`
	SingleUserName string `json:"single_user_name,omitempty"`
	// An object containing a set of optional, user-specified Spark
	// configuration key-value pairs. Users can also pass in a string of extra
	// JVM options to the driver and the executors via
	// `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
	// respectively.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// An object containing a set of optional, user-specified environment
	// variable key-value pairs. Please note that key-value pair of the form
	// (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
	// driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
	// recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
	// example below. This ensures that all default databricks managed
	// environmental variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
	// "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
	// "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
	// available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	SparkVersion string `json:"spark_version"`
	// SSH public key contents that will be added to each Spark node in this
	// cluster. The corresponding private keys can be used to login with the
	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`

	WorkloadType *WorkloadType `json:"workload_type,omitempty"`

	ForceSendFields []string `json:"-"`
}
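
Only SparkVersion is required by the struct; a practical request usually also names and sizes the cluster. A hedged sketch, where the Spark version and node type strings are placeholders to be filled from SparkVersions and ListNodeTypes:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

wait, err := w.Clusters.Create(ctx, compute.CreateCluster{
	ClusterName:            "sdk-example",
	SparkVersion:           "<spark-version>", // pick one via w.Clusters.SparkVersions
	NodeTypeId:             "<node-type-id>",  // pick one via w.Clusters.ListNodeTypes
	AutoterminationMinutes: 15,
	NumWorkers:             1,
})
if err != nil {
	panic(err)
}

// Block until the cluster reaches RUNNING (default timeout 20 minutes).
info, err := wait.Get()
if err != nil {
	panic(err)
}
logger.Infof(ctx, "created cluster %s", info.ClusterId)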

func (CreateCluster) MarshalJSON added in v0.23.0

func (s CreateCluster) MarshalJSON() ([]byte, error)

func (*CreateCluster) UnmarshalJSON added in v0.23.0

func (s *CreateCluster) UnmarshalJSON(b []byte) error

type CreateClusterResponse

type CreateClusterResponse struct {
	ClusterId string `json:"cluster_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateClusterResponse) MarshalJSON added in v0.23.0

func (s CreateClusterResponse) MarshalJSON() ([]byte, error)

func (*CreateClusterResponse) UnmarshalJSON added in v0.23.0

func (s *CreateClusterResponse) UnmarshalJSON(b []byte) error

type CreateContext

type CreateContext struct {
	// Running cluster id
	ClusterId string `json:"clusterId,omitempty"`

	Language Language `json:"language,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateContext) MarshalJSON added in v0.23.0

func (s CreateContext) MarshalJSON() ([]byte, error)

func (*CreateContext) UnmarshalJSON added in v0.23.0

func (s *CreateContext) UnmarshalJSON(b []byte) error

type CreateInstancePool

type CreateInstancePool struct {
	// Attributes related to instance pools running on Amazon Web Services. If
	// not specified at pool creation, a set of default values will be used.
	AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to instance pools running on Azure. If not specified
	// at pool creation, a set of default values will be used.
	AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"`
	// Additional tags for pool resources. Databricks will tag all pool
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Defines the specification of the disks that will be attached to all spark
	// containers.
	DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
	// Autoscaling Local Storage: when enabled, the instances in this pool will
	// dynamically acquire additional disk space when their Spark workers are
	// running low on disk space. In AWS, this feature requires specific AWS
	// permissions to function correctly - refer to the User Guide for more
	// details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Attributes related to instance pools running on Google Cloud Platform. If
	// not specified at pool creation, a set of default values will be used.
	GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"`
	// Automatically terminates the extra instances in the pool cache after they
	// are inactive for this time in minutes if the min_idle_instances
	// requirement is already met. If not set, the extra pool instances will be
	// automatically terminated after a default timeout. If specified, the
	// threshold must be between 0 and 10000 minutes. Users can also set this
	// value to 0 to instantly remove idle instances from the cache as long as
	// the minimum cache size is still maintained.
	IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
	// Pool name requested by the user. Pool name must be unique. Length must be
	// between 1 and 100 characters.
	InstancePoolName string `json:"instance_pool_name"`
	// Maximum number of outstanding instances to keep in the pool, including
	// both instances used by clusters and idle instances. Clusters that require
	// further instance provisioning will fail during upsize requests.
	MaxCapacity int `json:"max_capacity,omitempty"`
	// Minimum number of idle instances to keep in the instance pool
	MinIdleInstances int `json:"min_idle_instances,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id"`
	// Custom Docker Image BYOC
	PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"`
	// A list containing at most one preloaded Spark image version for the pool.
	// Pool-backed clusters started with the preloaded Spark version will start
	// faster. A list of available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`

	ForceSendFields []string `json:"-"`
}
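
A hedged sketch of creating a small warm pool. It assumes the workspace client exposes an InstancePools service whose Create accepts this request; the node type is a placeholder:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

pool, err := w.InstancePools.Create(ctx, compute.CreateInstancePool{
	InstancePoolName:                   "sdk-example-pool",
	NodeTypeId:                         "<node-type-id>", // pick one via w.Clusters.ListNodeTypes
	MinIdleInstances:                   1,
	IdleInstanceAutoterminationMinutes: 15,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "created instance pool %s", pool.InstancePoolId)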

func (CreateInstancePool) MarshalJSON added in v0.23.0

func (s CreateInstancePool) MarshalJSON() ([]byte, error)

func (*CreateInstancePool) UnmarshalJSON added in v0.23.0

func (s *CreateInstancePool) UnmarshalJSON(b []byte) error

type CreateInstancePoolResponse

type CreateInstancePoolResponse struct {
	// The ID of the created instance pool.
	InstancePoolId string `json:"instance_pool_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateInstancePoolResponse) MarshalJSON added in v0.23.0

func (s CreateInstancePoolResponse) MarshalJSON() ([]byte, error)

func (*CreateInstancePoolResponse) UnmarshalJSON added in v0.23.0

func (s *CreateInstancePoolResponse) UnmarshalJSON(b []byte) error

type CreatePolicy

type CreatePolicy struct {
	// Policy definition document expressed in [Databricks Cluster Policy
	// Definition Language].
	//
	// [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	Definition string `json:"definition,omitempty"`
	// Additional human-readable description of the cluster policy.
	Description string `json:"description,omitempty"`
	// A list of libraries to be installed on the next cluster restart that uses
	// this policy. The maximum number of libraries is 500.
	Libraries []Library `json:"libraries,omitempty"`
	// Max number of clusters per user that can be active using this policy. If
	// not present, there is no max limit.
	MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"`
	// Cluster Policy name requested by the user. This has to be unique. Length
	// must be between 1 and 100 characters.
	Name string `json:"name"`
	// Policy definition JSON document expressed in [Databricks Policy
	// Definition Language]. The JSON document must be passed as a string and
	// cannot be embedded in the requests.
	//
	// You can use this to customize the policy definition inherited from the
	// policy family. Policy rules specified here are merged into the inherited
	// policy definition.
	//
	// [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
	// ID of the policy family. The cluster policy's policy definition inherits
	// the policy family's policy definition.
	//
	// Cannot be used with `definition`. Use
	// `policy_family_definition_overrides` instead to customize the policy
	// definition.
	PolicyFamilyId string `json:"policy_family_id,omitempty"`

	ForceSendFields []string `json:"-"`
}
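
A hedged sketch of creating a policy. It assumes the workspace client exposes a ClusterPolicies service whose Create accepts this request; the definition document is only a minimal illustration of the policy definition language:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.ClusterPolicies.Create(ctx, compute.CreatePolicy{
	Name:       "sdk-example-policy",
	Definition: `{"autotermination_minutes": {"type": "fixed", "value": 20}}`,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "created policy %s", created.PolicyId)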

func (CreatePolicy) MarshalJSON added in v0.23.0

func (s CreatePolicy) MarshalJSON() ([]byte, error)

func (*CreatePolicy) UnmarshalJSON added in v0.23.0

func (s *CreatePolicy) UnmarshalJSON(b []byte) error

type CreatePolicyResponse

type CreatePolicyResponse struct {
	// Canonical unique identifier for the cluster policy.
	PolicyId string `json:"policy_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreatePolicyResponse) MarshalJSON added in v0.23.0

func (s CreatePolicyResponse) MarshalJSON() ([]byte, error)

func (*CreatePolicyResponse) UnmarshalJSON added in v0.23.0

func (s *CreatePolicyResponse) UnmarshalJSON(b []byte) error

type CreateResponse

type CreateResponse struct {
	// The global init script ID.
	ScriptId string `json:"script_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateResponse) MarshalJSON added in v0.23.0

func (s CreateResponse) MarshalJSON() ([]byte, error)

func (*CreateResponse) UnmarshalJSON added in v0.23.0

func (s *CreateResponse) UnmarshalJSON(b []byte) error

type Created

type Created struct {
	Id string `json:"id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Created) MarshalJSON added in v0.23.0

func (s Created) MarshalJSON() ([]byte, error)

func (*Created) UnmarshalJSON added in v0.23.0

func (s *Created) UnmarshalJSON(b []byte) error

type DataPlaneEventDetails

type DataPlaneEventDetails struct {
	// <needs content added>
	EventType DataPlaneEventDetailsEventType `json:"event_type,omitempty"`
	// <needs content added>
	ExecutorFailures int `json:"executor_failures,omitempty"`
	// <needs content added>
	HostId string `json:"host_id,omitempty"`
	// <needs content added>
	Timestamp int64 `json:"timestamp,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DataPlaneEventDetails) MarshalJSON added in v0.23.0

func (s DataPlaneEventDetails) MarshalJSON() ([]byte, error)

func (*DataPlaneEventDetails) UnmarshalJSON added in v0.23.0

func (s *DataPlaneEventDetails) UnmarshalJSON(b []byte) error

type DataPlaneEventDetailsEventType

type DataPlaneEventDetailsEventType string

<needs content added>

const DataPlaneEventDetailsEventTypeNodeBlacklisted DataPlaneEventDetailsEventType = `NODE_BLACKLISTED`
const DataPlaneEventDetailsEventTypeNodeExcludedDecommissioned DataPlaneEventDetailsEventType = `NODE_EXCLUDED_DECOMMISSIONED`

func (*DataPlaneEventDetailsEventType) Set

Set raw string value and validate it against allowed values

func (*DataPlaneEventDetailsEventType) String

String representation for fmt.Print

func (*DataPlaneEventDetailsEventType) Type

Type always returns DataPlaneEventDetailsEventType to satisfy [pflag.Value] interface

type DataSecurityMode

type DataSecurityMode string

Data security mode decides what data governance model to use when accessing data from a cluster.

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.

* `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.

* `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode, but programming languages and cluster features might be limited.

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed in future Databricks Runtime versions:

* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters.

* `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency clusters.

* `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on standard clusters.

* `LEGACY_SINGLE_USER_STANDARD`: This mode runs clusters without Unity Catalog or credential passthrough enabled.

const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH`

This mode is for users migrating from legacy Passthrough on high concurrency clusters.

const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER`

This mode is for users migrating from legacy Passthrough on standard clusters.

const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD`

This mode runs clusters without Unity Catalog or credential passthrough enabled.

const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL`

This mode is for users migrating from legacy Table ACL clusters.

const DataSecurityModeNone DataSecurityMode = `NONE`

No security isolation for multiple users sharing the cluster. Data governance features are not available in this mode.

const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER`

A secure cluster that can only be exclusively used by a single user specified in `single_user_name`. Most programming languages, cluster features and data governance features are available in this mode.

const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION`

A secure cluster that can be shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data and credentials. Most data governance features are supported in this mode. But programming languages and cluster features might be limited.

func (*DataSecurityMode) Set

func (f *DataSecurityMode) Set(v string) error

Set raw string value and validate it against allowed values

func (*DataSecurityMode) String

func (f *DataSecurityMode) String() string

String representation for fmt.Print

func (*DataSecurityMode) Type

func (f *DataSecurityMode) Type() string

Type always returns DataSecurityMode to satisfy [pflag.Value] interface
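
Because DataSecurityMode implements the [pflag.Value] interface, a raw string can be parsed and validated with Set; a small sketch (the input string is arbitrary, fmt import assumed):

var mode compute.DataSecurityMode
if err := mode.Set("SINGLE_USER"); err != nil {
	panic(err) // returned for values outside the allowed set
}
fmt.Printf("mode=%s type=%s\n", mode.String(), mode.Type())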

type DbfsStorageInfo

type DbfsStorageInfo struct {
	// dbfs destination, e.g. `dbfs:/my/path`
	Destination string `json:"destination"`
}

type DeleteCluster

type DeleteCluster struct {
	// The cluster to be terminated.
	ClusterId string `json:"cluster_id"`
}

type DeleteClusterResponse added in v0.34.0

type DeleteClusterResponse struct {
}

type DeleteGlobalInitScriptRequest

type DeleteGlobalInitScriptRequest struct {
	// The ID of the global init script.
	ScriptId string `json:"-" url:"-"`
}

Delete init script

type DeleteInstancePool

type DeleteInstancePool struct {
	// The instance pool to be terminated.
	InstancePoolId string `json:"instance_pool_id"`
}

type DeleteInstancePoolResponse added in v0.34.0

type DeleteInstancePoolResponse struct {
}

type DeletePolicy

type DeletePolicy struct {
	// The ID of the policy to delete.
	PolicyId string `json:"policy_id"`
}

type DeletePolicyResponse added in v0.34.0

type DeletePolicyResponse struct {
}

type DeleteResponse added in v0.34.0

type DeleteResponse struct {
}

type DestroyContext

type DestroyContext struct {
	ClusterId string `json:"clusterId"`

	ContextId string `json:"contextId"`
}

type DestroyResponse added in v0.34.0

type DestroyResponse struct {
}

type DiskSpec

type DiskSpec struct {
	// The number of disks launched for each instance: - This feature is only
	// enabled for supported node types. - Users can choose up to the limit of
	// the disks supported by the node type. - For node types with no OS disk,
	// at least one disk must be specified; otherwise, cluster creation will
	// fail.
	//
	// If disks are attached, Databricks will configure Spark to use only the
	// disks for scratch storage, because heterogeneously sized scratch devices
	// can lead to inefficient disk utilization. If no disks are attached,
	// Databricks will configure Spark to use instance store disks.
	//
	// Note: If disks are specified, then the Spark configuration
	// `spark.local.dir` will be overridden.
	//
	// Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, etc. - For Azure:
	// `/remote_volume0`, `/remote_volume1`, etc.
	DiskCount int `json:"disk_count,omitempty"`

	DiskIops int `json:"disk_iops,omitempty"`
	// The size of each disk (in GiB) launched for each instance. Values must
	// fall into the supported range for a particular instance type.
	//
	// For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized
	// HDD: 500 - 4096 GiB
	//
	// For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1 -
	// 1023 GiB
	DiskSize int `json:"disk_size,omitempty"`

	DiskThroughput int `json:"disk_throughput,omitempty"`
	// The type of disks that will be launched with this cluster.
	DiskType *DiskType `json:"disk_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DiskSpec) MarshalJSON added in v0.23.0

func (s DiskSpec) MarshalJSON() ([]byte, error)

func (*DiskSpec) UnmarshalJSON added in v0.23.0

func (s *DiskSpec) UnmarshalJSON(b []byte) error
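
A minimal sketch of a DiskSpec for AWS general purpose SSDs, using only the fields documented above; the disk count and size are placeholder values:

spec := compute.DiskSpec{
	DiskCount: 2,   // placeholder; up to the node type's supported disk limit
	DiskSize:  100, // GiB; must fall in the supported range for the volume type
	DiskType: &compute.DiskType{
		EbsVolumeType: compute.DiskTypeEbsVolumeTypeGeneralPurposeSsd,
	},
}
_ = spec // set as the disk_spec on an instance pool create or edit request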

type DiskType

type DiskType struct {
	AzureDiskVolumeType DiskTypeAzureDiskVolumeType `json:"azure_disk_volume_type,omitempty"`

	EbsVolumeType DiskTypeEbsVolumeType `json:"ebs_volume_type,omitempty"`
}

type DiskTypeAzureDiskVolumeType

type DiskTypeAzureDiskVolumeType string
const DiskTypeAzureDiskVolumeTypePremiumLrs DiskTypeAzureDiskVolumeType = `PREMIUM_LRS`
const DiskTypeAzureDiskVolumeTypeStandardLrs DiskTypeAzureDiskVolumeType = `STANDARD_LRS`

func (*DiskTypeAzureDiskVolumeType) Set

func (f *DiskTypeAzureDiskVolumeType) Set(v string) error

Set raw string value and validate it against allowed values

func (*DiskTypeAzureDiskVolumeType) String

func (f *DiskTypeAzureDiskVolumeType) String() string

String representation for fmt.Print

func (*DiskTypeAzureDiskVolumeType) Type

func (f *DiskTypeAzureDiskVolumeType) Type() string

Type always returns DiskTypeAzureDiskVolumeType to satisfy [pflag.Value] interface

type DiskTypeEbsVolumeType

type DiskTypeEbsVolumeType string
const DiskTypeEbsVolumeTypeGeneralPurposeSsd DiskTypeEbsVolumeType = `GENERAL_PURPOSE_SSD`
const DiskTypeEbsVolumeTypeThroughputOptimizedHdd DiskTypeEbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD`

func (*DiskTypeEbsVolumeType) Set

func (f *DiskTypeEbsVolumeType) Set(v string) error

Set raw string value and validate it against allowed values

func (*DiskTypeEbsVolumeType) String

func (f *DiskTypeEbsVolumeType) String() string

String representation for fmt.Print

func (*DiskTypeEbsVolumeType) Type

func (f *DiskTypeEbsVolumeType) Type() string

Type always returns DiskTypeEbsVolumeType to satisfy [pflag.Value] interface

type DockerBasicAuth

type DockerBasicAuth struct {
	// Password of the user
	Password string `json:"password,omitempty"`
	// Name of the user
	Username string `json:"username,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DockerBasicAuth) MarshalJSON added in v0.23.0

func (s DockerBasicAuth) MarshalJSON() ([]byte, error)

func (*DockerBasicAuth) UnmarshalJSON added in v0.23.0

func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error

type DockerImage

type DockerImage struct {
	BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"`
	// URL of the docker image.
	Url string `json:"url,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DockerImage) MarshalJSON added in v0.23.0

func (s DockerImage) MarshalJSON() ([]byte, error)

func (*DockerImage) UnmarshalJSON added in v0.23.0

func (s *DockerImage) UnmarshalJSON(b []byte) error
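
A minimal sketch of a custom container image reference with basic registry authentication; the URL and credentials are placeholders (os import assumed), and in practice the password should come from a secret rather than being hard-coded:

image := compute.DockerImage{
	Url: "my-registry.example.com/ml/base:1.0", // placeholder image URL
	BasicAuth: &compute.DockerBasicAuth{
		Username: "svc-docker",                     // placeholder user
		Password: os.Getenv("DOCKER_REGISTRY_PWD"), // avoid hard-coding secrets
	},
}
_ = image // set as the docker_image on a cluster create or edit request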

type EbsVolumeType

type EbsVolumeType string

The type of EBS volumes that will be launched with this cluster.

const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD`
const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD`

func (*EbsVolumeType) Set

func (f *EbsVolumeType) Set(v string) error

Set raw string value and validate it against allowed values

func (*EbsVolumeType) String

func (f *EbsVolumeType) String() string

String representation for fmt.Print

func (*EbsVolumeType) Type

func (f *EbsVolumeType) Type() string

Type always returns EbsVolumeType to satisfy [pflag.Value] interface

type EditCluster

type EditCluster struct {
	// When set to true, fixed and default values from the policy will be used
	// for fields that are omitted. When set to false, only fixed values from
	// the policy will be applied.
	ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// Automatically terminates the cluster after it is inactive for this time
	// in minutes. If not set, this cluster will not be automatically
	// terminated. If specified, the threshold must be between 10 and 10000
	// minutes. Users can also set this value to 0 to explicitly disable
	// automatic termination.
	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
	// ID of the cluster
	ClusterId string `json:"cluster_id"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
	// one destination can be specified for one cluster. If the conf is given,
	// the logs will be delivered to the destination every `5 mins`. The
	// destination of driver logs is `$destination/$clusterId/driver`, while the
	// destination of executor logs is `$destination/$clusterId/executor`.
	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
	// Cluster name requested by the user. This doesn't have to be unique. If
	// not specified at creation, the cluster name will be an empty string.
	ClusterName string `json:"cluster_name,omitempty"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	//
	// - Clusters can only reuse cloud resources if the resources' tags are a
	// subset of the cluster tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Data security mode decides what data governance model to use when
	// accessing data from a cluster.
	//
	// * `NONE`: No security isolation for multiple users sharing the cluster.
	// Data governance features are not available in this mode.
	//
	// * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
	// single user specified in `single_user_name`. Most programming languages,
	// cluster features and data governance features are available in this
	// mode.
	//
	// * `USER_ISOLATION`: A secure cluster that can be shared by multiple
	// users. Cluster users are fully isolated so that they cannot see each
	// other's data and credentials. Most data governance features are
	// supported in this mode, but programming languages and cluster features
	// might be limited.
	//
	// The following modes are deprecated starting with Databricks Runtime 15.0
	// and will be removed in future Databricks Runtime versions:
	//
	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
	// ACL clusters.
	//
	// * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy
	// Passthrough on high concurrency clusters.
	//
	// * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
	// Passthrough on standard clusters.
	//
	// * `LEGACY_SINGLE_USER_STANDARD`: This mode runs clusters without Unity
	// Catalog or credential passthrough enabled.
	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`

	DockerImage *DockerImage `json:"docker_image,omitempty"`
	// The optional ID of the instance pool to which the cluster's driver
	// belongs. The cluster uses the instance pool with id (instance_pool_id)
	// if the driver pool is not assigned.
	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
	// The node type of the Spark driver. Note that this field is optional; if
	// unset, the driver node type will be set as the same value as
	// `node_type_id` defined above.
	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
	// Autoscaling Local Storage: when enabled, this cluster will dynamically
	// acquire additional disk space when its Spark workers are running low on
	// disk space. This feature requires specific AWS permissions to function
	// correctly - refer to the User Guide for more details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Whether to enable LUKS on cluster VMs' local disks
	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
	// to `<destination>/<cluster-ID>/init_scripts`.
	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
	// The optional ID of the instance pool to which the cluster belongs.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`
	// The ID of the cluster policy used to create the cluster if applicable.
	PolicyId string `json:"policy_id,omitempty"`
	// Decides which runtime engine to use, e.g. Standard vs. Photon. If
	// unspecified, the runtime engine is inferred from spark_version.
	RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
	// Single user name if data_security_mode is `SINGLE_USER`
	SingleUserName string `json:"single_user_name,omitempty"`
	// An object containing a set of optional, user-specified Spark
	// configuration key-value pairs. Users can also pass in a string of extra
	// JVM options to the driver and the executors via
	// `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
	// respectively.
	SparkConf map[string]string `json:"spark_conf,omitempty"`
	// An object containing a set of optional, user-specified environment
	// variable key-value pairs. Please note that key-value pair of the form
	// (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
	// driver and workers.
	//
	// In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
	// recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
	// example below. This ensures that all default Databricks-managed
	// environment variables are included as well.
	//
	// Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
	// "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
	// "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
	SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
	// The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
	// available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	SparkVersion string `json:"spark_version"`
	// SSH public key contents that will be added to each Spark node in this
	// cluster. The corresponding private keys can be used to login with the
	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
	SshPublicKeys []string `json:"ssh_public_keys,omitempty"`

	WorkloadType *WorkloadType `json:"workload_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EditCluster) MarshalJSON added in v0.23.0

func (s EditCluster) MarshalJSON() ([]byte, error)

func (*EditCluster) UnmarshalJSON added in v0.23.0

func (s *EditCluster) UnmarshalJSON(b []byte) error
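
A minimal sketch of an edit request that switches a cluster to autoscaling and adds Spark settings; the cluster ID, name, node type, and Spark version are placeholders, and the resulting struct would then be passed to the Clusters edit call on a workspace client (error handling as in the examples elsewhere in this package):

edit := compute.EditCluster{
	ClusterId:    "1234-567890-abcde123", // placeholder cluster ID
	ClusterName:  "shared-etl",           // placeholder name
	SparkVersion: "13.3.x-scala2.12",     // placeholder; see :method:clusters/sparkVersions
	NodeTypeId:   "i3.xlarge",            // placeholder; see :method:clusters/listNodeTypes
	Autoscale: &compute.AutoScale{
		MinWorkers: 2,
		MaxWorkers: 8,
	},
	AutoterminationMinutes: 60,
	DataSecurityMode:       compute.DataSecurityModeUserIsolation,
	SparkConf: map[string]string{
		"spark.sql.shuffle.partitions": "200",
	},
	SparkEnvVars: map[string]string{
		"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true",
	},
}
_ = edit // pass to the Clusters edit call on a workspace client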

type EditClusterResponse added in v0.34.0

type EditClusterResponse struct {
}

type EditInstancePool

type EditInstancePool struct {
	// Additional tags for pool resources. Databricks will tag all pool
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Automatically terminates the extra instances in the pool cache after they
	// are inactive for this time in minutes if min_idle_instances requirement
	// is already met. If not set, the extra pool instances will be
	// automatically terminated after a default timeout. If specified, the
	// threshold must be between 0 and 10000 minutes. Users can also set this
	// value to 0 to instantly remove idle instances from the cache, as long as
	// the minimum cache size can still be satisfied.
	IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
	// Instance pool ID
	InstancePoolId string `json:"instance_pool_id"`
	// Pool name requested by the user. Pool name must be unique. Length must be
	// between 1 and 100 characters.
	InstancePoolName string `json:"instance_pool_name"`
	// Maximum number of outstanding instances to keep in the pool, including
	// both instances used by clusters and idle instances. Clusters that require
	// further instance provisioning will fail during upsize requests.
	MaxCapacity int `json:"max_capacity,omitempty"`
	// Minimum number of idle instances to keep in the instance pool
	MinIdleInstances int `json:"min_idle_instances,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id"`

	ForceSendFields []string `json:"-"`
}

func (EditInstancePool) MarshalJSON added in v0.23.0

func (s EditInstancePool) MarshalJSON() ([]byte, error)

func (*EditInstancePool) UnmarshalJSON added in v0.23.0

func (s *EditInstancePool) UnmarshalJSON(b []byte) error
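
A minimal sketch of an instance pool edit request using the fields above; the pool ID, name, node type, and sizes are placeholders:

editPool := compute.EditInstancePool{
	InstancePoolId:                     "pool-0123456789", // placeholder pool ID
	InstancePoolName:                   "shared-i3-pool",  // must remain unique in the workspace
	NodeTypeId:                         "i3.xlarge",       // placeholder node type
	MinIdleInstances:                   1,
	MaxCapacity:                        20,
	IdleInstanceAutoterminationMinutes: 30,
	CustomTags: map[string]string{
		"team": "data-eng", // counts toward the 45 custom tag limit
	},
}
_ = editPool // pass to the InstancePools edit call on a workspace client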

type EditInstancePoolResponse added in v0.34.0

type EditInstancePoolResponse struct {
}

type EditPolicy

type EditPolicy struct {
	// Policy definition document expressed in [Databricks Cluster Policy
	// Definition Language].
	//
	// [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	Definition string `json:"definition,omitempty"`
	// Additional human-readable description of the cluster policy.
	Description string `json:"description,omitempty"`
	// A list of libraries to be installed on the next cluster restart that uses
	// this policy. The maximum number of libraries is 500.
	Libraries []Library `json:"libraries,omitempty"`
	// Max number of clusters per user that can be active using this policy. If
	// not present, there is no max limit.
	MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"`
	// Cluster Policy name requested by the user. This has to be unique. Length
	// must be between 1 and 100 characters.
	Name string `json:"name"`
	// Policy definition JSON document expressed in [Databricks Policy
	// Definition Language]. The JSON document must be passed as a string and
	// cannot be embedded in the requests.
	//
	// You can use this to customize the policy definition inherited from the
	// policy family. Policy rules specified here are merged into the inherited
	// policy definition.
	//
	// [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
	// ID of the policy family. The cluster policy's policy definition inherits
	// the policy family's policy definition.
	//
	// Cannot be used with `definition`. Use
	// `policy_family_definition_overrides` instead to customize the policy
	// definition.
	PolicyFamilyId string `json:"policy_family_id,omitempty"`
	// The ID of the policy to update.
	PolicyId string `json:"policy_id"`

	ForceSendFields []string `json:"-"`
}

func (EditPolicy) MarshalJSON added in v0.23.0

func (s EditPolicy) MarshalJSON() ([]byte, error)

func (*EditPolicy) UnmarshalJSON added in v0.23.0

func (s *EditPolicy) UnmarshalJSON(b []byte) error
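
A minimal sketch of a policy edit that supplies a standalone definition rather than a policy family; the policy ID, name, and rule are placeholders, and the definition must be passed as a JSON string in Databricks Policy Definition Language (encoding/json import assumed):

definition, err := json.Marshal(map[string]any{
	"autotermination_minutes": map[string]any{
		"type":     "range",
		"maxValue": 120, // placeholder upper bound
	},
})
if err != nil {
	panic(err)
}

editPolicy := compute.EditPolicy{
	PolicyId:           "ABC123DEF456",       // placeholder policy ID
	Name:               "capped-interactive", // placeholder name
	Definition:         string(definition),
	MaxClustersPerUser: 5,
}
_ = editPolicy // pass to the cluster policies Edit call on a workspace client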

type EditPolicyResponse added in v0.34.0

type EditPolicyResponse struct {
}

type EditResponse added in v0.34.0

type EditResponse struct {
}

type Environment added in v0.38.0

type Environment struct {
	// Client version used by the environment. The client is the user-facing
	// environment of the runtime. Each client comes with a specific set of
	// pre-installed libraries. The version is a string, consisting of the major
	// client version.
	Client string `json:"client"`
	// List of pip dependencies, as supported by the version of pip in this
	// environment. Each dependency is a line in a pip requirements file, see
	// https://pip.pypa.io/en/stable/reference/requirements-file-format/.
	// Allowed dependencies include a <requirement specifier>, an <archive
	// url/path>, a <local project path> (WSFS or Volumes in Databricks), or a
	// <vcs project url>. E.g. dependencies: ["foo==0.0.1", "-r
	// /Workspace/test/requirements.txt"]
	Dependencies []string `json:"dependencies,omitempty"`
}

The environment entity used to preserve the serverless environment side panel and the environment of jobs' non-notebook tasks. In this minimal environment spec, only pip dependencies are supported.
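
A minimal sketch of an environment spec; the client version and dependency pins are placeholders, and the dependency strings follow the pip requirements format described above:

env := compute.Environment{
	Client: "1", // placeholder major client version
	Dependencies: []string{
		"foo==0.0.1",
		"-r /Workspace/test/requirements.txt",
	},
}
_ = env // attach as the environment of a serverless job's non-notebook task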

type EventDetails

type EventDetails struct {
	// * For created clusters, the attributes of the cluster. * For edited
	// clusters, the new attributes of the cluster.
	Attributes *ClusterAttributes `json:"attributes,omitempty"`
	// The cause of a change in target size.
	Cause EventDetailsCause `json:"cause,omitempty"`
	// The actual cluster size that was set in the cluster creation or edit.
	ClusterSize *ClusterSize `json:"cluster_size,omitempty"`
	// The current number of vCPUs in the cluster.
	CurrentNumVcpus int `json:"current_num_vcpus,omitempty"`
	// The current number of nodes in the cluster.
	CurrentNumWorkers int `json:"current_num_workers,omitempty"`
	// <needs content added>
	DidNotExpandReason string `json:"did_not_expand_reason,omitempty"`
	// Current disk size in bytes
	DiskSize int64 `json:"disk_size,omitempty"`
	// More details about the change in driver's state
	DriverStateMessage string `json:"driver_state_message,omitempty"`
	// Whether or not a blocklisted node should be terminated. For
	// ClusterEventType NODE_BLACKLISTED.
	EnableTerminationForNodeBlocklisted bool `json:"enable_termination_for_node_blocklisted,omitempty"`
	// <needs content added>
	FreeSpace int64 `json:"free_space,omitempty"`
	// List of global and cluster init scripts associated with this cluster
	// event.
	InitScripts *InitScriptEventDetails `json:"init_scripts,omitempty"`
	// Instance Id where the event originated from
	InstanceId string `json:"instance_id,omitempty"`
	// Unique identifier of the specific job run associated with this cluster
	// event. For clusters created for jobs, this will be the same as the
	// cluster name.
	JobRunName string `json:"job_run_name,omitempty"`
	// The cluster attributes before a cluster was edited.
	PreviousAttributes *ClusterAttributes `json:"previous_attributes,omitempty"`
	// The size of the cluster before an edit or resize.
	PreviousClusterSize *ClusterSize `json:"previous_cluster_size,omitempty"`
	// Previous disk size in bytes
	PreviousDiskSize int64 `json:"previous_disk_size,omitempty"`
	// A termination reason: * On a TERMINATED event, this is the reason of the
	// termination. * On a RESIZE_COMPLETE event, this indicates the reason that
	// we failed to acquire some nodes.
	Reason *TerminationReason `json:"reason,omitempty"`
	// The targeted number of vCPUs in the cluster.
	TargetNumVcpus int `json:"target_num_vcpus,omitempty"`
	// The targeted number of nodes in the cluster.
	TargetNumWorkers int `json:"target_num_workers,omitempty"`
	// The user that caused the event to occur. (Empty if it was done by the
	// control plane.)
	User string `json:"user,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EventDetails) MarshalJSON added in v0.23.0

func (s EventDetails) MarshalJSON() ([]byte, error)

func (*EventDetails) UnmarshalJSON added in v0.23.0

func (s *EventDetails) UnmarshalJSON(b []byte) error

type EventDetailsCause

type EventDetailsCause string

The cause of a change in target size.

const EventDetailsCauseAutorecovery EventDetailsCause = `AUTORECOVERY`
const EventDetailsCauseAutoscale EventDetailsCause = `AUTOSCALE`
const EventDetailsCauseReplaceBadNodes EventDetailsCause = `REPLACE_BAD_NODES`
const EventDetailsCauseUserRequest EventDetailsCause = `USER_REQUEST`

func (*EventDetailsCause) Set

func (f *EventDetailsCause) Set(v string) error

Set raw string value and validate it against allowed values

func (*EventDetailsCause) String

func (f *EventDetailsCause) String() string

String representation for fmt.Print

func (*EventDetailsCause) Type

func (f *EventDetailsCause) Type() string

Type always returns EventDetailsCause to satisfy [pflag.Value] interface

type EventType

type EventType string
const EventTypeAutoscalingStatsReport EventType = `AUTOSCALING_STATS_REPORT`
const EventTypeCreating EventType = `CREATING`
const EventTypeDbfsDown EventType = `DBFS_DOWN`
const EventTypeDidNotExpandDisk EventType = `DID_NOT_EXPAND_DISK`
const EventTypeDriverHealthy EventType = `DRIVER_HEALTHY`
const EventTypeDriverNotResponding EventType = `DRIVER_NOT_RESPONDING`
const EventTypeDriverUnavailable EventType = `DRIVER_UNAVAILABLE`
const EventTypeEdited EventType = `EDITED`
const EventTypeExpandedDisk EventType = `EXPANDED_DISK`
const EventTypeFailedToExpandDisk EventType = `FAILED_TO_EXPAND_DISK`
const EventTypeInitScriptsFinished EventType = `INIT_SCRIPTS_FINISHED`
const EventTypeInitScriptsStarted EventType = `INIT_SCRIPTS_STARTED`
const EventTypeMetastoreDown EventType = `METASTORE_DOWN`
const EventTypeNodeBlacklisted EventType = `NODE_BLACKLISTED`
const EventTypeNodeExcludedDecommissioned EventType = `NODE_EXCLUDED_DECOMMISSIONED`
const EventTypeNodesLost EventType = `NODES_LOST`
const EventTypePinned EventType = `PINNED`
const EventTypeResizing EventType = `RESIZING`
const EventTypeRestarting EventType = `RESTARTING`
const EventTypeRunning EventType = `RUNNING`
const EventTypeSparkException EventType = `SPARK_EXCEPTION`
const EventTypeStarting EventType = `STARTING`
const EventTypeTerminating EventType = `TERMINATING`
const EventTypeUnpinned EventType = `UNPINNED`
const EventTypeUpsizeCompleted EventType = `UPSIZE_COMPLETED`

func (*EventType) Set

func (f *EventType) Set(v string) error

Set raw string value and validate it against allowed values

func (*EventType) String

func (f *EventType) String() string

String representation for fmt.Print

func (*EventType) Type

func (f *EventType) Type() string

Type always returns EventType to satisfy [pflag.Value] interface

type GcpAttributes

type GcpAttributes struct {
	// This field determines whether the instance pool will contain preemptible
	// VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs
	// if the former is unavailable.
	Availability GcpAvailability `json:"availability,omitempty"`
	// boot disk size in GB
	BootDiskSize int `json:"boot_disk_size,omitempty"`
	// If provided, the cluster will impersonate the google service account when
	// accessing gcloud services (like GCS). The google service account must
	// have previously been added to the Databricks environment by an account
	// administrator.
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// If provided, each node (workers and driver) in the cluster will have this
	// number of local SSDs attached. Each local SSD is 375GB in size. Refer to
	// [GCP documentation] for the supported number of local SSDs for each
	// instance type.
	//
	// [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
	LocalSsdCount int `json:"local_ssd_count,omitempty"`
	// This field determines whether the spark executors will be scheduled to
	// run on preemptible VMs (when set to true) versus standard compute engine
	// VMs (when set to false; default). Note: Soon to be deprecated, use the
	// availability field instead.
	UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"`
	// Identifier for the availability zone in which the cluster resides. This
	// can be one of the following: - "HA" => High availability, spread nodes
	// across availability zones for a Databricks deployment region [default] -
	// "AUTO" => Databricks picks an availability zone to schedule the cluster
	// on. - A GCP availability zone => Pick one of the available zones for
	// (machine type + region) from
	// https://cloud.google.com/compute/docs/regions-zones.
	ZoneId string `json:"zone_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GcpAttributes) MarshalJSON added in v0.23.0

func (s GcpAttributes) MarshalJSON() ([]byte, error)

func (*GcpAttributes) UnmarshalJSON added in v0.23.0

func (s *GcpAttributes) UnmarshalJSON(b []byte) error
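
A minimal sketch of GCP attributes for preemptible capacity with a fallback to on-demand VMs; the service account and sizes are placeholders:

gcp := compute.GcpAttributes{
	Availability:         compute.GcpAvailabilityPreemptibleWithFallbackGcp,
	ZoneId:               "AUTO", // let Databricks pick the availability zone
	BootDiskSize:         100,    // GB; placeholder
	LocalSsdCount:        1,      // each local SSD is 375GB
	GoogleServiceAccount: "sa-name@my-project.iam.gserviceaccount.com", // placeholder
}
_ = gcp // set as gcp_attributes on a cluster create or edit request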

type GcpAvailability

type GcpAvailability string

This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable.

const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP`
const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP`
const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP`

func (*GcpAvailability) Set

func (f *GcpAvailability) Set(v string) error

Set raw string value and validate it against allowed values

func (*GcpAvailability) String

func (f *GcpAvailability) String() string

String representation for fmt.Print

func (*GcpAvailability) Type

func (f *GcpAvailability) Type() string

Type always returns GcpAvailability to satisfy [pflag.Value] interface

type GcsStorageInfo added in v0.31.0

type GcsStorageInfo struct {
	// GCS destination/URI, e.g. `gs://my-bucket/some-prefix`
	Destination string `json:"destination"`
}

type GetClusterPermissionLevelsRequest added in v0.15.0

type GetClusterPermissionLevelsRequest struct {
	// The cluster for which to get or manage permissions.
	ClusterId string `json:"-" url:"-"`
}

Get cluster permission levels

type GetClusterPermissionLevelsResponse added in v0.15.0

type GetClusterPermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []ClusterPermissionsDescription `json:"permission_levels,omitempty"`
}

type GetClusterPermissionsRequest added in v0.15.0

type GetClusterPermissionsRequest struct {
	// The cluster for which to get or manage permissions.
	ClusterId string `json:"-" url:"-"`
}

Get cluster permissions

type GetClusterPolicyPermissionLevelsRequest added in v0.15.0

type GetClusterPolicyPermissionLevelsRequest struct {
	// The cluster policy for which to get or manage permissions.
	ClusterPolicyId string `json:"-" url:"-"`
}

Get cluster policy permission levels

type GetClusterPolicyPermissionLevelsResponse added in v0.15.0

type GetClusterPolicyPermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []ClusterPolicyPermissionsDescription `json:"permission_levels,omitempty"`
}

type GetClusterPolicyPermissionsRequest added in v0.15.0

type GetClusterPolicyPermissionsRequest struct {
	// The cluster policy for which to get or manage permissions.
	ClusterPolicyId string `json:"-" url:"-"`
}

Get cluster policy permissions

type GetClusterPolicyRequest

type GetClusterPolicyRequest struct {
	// Canonical unique identifier for the cluster policy.
	PolicyId string `json:"-" url:"policy_id"`
}

Get a cluster policy

type GetClusterRequest

type GetClusterRequest struct {
	// The cluster about which to retrieve information.
	ClusterId string `json:"-" url:"cluster_id"`
}

Get cluster info

type GetEvents

type GetEvents struct {
	// The ID of the cluster to retrieve events about.
	ClusterId string `json:"cluster_id"`
	// The end time in epoch milliseconds. If empty, returns events up to the
	// current time.
	EndTime int64 `json:"end_time,omitempty"`
	// An optional set of event types to filter on. If empty, all event types
	// are returned.
	EventTypes []EventType `json:"event_types,omitempty"`
	// The maximum number of events to include in a page of events. Defaults to
	// 50, and maximum allowed value is 500.
	Limit int64 `json:"limit,omitempty"`
	// The offset in the result set. Defaults to 0 (no offset). When an offset
	// is specified and the results are requested in descending order, the
	// end_time field is required.
	Offset int64 `json:"offset,omitempty"`
	// The order to list events in; either "ASC" or "DESC". Defaults to "DESC".
	Order GetEventsOrder `json:"order,omitempty"`
	// The start time in epoch milliseconds. If empty, returns events starting
	// from the beginning of time.
	StartTime int64 `json:"start_time,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetEvents) MarshalJSON added in v0.23.0

func (s GetEvents) MarshalJSON() ([]byte, error)

func (*GetEvents) UnmarshalJSON added in v0.23.0

func (s *GetEvents) UnmarshalJSON(b []byte) error

type GetEventsOrder

type GetEventsOrder string

The order to list events in; either "ASC" or "DESC". Defaults to "DESC".

const GetEventsOrderAsc GetEventsOrder = `ASC`
const GetEventsOrderDesc GetEventsOrder = `DESC`

func (*GetEventsOrder) Set

func (f *GetEventsOrder) Set(v string) error

Set raw string value and validate it against allowed values

func (*GetEventsOrder) String

func (f *GetEventsOrder) String() string

String representation for fmt.Print

func (*GetEventsOrder) Type

func (f *GetEventsOrder) Type() string

Type always returns GetEventsOrder to satisfy [pflag.Value] interface

type GetEventsResponse

type GetEventsResponse struct {
	// <content needs to be added>
	Events []ClusterEvent `json:"events,omitempty"`
	// The parameters required to retrieve the next page of events. Omitted if
	// there are no more events to read.
	NextPage *GetEvents `json:"next_page,omitempty"`
	// The total number of events filtered by the start_time, end_time, and
	// event_types.
	TotalCount int64 `json:"total_count,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetEventsResponse) MarshalJSON added in v0.23.0

func (s GetEventsResponse) MarshalJSON() ([]byte, error)

func (*GetEventsResponse) UnmarshalJSON added in v0.23.0

func (s *GetEventsResponse) UnmarshalJSON(b []byte) error
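
A minimal sketch of an events request that filters by event type; the cluster ID is a placeholder, and the request would be issued through the cluster events API on a workspace client:

req := compute.GetEvents{
	ClusterId:  "1234-567890-abcde123", // placeholder cluster ID
	EventTypes: []compute.EventType{compute.EventTypeRunning, compute.EventTypeTerminating},
	Order:      compute.GetEventsOrderDesc,
	Limit:      50,
}
// Each GetEventsResponse may carry NextPage, which holds the exact GetEvents
// parameters for the following page; reissue the request with those
// parameters until NextPage is omitted.
_ = req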

type GetGlobalInitScriptRequest

type GetGlobalInitScriptRequest struct {
	// The ID of the global init script.
	ScriptId string `json:"-" url:"-"`
}

Get an init script

type GetInstancePool

type GetInstancePool struct {
	// Attributes related to instance pools running on Amazon Web Services. If
	// not specified at pool creation, a set of default values will be used.
	AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to instance pools running on Azure. If not specified
	// at pool creation, a set of default values will be used.
	AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"`
	// Additional tags for pool resources. Databricks will tag all pool
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Tags that are added by Databricks regardless of any `custom_tags`,
	// including:
	//
	// - Vendor: Databricks
	//
	// - InstancePoolCreator: <user_id_of_creator>
	//
	// - InstancePoolName: <name_of_pool>
	//
	// - InstancePoolId: <id_of_pool>
	DefaultTags map[string]string `json:"default_tags,omitempty"`
	// Defines the specification of the disks that will be attached to all spark
	// containers.
	DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
	// Autoscaling Local Storage: when enabled, the instances in this pool will
	// dynamically acquire additional disk space when their Spark workers are
	// running low on disk space. In AWS, this feature requires specific AWS
	// permissions to function correctly - refer to the User Guide for more
	// details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Attributes related to instance pools running on Google Cloud Platform. If
	// not specified at pool creation, a set of default values will be used.
	GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"`
	// Automatically terminates the extra instances in the pool cache after they
	// are inactive for this time in minutes if min_idle_instances requirement
	// is already met. If not set, the extra pool instances will be
	// automatically terminated after a default timeout. If specified, the
	// threshold must be between 0 and 10000 minutes. Users can also set this
	// value to 0 to instantly remove idle instances from the cache, as long as
	// the minimum cache size can still be satisfied.
	IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
	// Canonical unique identifier for the pool.
	InstancePoolId string `json:"instance_pool_id"`
	// Pool name requested by the user. Pool name must be unique. Length must be
	// between 1 and 100 characters.
	InstancePoolName string `json:"instance_pool_name,omitempty"`
	// Maximum number of outstanding instances to keep in the pool, including
	// both instances used by clusters and idle instances. Clusters that require
	// further instance provisioning will fail during upsize requests.
	MaxCapacity int `json:"max_capacity,omitempty"`
	// Minimum number of idle instances to keep in the instance pool
	MinIdleInstances int `json:"min_idle_instances,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Custom Docker Image BYOC
	PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"`
	// A list containing at most one preloaded Spark image version for the pool.
	// Pool-backed clusters started with the preloaded Spark version will start
	// faster. A list of available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`
	// Current state of the instance pool.
	State InstancePoolState `json:"state,omitempty"`
	// Usage statistics about the instance pool.
	Stats *InstancePoolStats `json:"stats,omitempty"`
	// Status of failed pending instances in the pool.
	Status *InstancePoolStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetInstancePool) MarshalJSON added in v0.23.0

func (s GetInstancePool) MarshalJSON() ([]byte, error)

func (*GetInstancePool) UnmarshalJSON added in v0.23.0

func (s *GetInstancePool) UnmarshalJSON(b []byte) error

type GetInstancePoolPermissionLevelsRequest added in v0.15.0

type GetInstancePoolPermissionLevelsRequest struct {
	// The instance pool for which to get or manage permissions.
	InstancePoolId string `json:"-" url:"-"`
}

Get instance pool permission levels

type GetInstancePoolPermissionLevelsResponse added in v0.15.0

type GetInstancePoolPermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []InstancePoolPermissionsDescription `json:"permission_levels,omitempty"`
}

type GetInstancePoolPermissionsRequest added in v0.15.0

type GetInstancePoolPermissionsRequest struct {
	// The instance pool for which to get or manage permissions.
	InstancePoolId string `json:"-" url:"-"`
}

Get instance pool permissions

type GetInstancePoolRequest

type GetInstancePoolRequest struct {
	// The canonical unique identifier for the instance pool.
	InstancePoolId string `json:"-" url:"instance_pool_id"`
}

Get instance pool information

type GetPolicyFamilyRequest

type GetPolicyFamilyRequest struct {
	PolicyFamilyId string `json:"-" url:"-"`
}

Get policy family information

type GetSparkVersionsResponse

type GetSparkVersionsResponse struct {
	// All the available Spark versions.
	Versions []SparkVersion `json:"versions,omitempty"`
}

func (GetSparkVersionsResponse) Select

Select returns the latest Spark version matching the request parameters

type GlobalInitScriptCreateRequest

type GlobalInitScriptCreateRequest struct {
	// Specifies whether the script is enabled. The script runs only if enabled.
	Enabled bool `json:"enabled,omitempty"`
	// The name of the script
	Name string `json:"name"`
	// The position of a global init script, where 0 represents the first script
	// to run, 1 is the second script to run, in ascending order.
	//
	// If you omit the numeric position for a new global init script, it
	// defaults to last position. It will run after all current scripts. Setting
	// any value greater than the position of the last script is equivalent to
	// the last position. Example: Take three existing scripts with positions 0,
	// 1, and 2. Any position of (3) or greater puts the script in the last
	// position. If an explicit position value conflicts with an existing script
	// value, your request succeeds, but the original script at that position
	// and all later scripts have their positions incremented by 1.
	Position int `json:"position,omitempty"`
	// The Base64-encoded content of the script.
	Script string `json:"script"`

	ForceSendFields []string `json:"-"`
}

func (GlobalInitScriptCreateRequest) MarshalJSON added in v0.23.0

func (s GlobalInitScriptCreateRequest) MarshalJSON() ([]byte, error)

func (*GlobalInitScriptCreateRequest) UnmarshalJSON added in v0.23.0

func (s *GlobalInitScriptCreateRequest) UnmarshalJSON(b []byte) error

type GlobalInitScriptDetails

type GlobalInitScriptDetails struct {
	// Time when the script was created, represented as a Unix timestamp in
	// milliseconds.
	CreatedAt int `json:"created_at,omitempty"`
	// The username of the user who created the script.
	CreatedBy string `json:"created_by,omitempty"`
	// Specifies whether the script is enabled. The script runs only if enabled.
	Enabled bool `json:"enabled,omitempty"`
	// The name of the script
	Name string `json:"name,omitempty"`
	// The position of a script, where 0 represents the first script to run, 1
	// is the second script to run, in ascending order.
	Position int `json:"position,omitempty"`
	// The global init script ID.
	ScriptId string `json:"script_id,omitempty"`
	// Time when the script was updated, represented as a Unix timestamp in
	// milliseconds.
	UpdatedAt int `json:"updated_at,omitempty"`
	// The username of the user who last updated the script
	UpdatedBy string `json:"updated_by,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GlobalInitScriptDetails) MarshalJSON added in v0.23.0

func (s GlobalInitScriptDetails) MarshalJSON() ([]byte, error)

func (*GlobalInitScriptDetails) UnmarshalJSON added in v0.23.0

func (s *GlobalInitScriptDetails) UnmarshalJSON(b []byte) error

type GlobalInitScriptDetailsWithContent

type GlobalInitScriptDetailsWithContent struct {
	// Time when the script was created, represented as a Unix timestamp in
	// milliseconds.
	CreatedAt int `json:"created_at,omitempty"`
	// The username of the user who created the script.
	CreatedBy string `json:"created_by,omitempty"`
	// Specifies whether the script is enabled. The script runs only if enabled.
	Enabled bool `json:"enabled,omitempty"`
	// The name of the script
	Name string `json:"name,omitempty"`
	// The position of a script, where 0 represents the first script to run, 1
	// is the second script to run, in ascending order.
	Position int `json:"position,omitempty"`
	// The Base64-encoded content of the script.
	Script string `json:"script,omitempty"`
	// The global init script ID.
	ScriptId string `json:"script_id,omitempty"`
	// Time when the script was updated, represented as a Unix timestamp in
	// milliseconds.
	UpdatedAt int `json:"updated_at,omitempty"`
	// The username of the user who last updated the script
	UpdatedBy string `json:"updated_by,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GlobalInitScriptDetailsWithContent) MarshalJSON added in v0.23.0

func (s GlobalInitScriptDetailsWithContent) MarshalJSON() ([]byte, error)

func (*GlobalInitScriptDetailsWithContent) UnmarshalJSON added in v0.23.0

func (s *GlobalInitScriptDetailsWithContent) UnmarshalJSON(b []byte) error

type GlobalInitScriptUpdateRequest

type GlobalInitScriptUpdateRequest struct {
	// Specifies whether the script is enabled. The script runs only if enabled.
	Enabled bool `json:"enabled,omitempty"`
	// The name of the script
	Name string `json:"name"`
	// The position of a script, where 0 represents the first script to run, 1
	// is the second script to run, in ascending order. To move the script to
	// run first, set its position to 0.
	//
	// To move the script to the end, set its position to any value greater than
	// or equal to the position of the last script. Example: three existing
	// scripts
	// with positions 0, 1, and 2. Any position value of 2 or greater puts the
	// script in the last position (2).
	//
	// If an explicit position value conflicts with an existing script, your
	// request succeeds, but the original script at that position and all later
	// scripts have their positions incremented by 1.
	Position int `json:"position,omitempty"`
	// The Base64-encoded content of the script.
	Script string `json:"script"`
	// The ID of the global init script.
	ScriptId string `json:"-" url:"-"`

	ForceSendFields []string `json:"-"`
}

func (GlobalInitScriptUpdateRequest) MarshalJSON added in v0.23.0

func (s GlobalInitScriptUpdateRequest) MarshalJSON() ([]byte, error)

func (*GlobalInitScriptUpdateRequest) UnmarshalJSON added in v0.23.0

func (s *GlobalInitScriptUpdateRequest) UnmarshalJSON(b []byte) error

type GlobalInitScriptsAPI

type GlobalInitScriptsAPI struct {
	// contains filtered or unexported fields
}

The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace. These scripts run on every node in every cluster in the workspace.

**Important:** Existing clusters must be restarted to pick up any changes made to global init scripts. Global init scripts are run in order. If the init script returns with a bad exit code, the Apache Spark container fails to launch and init scripts with later position are skipped. If enough containers fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error code.

func NewGlobalInitScripts

func NewGlobalInitScripts(client *client.DatabricksClient) *GlobalInitScriptsAPI

func (*GlobalInitScriptsAPI) Create

Create init script.

Creates a new global init script in this workspace.

Example (GlobalInitScripts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.GlobalInitScripts.Create(ctx, compute.GlobalInitScriptCreateRequest{
	Name:     fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Script:   base64.StdEncoding.EncodeToString([]byte("echo 1")),
	Enabled:  true,
	Position: 10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.GlobalInitScripts.DeleteByScriptId(ctx, created.ScriptId)
if err != nil {
	panic(err)
}
Output:

func (*GlobalInitScriptsAPI) Delete

Delete init script.

Deletes a global init script.

func (*GlobalInitScriptsAPI) DeleteByScriptId

func (a *GlobalInitScriptsAPI) DeleteByScriptId(ctx context.Context, scriptId string) error

Delete init script.

Deletes a global init script.

func (*GlobalInitScriptsAPI) Get

Get an init script.

Gets all the details of a script, including its Base64-encoded contents.

Example (GlobalInitScripts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.GlobalInitScripts.Create(ctx, compute.GlobalInitScriptCreateRequest{
	Name:     fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Script:   base64.StdEncoding.EncodeToString([]byte("echo 1")),
	Enabled:  true,
	Position: 10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := w.GlobalInitScripts.GetByScriptId(ctx, created.ScriptId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.GlobalInitScripts.DeleteByScriptId(ctx, created.ScriptId)
if err != nil {
	panic(err)
}
Output:

func (*GlobalInitScriptsAPI) GetByName

GetByName calls GlobalInitScriptsAPI.GlobalInitScriptDetailsNameToScriptIdMap and returns a single GlobalInitScriptDetails.

Returns an error if there's more than one GlobalInitScriptDetails with the same .Name.

Note: All GlobalInitScriptDetails instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*GlobalInitScriptsAPI) GetByScriptId

Get an init script.

Gets all the details of a script, including its Base64-encoded contents.

func (*GlobalInitScriptsAPI) GlobalInitScriptDetailsNameToScriptIdMap

func (a *GlobalInitScriptsAPI) GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error)

GlobalInitScriptDetailsNameToScriptIdMap calls GlobalInitScriptsAPI.ListAll and creates a map of results with GlobalInitScriptDetails.Name as key and GlobalInitScriptDetails.ScriptId as value.

Returns an error if there's more than one GlobalInitScriptDetails with the same .Name.

Note: All GlobalInitScriptDetails instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*GlobalInitScriptsAPI) Impl

Impl returns the low-level GlobalInitScripts API implementation. Deprecated: use MockGlobalInitScriptsInterface instead.

func (*GlobalInitScriptsAPI) List added in v0.24.0

Get init scripts.

Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. To retrieve the contents of a script, use the [get a global init script](:method:globalinitscripts/get) operation.

This method is generated by Databricks SDK Code Generator.

func (*GlobalInitScriptsAPI) ListAll

Get init scripts.

Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. To retrieve the contents of a script, use the [get a global init script](:method:globalinitscripts/get) operation.

This method is generated by Databricks SDK Code Generator.

Example (GlobalInitScripts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.GlobalInitScripts.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*GlobalInitScriptsAPI) Update

Update init script.

Updates a global init script, specifying only the fields to change. All fields are optional. Unspecified fields retain their current value.

Example (GlobalInitScripts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.GlobalInitScripts.Create(ctx, compute.GlobalInitScriptCreateRequest{
	Name:     fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Script:   base64.StdEncoding.EncodeToString([]byte("echo 1")),
	Enabled:  true,
	Position: 10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.GlobalInitScripts.Update(ctx, compute.GlobalInitScriptUpdateRequest{
	ScriptId: created.ScriptId,
	Name:     fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Script:   base64.StdEncoding.EncodeToString([]byte("echo 2")),
})
if err != nil {
	panic(err)
}

// cleanup

err = w.GlobalInitScripts.DeleteByScriptId(ctx, created.ScriptId)
if err != nil {
	panic(err)
}
Output:

func (*GlobalInitScriptsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockGlobalInitScriptsInterface instead.

type GlobalInitScriptsInterface added in v0.29.0

type GlobalInitScriptsInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockGlobalInitScriptsInterface instead.
	WithImpl(impl GlobalInitScriptsService) GlobalInitScriptsInterface

	// Impl returns low-level GlobalInitScripts API implementation
	// Deprecated: use MockGlobalInitScriptsInterface instead.
	Impl() GlobalInitScriptsService

	// Create init script.
	//
	// Creates a new global init script in this workspace.
	Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error)

	// Delete init script.
	//
	// Deletes a global init script.
	Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error

	// Delete init script.
	//
	// Deletes a global init script.
	DeleteByScriptId(ctx context.Context, scriptId string) error

	// Get an init script.
	//
	// Gets all the details of a script, including its Base64-encoded contents.
	Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error)

	// Get an init script.
	//
	// Gets all the details of a script, including its Base64-encoded contents.
	GetByScriptId(ctx context.Context, scriptId string) (*GlobalInitScriptDetailsWithContent, error)

	// Get init scripts.
	//
	// Get a list of all global init scripts for this workspace. This returns all
	// properties for each script but **not** the script contents. To retrieve the
	// contents of a script, use the [get a global init
	// script](:method:globalinitscripts/get) operation.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails]

	// Get init scripts.
	//
	// Get a list of all global init scripts for this workspace. This returns all
	// properties for each script but **not** the script contents. To retrieve the
	// contents of a script, use the [get a global init
	// script](:method:globalinitscripts/get) operation.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error)

	// GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value.
	//
	// Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name.
	//
	// Note: All [GlobalInitScriptDetails] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error)

	// GetByName calls [GlobalInitScriptsAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails].
	//
	// Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name.
	//
	// Note: All [GlobalInitScriptDetails] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*GlobalInitScriptDetails, error)

	// Update init script.
	//
	// Updates a global init script, specifying only the fields to change. All
	// fields are optional. Unspecified fields retain their current value.
	Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error
}

type GlobalInitScriptsService

type GlobalInitScriptsService interface {

	// Create init script.
	//
	// Creates a new global init script in this workspace.
	Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error)

	// Delete init script.
	//
	// Deletes a global init script.
	Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error

	// Get an init script.
	//
	// Gets all the details of a script, including its Base64-encoded contents.
	Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error)

	// Get init scripts.
	//
	// Get a list of all global init scripts for this workspace. This returns
	// all properties for each script but **not** the script contents. To
	// retrieve the contents of a script, use the [get a global init
	// script](:method:globalinitscripts/get) operation.
	//
	// Use ListAll() to get all GlobalInitScriptDetails instances
	List(ctx context.Context) (*ListGlobalInitScriptsResponse, error)

	// Update init script.
	//
	// Updates a global init script, specifying only the fields to change. All
	// fields are optional. Unspecified fields retain their current value.
	Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error
}

The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace. These scripts run on every node in every cluster in the workspace.

**Important:** Existing clusters must be restarted to pick up any changes made to global init scripts. Global init scripts are run in order. If the init script returns with a bad exit code, the Apache Spark container fails to launch and init scripts with later position are skipped. If enough containers fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error code.
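
As a sketch of reading a script back through this API: the example below assumes the Base64-encoded body is returned in the Script field of GlobalInitScriptDetailsWithContent, and uses a placeholder script ID.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// "<script-id>" is a placeholder for an existing global init script ID.
details, err := w.GlobalInitScripts.GetByScriptId(ctx, "<script-id>")
if err != nil {
	panic(err)
}

// Decode the Base64-encoded script body (assumed to be in the Script field).
decoded, err := base64.StdEncoding.DecodeString(details.Script)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "script contents: %s", string(decoded))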

type InitScriptEventDetails added in v0.25.0

type InitScriptEventDetails struct {
	// The cluster scoped init scripts associated with this cluster event
	Cluster []InitScriptInfoAndExecutionDetails `json:"cluster,omitempty"`
	// The global init scripts associated with this cluster event
	Global []InitScriptInfoAndExecutionDetails `json:"global,omitempty"`
	// The private ip address of the node where the init scripts were run.
	ReportedForNode string `json:"reported_for_node,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InitScriptEventDetails) MarshalJSON added in v0.25.0

func (s InitScriptEventDetails) MarshalJSON() ([]byte, error)

func (*InitScriptEventDetails) UnmarshalJSON added in v0.25.0

func (s *InitScriptEventDetails) UnmarshalJSON(b []byte) error

type InitScriptExecutionDetails added in v0.25.0

type InitScriptExecutionDetails struct {
	// Additional details regarding errors.
	ErrorMessage string `json:"error_message,omitempty"`
	// The duration of the script execution in seconds.
	ExecutionDurationSeconds int `json:"execution_duration_seconds,omitempty"`
	// The current status of the script
	Status InitScriptExecutionDetailsStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InitScriptExecutionDetails) MarshalJSON added in v0.25.0

func (s InitScriptExecutionDetails) MarshalJSON() ([]byte, error)

func (*InitScriptExecutionDetails) UnmarshalJSON added in v0.25.0

func (s *InitScriptExecutionDetails) UnmarshalJSON(b []byte) error

type InitScriptExecutionDetailsStatus added in v0.25.0

type InitScriptExecutionDetailsStatus string

The current status of the script

const InitScriptExecutionDetailsStatusFailedExecution InitScriptExecutionDetailsStatus = `FAILED_EXECUTION`
const InitScriptExecutionDetailsStatusFailedFetch InitScriptExecutionDetailsStatus = `FAILED_FETCH`
const InitScriptExecutionDetailsStatusNotExecuted InitScriptExecutionDetailsStatus = `NOT_EXECUTED`
const InitScriptExecutionDetailsStatusSkipped InitScriptExecutionDetailsStatus = `SKIPPED`
const InitScriptExecutionDetailsStatusSucceeded InitScriptExecutionDetailsStatus = `SUCCEEDED`
const InitScriptExecutionDetailsStatusUnknown InitScriptExecutionDetailsStatus = `UNKNOWN`

func (*InitScriptExecutionDetailsStatus) Set added in v0.25.0

Set raw string value and validate it against allowed values

func (*InitScriptExecutionDetailsStatus) String added in v0.25.0

String representation for fmt.Print

func (*InitScriptExecutionDetailsStatus) Type added in v0.25.0

Type always returns InitScriptExecutionDetailsStatus to satisfy [pflag.Value] interface

type InitScriptInfo added in v0.9.0

type InitScriptInfo struct {
	// destination needs to be provided. e.g. `{ "abfss" : { "destination" :
	// "abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>"
	// } }`
	Abfss *Adlsgen2Info `json:"abfss,omitempty"`
	// destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
	// "dbfs:/home/cluster_log" } }`
	Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
	// destination needs to be provided. e.g. `{ "file" : { "destination" :
	// "file:/my/local/file.sh" } }`
	File *LocalFileInfo `json:"file,omitempty"`
	// destination needs to be provided. e.g. `{ "gcs": { "destination":
	// "gs://my-bucket/file.sh" } }`
	Gcs *GcsStorageInfo `json:"gcs,omitempty"`
	// destination and either the region or endpoint need to be provided. e.g.
	// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
	// "us-west-2" } }` Cluster iam role is used to access s3, please make sure
	// the cluster iam role in `instance_profile_arn` has permission to write
	// data to the s3 destination.
	S3 *S3StorageInfo `json:"s3,omitempty"`
	// destination needs to be provided. e.g. `{ "volumes" : { "destination" :
	// "/Volumes/my-init.sh" } }`
	Volumes *VolumesStorageInfo `json:"volumes,omitempty"`
	// destination needs to be provided. e.g. `{ "workspace" : { "destination" :
	// "/Users/user1@databricks.com/my-init.sh" } }`
	Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"`
}
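
For illustration, a workspace-file init script reference can be built from this struct as follows. The Destination field name follows the pattern of the other storage info types, and passing the value through an init_scripts field of a cluster request is an assumption here, not something this type defines.

script := compute.InitScriptInfo{
	Workspace: &compute.WorkspaceStorageInfo{
		Destination: "/Users/user1@databricks.com/my-init.sh",
	},
}
// The reference would then be supplied as one element of a cluster's
// init_scripts list when creating or editing the cluster (assumed field).
_ = script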

type InitScriptInfoAndExecutionDetails added in v0.25.0

type InitScriptInfoAndExecutionDetails struct {
	// Details about the script
	ExecutionDetails *InitScriptExecutionDetails `json:"execution_details,omitempty"`
	// The script
	Script *InitScriptInfo `json:"script,omitempty"`
}

type InstallLibraries

type InstallLibraries struct {
	// Unique identifier for the cluster on which to install these libraries.
	ClusterId string `json:"cluster_id"`
	// The libraries to install.
	Libraries []Library `json:"libraries"`
}

func (*InstallLibraries) Sort

func (cll *InstallLibraries) Sort()
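
A minimal sketch of building an install request and calling Sort, which presumably gives the library list a deterministic order for comparing desired versus reported cluster libraries. The cluster ID and artifact paths are placeholders, and the Whl/Jar string fields on Library are assumptions based on the Libraries API schema.

install := compute.InstallLibraries{
	ClusterId: "<cluster-id>",
	Libraries: []compute.Library{
		{Whl: "dbfs:/FileStore/wheels/my_lib-0.1.0-py3-none-any.whl"},
		{Jar: "dbfs:/FileStore/jars/custom.jar"},
	},
}
// Sort normalizes the order of the libraries in the request.
install.Sort()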

type InstallLibrariesResponse added in v0.34.0

type InstallLibrariesResponse struct {
}

type InstancePoolAccessControlRequest added in v0.15.0

type InstancePoolAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"`
	// application ID of a service principal
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolAccessControlRequest) MarshalJSON added in v0.23.0

func (s InstancePoolAccessControlRequest) MarshalJSON() ([]byte, error)

func (*InstancePoolAccessControlRequest) UnmarshalJSON added in v0.23.0

func (s *InstancePoolAccessControlRequest) UnmarshalJSON(b []byte) error

type InstancePoolAccessControlResponse added in v0.15.0

type InstancePoolAccessControlResponse struct {
	// All permissions.
	AllPermissions []InstancePoolPermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolAccessControlResponse) MarshalJSON added in v0.23.0

func (s InstancePoolAccessControlResponse) MarshalJSON() ([]byte, error)

func (*InstancePoolAccessControlResponse) UnmarshalJSON added in v0.23.0

func (s *InstancePoolAccessControlResponse) UnmarshalJSON(b []byte) error

type InstancePoolAndStats

type InstancePoolAndStats struct {
	// Attributes related to instance pools running on Amazon Web Services. If
	// not specified at pool creation, a set of default values will be used.
	AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"`
	// Attributes related to instance pools running on Azure. If not specified
	// at pool creation, a set of default values will be used.
	AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"`
	// Additional tags for pool resources. Databricks will tag all pool
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
	//
	// - Currently, Databricks allows at most 45 custom tags
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// Tags that are added by Databricks regardless of any `custom_tags`,
	// including:
	//
	// - Vendor: Databricks
	//
	// - InstancePoolCreator: <user_id_of_creator>
	//
	// - InstancePoolName: <name_of_pool>
	//
	// - InstancePoolId: <id_of_pool>
	DefaultTags map[string]string `json:"default_tags,omitempty"`
	// Defines the specification of the disks that will be attached to all spark
	// containers.
	DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
	// Autoscaling Local Storage: when enabled, the instances in this pool will
	// dynamically acquire additional disk space when their Spark workers are
	// running low on disk space. In AWS, this feature requires specific AWS
	// permissions to function correctly - refer to the User Guide for more
	// details.
	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
	// Attributes related to instance pools running on Google Cloud Platform. If
	// not specified at pool creation, a set of default values will be used.
	GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"`
	// Automatically terminates the extra instances in the pool cache after they
	// are inactive for this time in minutes if min_idle_instances requirement
	// is already met. If not set, the extra pool instances will be
	// automatically terminated after a default timeout. If specified, the
	// threshold must be between 0 and 10000 minutes. Users can also set this
	// value to 0 to instantly remove idle instances from the cache if min cache
	// size could still hold.
	IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
	// Canonical unique identifier for the pool.
	InstancePoolId string `json:"instance_pool_id,omitempty"`
	// Pool name requested by the user. Pool name must be unique. Length must be
	// between 1 and 100 characters.
	InstancePoolName string `json:"instance_pool_name,omitempty"`
	// Maximum number of outstanding instances to keep in the pool, including
	// both instances used by clusters and idle instances. Clusters that require
	// further instance provisioning will fail during upsize requests.
	MaxCapacity int `json:"max_capacity,omitempty"`
	// Minimum number of idle instances to keep in the instance pool
	MinIdleInstances int `json:"min_idle_instances,omitempty"`
	// This field encodes, through a single value, the resources available to
	// each of the Spark nodes in this cluster. For example, the Spark nodes can
	// be provisioned and optimized for memory or compute intensive workloads. A
	// list of available node types can be retrieved by using the
	// :method:clusters/listNodeTypes API call.
	NodeTypeId string `json:"node_type_id,omitempty"`
	// Custom Docker Image BYOC
	PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"`
	// A list containing at most one preloaded Spark image version for the pool.
	// Pool-backed clusters started with the preloaded Spark version will start
	// faster. A list of available Spark versions can be retrieved by using the
	// :method:clusters/sparkVersions API call.
	PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`
	// Current state of the instance pool.
	State InstancePoolState `json:"state,omitempty"`
	// Usage statistics about the instance pool.
	Stats *InstancePoolStats `json:"stats,omitempty"`
	// Status of failed pending instances in the pool.
	Status *InstancePoolStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolAndStats) MarshalJSON added in v0.23.0

func (s InstancePoolAndStats) MarshalJSON() ([]byte, error)

func (*InstancePoolAndStats) UnmarshalJSON added in v0.23.0

func (s *InstancePoolAndStats) UnmarshalJSON(b []byte) error

type InstancePoolAwsAttributes

type InstancePoolAwsAttributes struct {
	// Availability type used for the spot nodes.
	//
	// The default value is defined by
	// InstancePoolConf.instancePoolDefaultAwsAvailability
	Availability InstancePoolAwsAttributesAvailability `json:"availability,omitempty"`
	// Calculates the bid price for AWS spot instances, as a percentage of the
	// corresponding instance type's on-demand price. For example, if this field
	// is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then
	// the bid price is half of the price of on-demand `r3.xlarge` instances.
	// Similarly, if this field is set to 200, the bid price is twice the price
	// of on-demand `r3.xlarge` instances. If not specified, the default value
	// is 100. When spot instances are requested for this cluster, only spot
	// instances whose bid price percentage matches this field will be
	// considered. Note that, for safety, we enforce this field to be no more
	// than 10000.
	//
	// The default value and documentation here should be kept consistent with
	// CommonConf.defaultSpotBidPricePercent and
	// CommonConf.maxSpotBidPricePercent.
	SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"`
	// Identifier for the availability zone/datacenter in which the cluster
	// resides. This string will be of a form like "us-west-2a". The provided
	// availability zone must be in the same region as the Databricks
	// deployment. For example, "us-west-2a" is not a valid zone id if the
	// Databricks deployment resides in the "us-east-1" region. This is an
	// optional field at cluster creation, and if not specified, a default zone
	// will be used. The list of available zones as well as the default value
	// can be found by using the `List Zones` method.
	ZoneId string `json:"zone_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolAwsAttributes) MarshalJSON added in v0.23.0

func (s InstancePoolAwsAttributes) MarshalJSON() ([]byte, error)

func (*InstancePoolAwsAttributes) UnmarshalJSON added in v0.23.0

func (s *InstancePoolAwsAttributes) UnmarshalJSON(b []byte) error

type InstancePoolAwsAttributesAvailability

type InstancePoolAwsAttributesAvailability string

Availability type used for the spot nodes.

The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability

const InstancePoolAwsAttributesAvailabilityOnDemand InstancePoolAwsAttributesAvailability = `ON_DEMAND`
const InstancePoolAwsAttributesAvailabilitySpot InstancePoolAwsAttributesAvailability = `SPOT`

func (*InstancePoolAwsAttributesAvailability) Set

Set raw string value and validate it against allowed values

func (*InstancePoolAwsAttributesAvailability) String

String representation for fmt.Print

func (*InstancePoolAwsAttributesAvailability) Type

Type always returns InstancePoolAwsAttributesAvailability to satisfy [pflag.Value] interface

type InstancePoolAzureAttributes

type InstancePoolAzureAttributes struct {
	// Shows the Availability type used for the spot nodes.
	//
	// The default value is defined by
	// InstancePoolConf.instancePoolDefaultAzureAvailability
	Availability InstancePoolAzureAttributesAvailability `json:"availability,omitempty"`
	// The default value and documentation here should be kept consistent with
	// CommonConf.defaultSpotBidMaxPrice.
	SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolAzureAttributes) MarshalJSON added in v0.23.0

func (s InstancePoolAzureAttributes) MarshalJSON() ([]byte, error)

func (*InstancePoolAzureAttributes) UnmarshalJSON added in v0.23.0

func (s *InstancePoolAzureAttributes) UnmarshalJSON(b []byte) error

type InstancePoolAzureAttributesAvailability

type InstancePoolAzureAttributesAvailability string

Shows the Availability type used for the spot nodes.

The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability

const InstancePoolAzureAttributesAvailabilityOnDemandAzure InstancePoolAzureAttributesAvailability = `ON_DEMAND_AZURE`
const InstancePoolAzureAttributesAvailabilitySpotAzure InstancePoolAzureAttributesAvailability = `SPOT_AZURE`

func (*InstancePoolAzureAttributesAvailability) Set

Set raw string value and validate it against allowed values

func (*InstancePoolAzureAttributesAvailability) String

String representation for fmt.Print

func (*InstancePoolAzureAttributesAvailability) Type

Type always returns InstancePoolAzureAttributesAvailability to satisfy [pflag.Value] interface

type InstancePoolGcpAttributes added in v0.10.0

type InstancePoolGcpAttributes struct {
	// This field determines whether the instance pool will contain preemptible
	// VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs
	// if the former is unavailable.
	GcpAvailability GcpAvailability `json:"gcp_availability,omitempty"`
	// If provided, each node in the instance pool will have this number of
	// local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP
	// documentation] for the supported number of local SSDs for each instance
	// type.
	//
	// [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
	LocalSsdCount int `json:"local_ssd_count,omitempty"`
	// Identifier for the availability zone/datacenter in which the cluster
	// resides. This string will be of a form like "us-west1-a". The provided
	// availability zone must be in the same region as the Databricks workspace.
	// For example, "us-west1-a" is not a valid zone id if the Databricks
	// workspace resides in the "us-east1" region. This is an optional field at
	// instance pool creation, and if not specified, a default zone will be
	// used.
	//
	// This field can be one of the following: - "HA" => High availability,
	// spread nodes across availability zones for a Databricks deployment region
	// - A GCP availability zone => Pick One of the available zones for (machine
	// type + region) from https://cloud.google.com/compute/docs/regions-zones
	// (e.g. "us-west1-a").
	//
	// If empty, Databricks picks an availability zone to schedule the cluster
	// on.
	ZoneId string `json:"zone_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolGcpAttributes) MarshalJSON added in v0.23.0

func (s InstancePoolGcpAttributes) MarshalJSON() ([]byte, error)

func (*InstancePoolGcpAttributes) UnmarshalJSON added in v0.23.0

func (s *InstancePoolGcpAttributes) UnmarshalJSON(b []byte) error

type InstancePoolPermission added in v0.15.0

type InstancePoolPermission struct {
	Inherited bool `json:"inherited,omitempty"`

	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolPermission) MarshalJSON added in v0.23.0

func (s InstancePoolPermission) MarshalJSON() ([]byte, error)

func (*InstancePoolPermission) UnmarshalJSON added in v0.23.0

func (s *InstancePoolPermission) UnmarshalJSON(b []byte) error

type InstancePoolPermissionLevel added in v0.15.0

type InstancePoolPermissionLevel string

Permission level

const InstancePoolPermissionLevelCanAttachTo InstancePoolPermissionLevel = `CAN_ATTACH_TO`
const InstancePoolPermissionLevelCanManage InstancePoolPermissionLevel = `CAN_MANAGE`

func (*InstancePoolPermissionLevel) Set added in v0.15.0

Set raw string value and validate it against allowed values

func (*InstancePoolPermissionLevel) String added in v0.15.0

func (f *InstancePoolPermissionLevel) String() string

String representation for fmt.Print

func (*InstancePoolPermissionLevel) Type added in v0.15.0

Type always returns InstancePoolPermissionLevel to satisfy [pflag.Value] interface

type InstancePoolPermissions added in v0.15.0

type InstancePoolPermissions struct {
	AccessControlList []InstancePoolAccessControlResponse `json:"access_control_list,omitempty"`

	ObjectId string `json:"object_id,omitempty"`

	ObjectType string `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolPermissions) MarshalJSON added in v0.23.0

func (s InstancePoolPermissions) MarshalJSON() ([]byte, error)

func (*InstancePoolPermissions) UnmarshalJSON added in v0.23.0

func (s *InstancePoolPermissions) UnmarshalJSON(b []byte) error

type InstancePoolPermissionsDescription added in v0.15.0

type InstancePoolPermissionsDescription struct {
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolPermissionsDescription) MarshalJSON added in v0.23.0

func (s InstancePoolPermissionsDescription) MarshalJSON() ([]byte, error)

func (*InstancePoolPermissionsDescription) UnmarshalJSON added in v0.23.0

func (s *InstancePoolPermissionsDescription) UnmarshalJSON(b []byte) error

type InstancePoolPermissionsRequest added in v0.15.0

type InstancePoolPermissionsRequest struct {
	AccessControlList []InstancePoolAccessControlRequest `json:"access_control_list,omitempty"`
	// The instance pool for which to get or manage permissions.
	InstancePoolId string `json:"-" url:"-"`
}

type InstancePoolState

type InstancePoolState string

Current state of the instance pool.

const InstancePoolStateActive InstancePoolState = `ACTIVE`
const InstancePoolStateDeleted InstancePoolState = `DELETED`
const InstancePoolStateStopped InstancePoolState = `STOPPED`

func (*InstancePoolState) Set

func (f *InstancePoolState) Set(v string) error

Set raw string value and validate it against allowed values

func (*InstancePoolState) String

func (f *InstancePoolState) String() string

String representation for fmt.Print

func (*InstancePoolState) Type

func (f *InstancePoolState) Type() string

Type always returns InstancePoolState to satisfy [pflag.Value] interface

type InstancePoolStats

type InstancePoolStats struct {
	// Number of active instances in the pool that are NOT part of a cluster.
	IdleCount int `json:"idle_count,omitempty"`
	// Number of pending instances in the pool that are NOT part of a cluster.
	PendingIdleCount int `json:"pending_idle_count,omitempty"`
	// Number of pending instances in the pool that are part of a cluster.
	PendingUsedCount int `json:"pending_used_count,omitempty"`
	// Number of active instances in the pool that are part of a cluster.
	UsedCount int `json:"used_count,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstancePoolStats) MarshalJSON added in v0.23.0

func (s InstancePoolStats) MarshalJSON() ([]byte, error)

func (*InstancePoolStats) UnmarshalJSON added in v0.23.0

func (s *InstancePoolStats) UnmarshalJSON(b []byte) error

type InstancePoolStatus

type InstancePoolStatus struct {
	// List of error messages for the failed pending instances. The
	// pending_instance_errors follows FIFO with maximum length of the min_idle
	// of the pool. The pending_instance_errors is emptied once the number of
	// existing available instances reaches the min_idle of the pool.
	PendingInstanceErrors []PendingInstanceError `json:"pending_instance_errors,omitempty"`
}

type InstancePoolsAPI

type InstancePoolsAPI struct {
	// contains filtered or unexported fields
}

The Instance Pools API is used to create, edit, delete, and list instance pools, which use ready-to-use cloud instances to reduce cluster start and auto-scaling times.

Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. When a cluster is attached to a pool, cluster nodes are created using the pool’s idle instances. If the pool has no idle instances, the pool expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances.

You can specify a different pool for the driver node and worker nodes, or use the same pool for both.

Databricks does not charge DBUs while instances are idle in the pool. Instance provider billing does apply. See pricing.

func NewInstancePools

func NewInstancePools(client *client.DatabricksClient) *InstancePoolsAPI

func (*InstancePoolsAPI) Create

Create a new instance pool.

Creates a new instance pool using idle and ready-to-use cloud instances.

Example (InstancePools)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

smallest, err := w.Clusters.SelectNodeType(ctx, compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", smallest)

created, err := w.InstancePools.Create(ctx, compute.CreateInstancePool{
	InstancePoolName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	NodeTypeId:       smallest,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.InstancePools.DeleteByInstancePoolId(ctx, created.InstancePoolId)
if err != nil {
	panic(err)
}
Output:

func (*InstancePoolsAPI) Delete

func (a *InstancePoolsAPI) Delete(ctx context.Context, request DeleteInstancePool) error

Delete an instance pool.

Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously.

func (*InstancePoolsAPI) DeleteByInstancePoolId

func (a *InstancePoolsAPI) DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error

Delete an instance pool.

Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously.

func (*InstancePoolsAPI) Edit

func (a *InstancePoolsAPI) Edit(ctx context.Context, request EditInstancePool) error

Edit an existing instance pool.

Modifies the configuration of an existing instance pool.

Example (InstancePools)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

smallest, err := w.Clusters.SelectNodeType(ctx, compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", smallest)

created, err := w.InstancePools.Create(ctx, compute.CreateInstancePool{
	InstancePoolName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	NodeTypeId:       smallest,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.InstancePools.Edit(ctx, compute.EditInstancePool{
	InstancePoolId:   created.InstancePoolId,
	InstancePoolName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	NodeTypeId:       smallest,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.InstancePools.DeleteByInstancePoolId(ctx, created.InstancePoolId)
if err != nil {
	panic(err)
}
Output:

func (*InstancePoolsAPI) Get

Get instance pool information.

Retrieve the information for an instance pool based on its identifier.

Example (InstancePools)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

smallest, err := w.Clusters.SelectNodeType(ctx, compute.NodeTypeRequest{
	LocalDisk: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", smallest)

created, err := w.InstancePools.Create(ctx, compute.CreateInstancePool{
	InstancePoolName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	NodeTypeId:       smallest,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := w.InstancePools.GetByInstancePoolId(ctx, created.InstancePoolId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.InstancePools.DeleteByInstancePoolId(ctx, created.InstancePoolId)
if err != nil {
	panic(err)
}
Output:

func (*InstancePoolsAPI) GetByInstancePoolId

func (a *InstancePoolsAPI) GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error)

Get instance pool information.

Retrieve the information for an instance pool based on its identifier.

func (*InstancePoolsAPI) GetByInstancePoolName

func (a *InstancePoolsAPI) GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error)

GetByInstancePoolName calls InstancePoolsAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap and returns a single InstancePoolAndStats.

Returns an error if there's more than one InstancePoolAndStats with the same .InstancePoolName.

Note: All InstancePoolAndStats instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.
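
For example (a sketch with a placeholder pool name; as noted above, all pools are loaded into memory before matching by name):

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

pool, err := w.InstancePools.GetByInstancePoolName(ctx, "shared-small-nodes")
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", pool.InstancePoolId)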

func (*InstancePoolsAPI) GetPermissionLevels added in v0.19.0

Get instance pool permission levels.

Gets the permission levels that a user can have on an object.

func (*InstancePoolsAPI) GetPermissionLevelsByInstancePoolId added in v0.19.0

func (a *InstancePoolsAPI) GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error)

Get instance pool permission levels.

Gets the permission levels that a user can have on an object.

func (*InstancePoolsAPI) GetPermissions added in v0.19.0

Get instance pool permissions.

Gets the permissions of an instance pool. Instance pools can inherit permissions from their root object.

func (*InstancePoolsAPI) GetPermissionsByInstancePoolId added in v0.19.0

func (a *InstancePoolsAPI) GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error)

Get instance pool permissions.

Gets the permissions of an instance pool. Instance pools can inherit permissions from their root object.

func (*InstancePoolsAPI) Impl

Impl returns the low-level InstancePools API implementation. Deprecated: use MockInstancePoolsInterface instead.

func (*InstancePoolsAPI) InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap

func (a *InstancePoolsAPI) InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error)

InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls InstancePoolsAPI.ListAll and creates a map of results with InstancePoolAndStats.InstancePoolName as key and InstancePoolAndStats.InstancePoolId as value.

Returns an error if there's more than one InstancePoolAndStats with the same .InstancePoolName.

Note: All InstancePoolAndStats instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*InstancePoolsAPI) List added in v0.24.0

List instance pool info.

Gets a list of instance pools with their statistics.

This method is generated by Databricks SDK Code Generator.

func (*InstancePoolsAPI) ListAll

List instance pool info.

Gets a list of instance pools with their statistics.

This method is generated by Databricks SDK Code Generator.

Example (InstancePools)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.InstancePools.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*InstancePoolsAPI) SetPermissions added in v0.19.0

Set instance pool permissions.

Sets permissions on an instance pool. Instance pools can inherit permissions from their root object.
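
A sketch of granting a group the CAN_ATTACH_TO level on a pool; the pool ID and group name are placeholders.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

_, err = w.InstancePools.SetPermissions(ctx, compute.InstancePoolPermissionsRequest{
	InstancePoolId: "<instance-pool-id>",
	AccessControlList: []compute.InstancePoolAccessControlRequest{
		{
			GroupName:       "data-engineers",
			PermissionLevel: compute.InstancePoolPermissionLevelCanAttachTo,
		},
	},
})
if err != nil {
	panic(err)
}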

func (*InstancePoolsAPI) UpdatePermissions added in v0.19.0

Update instance pool permissions.

Updates the permissions on an instance pool. Instance pools can inherit permissions from their root object.

func (*InstancePoolsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockInstancePoolsInterface instead.

type InstancePoolsInterface added in v0.29.0

type InstancePoolsInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockInstancePoolsInterface instead.
	WithImpl(impl InstancePoolsService) InstancePoolsInterface

	// Impl returns low-level InstancePools API implementation
	// Deprecated: use MockInstancePoolsInterface instead.
	Impl() InstancePoolsService

	// Create a new instance pool.
	//
	// Creates a new instance pool using idle and ready-to-use cloud instances.
	Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error)

	// Delete an instance pool.
	//
	// Deletes the instance pool permanently. The idle instances in the pool are
	// terminated asynchronously.
	Delete(ctx context.Context, request DeleteInstancePool) error

	// Delete an instance pool.
	//
	// Deletes the instance pool permanently. The idle instances in the pool are
	// terminated asynchronously.
	DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error

	// Edit an existing instance pool.
	//
	// Modifies the configuration of an existing instance pool.
	Edit(ctx context.Context, request EditInstancePool) error

	// Get instance pool information.
	//
	// Retrieve the information for an instance pool based on its identifier.
	Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error)

	// Get instance pool information.
	//
	// Retrieve the information for an instance pool based on its identifier.
	GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error)

	// Get instance pool permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error)

	// Get instance pool permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error)

	// Get instance pool permissions.
	//
	// Gets the permissions of an instance pool. Instance pools can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error)

	// Get instance pool permissions.
	//
	// Gets the permissions of an instance pool. Instance pools can inherit
	// permissions from their root object.
	GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error)

	// List instance pool info.
	//
	// Gets a list of instance pools with their statistics.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context) listing.Iterator[InstancePoolAndStats]

	// List instance pool info.
	//
	// Gets a list of instance pools with their statistics.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context) ([]InstancePoolAndStats, error)

	// InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value.
	//
	// Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName.
	//
	// Note: All [InstancePoolAndStats] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error)

	// GetByInstancePoolName calls [InstancePoolsAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats].
	//
	// Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName.
	//
	// Note: All [InstancePoolAndStats] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error)

	// Set instance pool permissions.
	//
	// Sets permissions on an instance pool. Instance pools can inherit permissions
	// from their root object.
	SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)

	// Update instance pool permissions.
	//
	// Updates the permissions on an instance pool. Instance pools can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
}

type InstancePoolsService

type InstancePoolsService interface {

	// Create a new instance pool.
	//
	// Creates a new instance pool using idle and ready-to-use cloud instances.
	Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error)

	// Delete an instance pool.
	//
	// Deletes the instance pool permanently. The idle instances in the pool are
	// terminated asynchronously.
	Delete(ctx context.Context, request DeleteInstancePool) error

	// Edit an existing instance pool.
	//
	// Modifies the configuration of an existing instance pool.
	Edit(ctx context.Context, request EditInstancePool) error

	// Get instance pool information.
	//
	// Retrieve the information for an instance pool based on its identifier.
	Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error)

	// Get instance pool permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error)

	// Get instance pool permissions.
	//
	// Gets the permissions of an instance pool. Instance pools can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error)

	// List instance pool info.
	//
	// Gets a list of instance pools with their statistics.
	//
	// Use ListAll() to get all InstancePoolAndStats instances
	List(ctx context.Context) (*ListInstancePools, error)

	// Set instance pool permissions.
	//
	// Sets permissions on an instance pool. Instance pools can inherit
	// permissions from their root object.
	SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)

	// Update instance pool permissions.
	//
	// Updates the permissions on an instance pool. Instance pools can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
}

The Instance Pools API is used to create, edit, delete, and list instance pools, which use ready-to-use cloud instances to reduce cluster start and auto-scaling times.

Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. When a cluster is attached to a pool, cluster nodes are created using the pool’s idle instances. If the pool has no idle instances, the pool expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances.

You can specify a different pool for the driver node and worker nodes, or use the same pool for both.

Databricks does not charge DBUs while instances are idle in the pool. Instance provider billing does apply. See pricing.

type InstanceProfile

type InstanceProfile struct {
	// The AWS IAM role ARN of the role associated with the instance profile.
	// This field is required if your role name and instance profile name do not
	// match and you want to use the instance profile with [Databricks SQL
	// Serverless].
	//
	// Otherwise, this field is optional.
	//
	// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
	IamRoleArn string `json:"iam_role_arn,omitempty"`
	// The AWS ARN of the instance profile to register with Databricks. This
	// field is required.
	InstanceProfileArn string `json:"instance_profile_arn"`
	// Boolean flag indicating whether the instance profile should only be used
	// in credential passthrough scenarios. If true, it means the instance
	// profile contains a meta IAM role which could assume a wide range of
	// roles. Therefore it should always be used with authorization. This field
	// is optional, the default value is `false`.
	IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (InstanceProfile) MarshalJSON added in v0.23.0

func (s InstanceProfile) MarshalJSON() ([]byte, error)

func (*InstanceProfile) UnmarshalJSON added in v0.23.0

func (s *InstanceProfile) UnmarshalJSON(b []byte) error

type InstanceProfilesAPI

type InstanceProfilesAPI struct {
	// contains filtered or unexported fields
}

The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with. Regular users can list the instance profiles available to them. See Secure access to S3 buckets using instance profiles for more information.

func NewInstanceProfiles

func NewInstanceProfiles(client *client.DatabricksClient) *InstanceProfilesAPI

func (*InstanceProfilesAPI) Add

Register an instance profile.

In the UI, you can select the instance profile when launching clusters. This API is only available to admin users.

Example (AwsInstanceProfiles)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

arn := "arn:aws:iam::000000000000:instance-profile/abc"

err = w.InstanceProfiles.Add(ctx, compute.AddInstanceProfile{
	InstanceProfileArn: arn,
	SkipValidation:     true,
	IamRoleArn:         "arn:aws:iam::000000000000:role/bcd",
})
if err != nil {
	panic(err)
}
Output:

func (*InstanceProfilesAPI) Edit

Edit an instance profile.

The only supported field to change is the optional IAM role ARN associated with the instance profile. It is required to specify the IAM role ARN if both of the following are true:

* Your role name and instance profile name do not match. The name is the part after the last slash in each ARN.
* You want to use the instance profile with Databricks SQL Serverless.

To understand where these fields are in the AWS console, see Enable serverless SQL warehouses.

This API is only available to admin users.

Example (AwsInstanceProfiles)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

arn := "arn:aws:iam::000000000000:instance-profile/abc"

err = w.InstanceProfiles.Edit(ctx, compute.InstanceProfile{
	InstanceProfileArn: arn,
	IamRoleArn:         "arn:aws:iam::000000000000:role/bcdf",
})
if err != nil {
	panic(err)
}
Output:

func (*InstanceProfilesAPI) Impl

Impl returns the low-level InstanceProfiles API implementation. Deprecated: use MockInstanceProfilesInterface instead.

func (*InstanceProfilesAPI) List added in v0.24.0

List available instance profiles.

List the instance profiles that the calling user can use to launch a cluster.

This API is available to all users.

This method is generated by Databricks SDK Code Generator.

func (*InstanceProfilesAPI) ListAll

List available instance profiles.

List the instance profiles that the calling user can use to launch a cluster.

This API is available to all users.

This method is generated by Databricks SDK Code Generator.

Example (AwsInstanceProfiles)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.InstanceProfiles.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*InstanceProfilesAPI) Remove

Remove the instance profile.

Remove the instance profile with the provided ARN. Existing clusters with this instance profile will continue to function.

This API is only accessible to admin users.

func (*InstanceProfilesAPI) RemoveByInstanceProfileArn

func (a *InstanceProfilesAPI) RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error

Remove the instance profile.

Remove the instance profile with the provided ARN. Existing clusters with this instance profile will continue to function.

This API is only accessible to admin users.
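
A sketch using this convenience method with a placeholder ARN:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// The ARN is a placeholder; existing clusters keep working after removal.
err = w.InstanceProfiles.RemoveByInstanceProfileArn(ctx, "arn:aws:iam::000000000000:instance-profile/abc")
if err != nil {
	panic(err)
}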

func (*InstanceProfilesAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockInstanceProfilesInterface instead.

type InstanceProfilesInterface added in v0.29.0

type InstanceProfilesInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockInstanceProfilesInterface instead.
	WithImpl(impl InstanceProfilesService) InstanceProfilesInterface

	// Impl returns low-level InstanceProfiles API implementation
	// Deprecated: use MockInstanceProfilesInterface instead.
	Impl() InstanceProfilesService

	// Register an instance profile.
	//
	// In the UI, you can select the instance profile when launching clusters. This
	// API is only available to admin users.
	Add(ctx context.Context, request AddInstanceProfile) error

	// Edit an instance profile.
	//
	// The only supported field to change is the optional IAM role ARN associated
	// with the instance profile. It is required to specify the IAM role ARN if both
	// of the following are true:
	//
	// * Your role name and instance profile name do not match. The name is the
	// part after the last slash in each ARN.
	//
	// * You want to use the instance profile with [Databricks SQL Serverless].
	//
	// To understand where these fields are in the AWS console, see [Enable
	// serverless SQL warehouses].
	//
	// This API is only available to admin users.
	//
	// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
	// [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html
	Edit(ctx context.Context, request InstanceProfile) error

	// List available instance profiles.
	//
	// List the instance profiles that the calling user can use to launch a cluster.
	//
	// This API is available to all users.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context) listing.Iterator[InstanceProfile]

	// List available instance profiles.
	//
	// List the instance profiles that the calling user can use to launch a cluster.
	//
	// This API is available to all users.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context) ([]InstanceProfile, error)

	// Remove the instance profile.
	//
	// Remove the instance profile with the provided ARN. Existing clusters with
	// this instance profile will continue to function.
	//
	// This API is only accessible to admin users.
	Remove(ctx context.Context, request RemoveInstanceProfile) error

	// Remove the instance profile.
	//
	// Remove the instance profile with the provided ARN. Existing clusters with
	// this instance profile will continue to function.
	//
	// This API is only accessible to admin users.
	RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error
}

type InstanceProfilesService

type InstanceProfilesService interface {

	// Register an instance profile.
	//
	// In the UI, you can select the instance profile when launching clusters.
	// This API is only available to admin users.
	Add(ctx context.Context, request AddInstanceProfile) error

	// Edit an instance profile.
	//
	// The only supported field to change is the optional IAM role ARN
	// associated with the instance profile. It is required to specify the IAM
	// role ARN if both of the following are true:
	//
	// * Your role name and instance profile name do not match. The name is the
	// part after the last slash in each ARN.
	//
	// * You want to use the instance profile with [Databricks SQL Serverless].
	//
	// To understand where these fields are in the AWS console, see [Enable
	// serverless SQL warehouses].
	//
	// This API is only available to admin users.
	//
	// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
	// [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html
	Edit(ctx context.Context, request InstanceProfile) error

	// List available instance profiles.
	//
	// List the instance profiles that the calling user can use to launch a
	// cluster.
	//
	// This API is available to all users.
	//
	// Use ListAll() to get all InstanceProfile instances
	List(ctx context.Context) (*ListInstanceProfilesResponse, error)

	// Remove the instance profile.
	//
	// Remove the instance profile with the provided ARN. Existing clusters with
	// this instance profile will continue to function.
	//
	// This API is only accessible to admin users.
	Remove(ctx context.Context, request RemoveInstanceProfile) error
}

The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with. Regular users can list the instance profiles available to them. See Secure access to S3 buckets using instance profiles for more information.

type Language

type Language string
const LanguagePython Language = `python`
const LanguageScala Language = `scala`
const LanguageSql Language = `sql`

func (*Language) Set

func (f *Language) Set(v string) error

Set raw string value and validate it against allowed values

func (*Language) String

func (f *Language) String() string

String representation for fmt.Print

func (*Language) Type

func (f *Language) Type() string

Type always returns Language to satisfy [pflag.Value] interface

type LibrariesAPI

type LibrariesAPI struct {
	// contains filtered or unexported fields
}

The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster.

To make third-party or custom code available to notebooks and jobs running on your clusters, you can install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Python, Java, Scala and R libraries and point to external packages in PyPI, Maven, and CRAN repositories.

Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library directly from a public repository such as PyPI or Maven, using a previously installed workspace library, or using an init script.

When you uninstall a library from a cluster, the library is removed only when you restart the cluster. Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart.
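
As a sketch of the typical flow, the example below installs a library and then reads back the cluster's library status. The cluster ID and wheel path are placeholders, and the Whl field on Library is an assumption based on the Libraries API schema.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

clusterId := "<cluster-id>"

// Install is asynchronous; it returns before the installation finishes.
err = w.Libraries.Install(ctx, compute.InstallLibraries{
	ClusterId: clusterId,
	Libraries: []compute.Library{
		{Whl: "dbfs:/FileStore/wheels/my_lib-0.1.0-py3-none-any.whl"},
	},
})
if err != nil {
	panic(err)
}

status, err := w.Libraries.ClusterStatusByClusterId(ctx, clusterId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", status)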

func NewLibraries

func NewLibraries(client *client.DatabricksClient) *LibrariesAPI

func (*LibrariesAPI) AllClusterStatuses

func (a *LibrariesAPI) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses]

Get all statuses.

Get the status of all libraries on all clusters. A status is returned for all libraries installed on clusters via the API or the libraries UI.

This method is generated by Databricks SDK Code Generator.

func (*LibrariesAPI) AllClusterStatusesAll added in v0.41.0

func (a *LibrariesAPI) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error)

Get all statuses.

Get the status of all libraries on all clusters. A status is returned for all libraries installed on clusters via the API or the libraries UI.

This method is generated by Databricks SDK Code Generator.
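
For example (a sketch that logs the aggregated result):

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Libraries.AllClusterStatusesAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)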

func (*LibrariesAPI) ClusterStatus

Get status.

Get the status of libraries on a cluster. A status is returned for all libraries installed on this cluster via the API or the libraries UI. The order of returned libraries is as follows: 1. Libraries set to be installed on this cluster, in the order that the libraries were added to the cluster, are returned first. 2. Libraries that were previously requested to be installed on this cluster but are now marked for removal are returned last, in no particular order.

This method is generated by Databricks SDK Code Generator.

func (*LibrariesAPI) ClusterStatusAll added in v0.18.0

func (a *LibrariesAPI) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error)

Get status.

Get the status of libraries on a cluster. A status is returned for all libraries installed on this cluster via the API or the libraries UI. The order of returned libraries is as follows: 1. Libraries set to be installed on this cluster, in the order that the libraries were added to the cluster, are returned first. 2. Libraries that were previously requested to be installed on this cluster but are now marked for removal are returned last, in no particular order.

This method is generated by Databricks SDK Code Generator.

func (*LibrariesAPI) ClusterStatusByClusterId

func (a *LibrariesAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error)

Get status.

Get the status of libraries on a cluster. A status is returned for all libraries installed on this cluster via the API or the libraries UI. The order of returned libraries is as follows: 1. Libraries set to be installed on this cluster, in the order that the libraries were added to the cluster, are returned first. 2. Libraries that were previously requested to be installed on this cluster but are now marked for removal are returned last, in no particular order.

func (*LibrariesAPI) Impl

func (a *LibrariesAPI) Impl() LibrariesService

Impl returns the low-level Libraries API implementation. Deprecated: use MockLibrariesInterface instead.

func (*LibrariesAPI) Install

func (a *LibrariesAPI) Install(ctx context.Context, request InstallLibraries) error

Add a library.

Add libraries to install on a cluster. The installation is asynchronous; it happens in the background after the completion of this request.

func (*LibrariesAPI) Uninstall

func (a *LibrariesAPI) Uninstall(ctx context.Context, request UninstallLibraries) error

Uninstall libraries.

Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is restarted. A request to uninstall a library that is not currently installed is ignored.
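
A sketch of scheduling an uninstall, assuming UninstallLibraries mirrors InstallLibraries with ClusterId and Libraries fields; the cluster ID and wheel path are placeholders.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// The library is only removed after the cluster restarts; a request for a
// library that is not installed is ignored.
err = w.Libraries.Uninstall(ctx, compute.UninstallLibraries{
	ClusterId: "<cluster-id>",
	Libraries: []compute.Library{
		{Whl: "dbfs:/FileStore/wheels/my_lib-0.1.0-py3-none-any.whl"},
	},
})
if err != nil {
	panic(err)
}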

func (*LibrariesAPI) UpdateAndWait

func (a *LibrariesAPI) UpdateAndWait(ctx context.Context, update Update,
	options ...retries.Option[ClusterLibraryStatuses]) error

func (*LibrariesAPI) Wait

clusterID string, timeout time.Duration, isActive bool, refresh bool

func (*LibrariesAPI) WithImpl

func (a *LibrariesAPI) WithImpl(impl LibrariesService) LibrariesInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockLibrariesInterface instead.

type LibrariesInterface added in v0.29.0

type LibrariesInterface interface {

	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockLibrariesInterface instead.
	WithImpl(impl LibrariesService) LibrariesInterface

	// Impl returns low-level Libraries API implementation
	// Deprecated: use MockLibrariesInterface instead.
	Impl() LibrariesService

	// Get all statuses.
	//
	// Get the status of all libraries on all clusters. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI.
	//
	// This method is generated by Databricks SDK Code Generator.
	AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses]

	// Get all statuses.
	//
	// Get the status of all libraries on all clusters. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI.
	//
	// This method is generated by Databricks SDK Code Generator.
	AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error)

	// Get status.
	//
	// Get the status of libraries on a cluster. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI. The
	// order of returned libraries is as follows: 1. Libraries set to be installed
	// on this cluster, in the order that the libraries were added to the cluster,
	// are returned first. 2. Libraries that were previously requested to be
	// installed on this cluster but are now marked for removal are returned last,
	// in no particular order.
	//
	// This method is generated by Databricks SDK Code Generator.
	ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus]

	// Get status.
	//
	// Get the status of libraries on a cluster. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI. The
	// order of returned libraries is as follows: 1. Libraries set to be installed
	// on this cluster, in the order that the libraries were added to the cluster,
	// are returned first. 2. Libraries that were previously requested to be
	// installed on this cluster but are now marked for removal are returned last,
	// in no particular order.
	//
	// This method is generated by Databricks SDK Code Generator.
	ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error)

	// Get status.
	//
	// Get the status of libraries on a cluster. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI. The
	// order of returned libraries is as follows: 1. Libraries set to be installed
	// on this cluster, in the order that the libraries were added to the cluster,
	// are returned first. 2. Libraries that were previously requested to be
	// installed on this cluster but are now marked for removal are returned last,
	// in no particular order.
	ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error)

	// Add a library.
	//
	// Add libraries to install on a cluster. The installation is asynchronous; it
	// happens in the background after the completion of this request.
	Install(ctx context.Context, request InstallLibraries) error

	// Uninstall libraries.
	//
	// Set libraries to uninstall from a cluster. The libraries won't be uninstalled
	// until the cluster is restarted. A request to uninstall a library that is not
	// currently installed is ignored.
	Uninstall(ctx context.Context, request UninstallLibraries) error
	// contains filtered or unexported methods
}

type LibrariesService

type LibrariesService interface {

	// Get all statuses.
	//
	// Get the status of all libraries on all clusters. A status is returned for
	// all libraries installed on this cluster via the API or the libraries UI.
	//
	// Use AllClusterStatusesAll() to get all ClusterLibraryStatuses instances
	AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error)

	// Get status.
	//
	// Get the status of libraries on a cluster. A status is returned for all
	// libraries installed on this cluster via the API or the libraries UI. The
	// order of returned libraries is as follows: 1. Libraries set to be
	// installed on this cluster, in the order that the libraries were added to
	// the cluster, are returned first. 2. Libraries that were previously
	// requested to be installed on this cluster but are now marked for removal
	// are returned last, in no particular order.
	//
	// Use ClusterStatusAll() to get all LibraryFullStatus instances
	ClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error)

	// Add a library.
	//
	// Add libraries to install on a cluster. The installation is asynchronous;
	// it happens in the background after the completion of this request.
	Install(ctx context.Context, request InstallLibraries) error

	// Uninstall libraries.
	//
	// Set libraries to uninstall from a cluster. The libraries won't be
	// uninstalled until the cluster is restarted. A request to uninstall a
	// library that is not currently installed is ignored.
	Uninstall(ctx context.Context, request UninstallLibraries) error
}

The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster.

To make third-party or custom code available to notebooks and jobs running on your clusters, you can install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Python, Java, Scala, and R libraries and point to external packages in PyPI, Maven, and CRAN repositories.

Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library directly from a public repository such as PyPI or Maven, from a previously installed workspace library, or by using an init script.

When you uninstall a library from a cluster, the library is removed only when you restart the cluster. Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart.

type Library

type Library struct {
	// Specification of a CRAN library to be installed as part of the library
	Cran *RCranLibrary `json:"cran,omitempty"`
	// URI of the egg library to install. Supported URIs include Workspace
	// paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "egg":
	// "/Workspace/path/to/library.egg" }`, `{ "egg" :
	// "/Volumes/path/to/library.egg" }` or `{ "egg":
	// "s3://my-bucket/library.egg" }`. If S3 is used, please make sure the
	// cluster has read access on the library. You may need to launch the
	// cluster with an IAM role to access the S3 URI.
	Egg string `json:"egg,omitempty"`
	// URI of the JAR library to install. Supported URIs include Workspace
	// paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "jar":
	// "/Workspace/path/to/library.jar" }`, `{ "jar" :
	// "/Volumes/path/to/library.jar" }` or `{ "jar":
	// "s3://my-bucket/library.jar" }`. If S3 is used, please make sure the
	// cluster has read access on the library. You may need to launch the
	// cluster with an IAM role to access the S3 URI.
	Jar string `json:"jar,omitempty"`
	// Specification of a maven library to be installed. For example: `{
	// "coordinates": "org.jsoup:jsoup:1.7.2" }`
	Maven *MavenLibrary `json:"maven,omitempty"`
	// Specification of a PyPi library to be installed. For example: `{
	// "package": "simplejson" }`
	Pypi *PythonPyPiLibrary `json:"pypi,omitempty"`
	// URI of the requirements.txt file to install. Only Workspace paths and
	// Unity Catalog Volumes paths are supported. For example: `{
	// "requirements": "/Workspace/path/to/requirements.txt" }` or `{
	// "requirements" : "/Volumes/path/to/requirements.txt" }`
	Requirements string `json:"requirements,omitempty"`
	// URI of the wheel library to install. Supported URIs include Workspace
	// paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "whl":
	// "/Workspace/path/to/library.whl" }`, `{ "whl" :
	// "/Volumes/path/to/library.whl" }` or `{ "whl":
	// "s3://my-bucket/library.whl" }`. If S3 is used, please make sure the
	// cluster has read access on the library. You may need to launch the
	// cluster with an IAM role to access the S3 URI.
	Whl string `json:"whl,omitempty"`

	ForceSendFields []string `json:"-"`
}
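
Typically only one of the fields above is set per Library value. A brief sketch of the most common shapes (PyPI, Maven, wheel, and requirements file); the package versions, coordinates, and paths are placeholders.

pypiLib := compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "simplejson==3.8.0"}}
mavenLib := compute.Library{Maven: &compute.MavenLibrary{Coordinates: "org.jsoup:jsoup:1.7.2"}}
whlLib := compute.Library{Whl: "/Volumes/main/default/libs/library.whl"}
reqLib := compute.Library{Requirements: "/Workspace/Shared/requirements.txt"}

// Pass the slice to InstallLibraries.Libraries or UninstallLibraries.Libraries.
libraries := []compute.Library{pypiLib, mavenLib, whlLib, reqLib}
_ = libraries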

func (Library) MarshalJSON added in v0.23.0

func (s Library) MarshalJSON() ([]byte, error)

func (Library) String

func (library Library) String() string

func (*Library) UnmarshalJSON added in v0.23.0

func (s *Library) UnmarshalJSON(b []byte) error

type LibraryFullStatus

type LibraryFullStatus struct {
	// Whether the library was set to be installed on all clusters via the
	// libraries UI.
	IsLibraryForAllClusters bool `json:"is_library_for_all_clusters,omitempty"`
	// Unique identifier for the library.
	Library *Library `json:"library,omitempty"`
	// All the info and warning messages that have occurred so far for this
	// library.
	Messages []string `json:"messages,omitempty"`
	// Status of installing the library on the cluster.
	Status LibraryInstallStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

The status of the library on a specific cluster.

func (LibraryFullStatus) MarshalJSON added in v0.23.0

func (s LibraryFullStatus) MarshalJSON() ([]byte, error)

func (*LibraryFullStatus) UnmarshalJSON added in v0.23.0

func (s *LibraryFullStatus) UnmarshalJSON(b []byte) error

type LibraryInstallStatus added in v0.39.0

type LibraryInstallStatus string

The status of a library on a specific cluster.

const LibraryInstallStatusFailed LibraryInstallStatus = `FAILED`
const LibraryInstallStatusInstalled LibraryInstallStatus = `INSTALLED`
const LibraryInstallStatusInstalling LibraryInstallStatus = `INSTALLING`
const LibraryInstallStatusPending LibraryInstallStatus = `PENDING`
const LibraryInstallStatusResolving LibraryInstallStatus = `RESOLVING`
const LibraryInstallStatusRestored LibraryInstallStatus = `RESTORED`
const LibraryInstallStatusSkipped LibraryInstallStatus = `SKIPPED`
const LibraryInstallStatusUninstallOnRestart LibraryInstallStatus = `UNINSTALL_ON_RESTART`

func (*LibraryInstallStatus) Set added in v0.39.0

func (f *LibraryInstallStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*LibraryInstallStatus) String added in v0.39.0

func (f *LibraryInstallStatus) String() string

String representation for fmt.Print

func (*LibraryInstallStatus) Type added in v0.39.0

func (f *LibraryInstallStatus) Type() string

Type always returns LibraryInstallStatus to satisfy [pflag.Value] interface

type ListAllClusterLibraryStatusesResponse

type ListAllClusterLibraryStatusesResponse struct {
	// A list of cluster statuses.
	Statuses []ClusterLibraryStatuses `json:"statuses,omitempty"`
}

type ListAvailableZonesResponse

type ListAvailableZonesResponse struct {
	// The availability zone if no `zone_id` is provided in the cluster creation
	// request.
	DefaultZone string `json:"default_zone,omitempty"`
	// The list of available zones (e.g., ['us-west-2c', 'us-east-2']).
	Zones []string `json:"zones,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ListAvailableZonesResponse) MarshalJSON added in v0.23.0

func (s ListAvailableZonesResponse) MarshalJSON() ([]byte, error)

func (*ListAvailableZonesResponse) UnmarshalJSON added in v0.23.0

func (s *ListAvailableZonesResponse) UnmarshalJSON(b []byte) error

type ListClusterPoliciesRequest

type ListClusterPoliciesRequest struct {
	// The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort
	// result list by policy creation time. * `POLICY_NAME` - Sort result list
	// by policy name.
	SortColumn ListSortColumn `json:"-" url:"sort_column,omitempty"`
	// The order in which the policies get listed. * `DESC` - Sort result list
	// in descending order. * `ASC` - Sort result list in ascending order.
	SortOrder ListSortOrder `json:"-" url:"sort_order,omitempty"`
}

List cluster policies
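
A hedged sketch of listing cluster policies sorted by name, assuming the workspace client exposes a ClusterPolicies service with a ListAll method that accepts this request type; the sort constants are the ListSortColumn and ListSortOrder values documented below.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

policies, err := w.ClusterPolicies.ListAll(ctx, compute.ListClusterPoliciesRequest{
	SortColumn: compute.ListSortColumnPolicyName,
	SortOrder:  compute.ListSortOrderAsc,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %d cluster policies", len(policies))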

type ListClustersRequest

type ListClustersRequest struct {
	// Filter clusters based on what type of client it can be used for. Could be
	// either NOTEBOOKS or JOBS. No input for this field will get all clusters
	// in the workspace without filtering on its supported client
	CanUseClient string `json:"-" url:"can_use_client,omitempty"`

	ForceSendFields []string `json:"-"`
}

List all clusters

func (ListClustersRequest) MarshalJSON added in v0.23.0

func (s ListClustersRequest) MarshalJSON() ([]byte, error)

func (*ListClustersRequest) UnmarshalJSON added in v0.23.0

func (s *ListClustersRequest) UnmarshalJSON(b []byte) error

type ListClustersResponse

type ListClustersResponse struct {
	// <needs content added>
	Clusters []ClusterDetails `json:"clusters,omitempty"`
}

type ListGlobalInitScriptsResponse

type ListGlobalInitScriptsResponse struct {
	Scripts []GlobalInitScriptDetails `json:"scripts,omitempty"`
}

type ListInstancePools

type ListInstancePools struct {
	InstancePools []InstancePoolAndStats `json:"instance_pools,omitempty"`
}

type ListInstanceProfilesResponse

type ListInstanceProfilesResponse struct {
	// A list of instance profiles that the user can access.
	InstanceProfiles []InstanceProfile `json:"instance_profiles,omitempty"`
}

type ListNodeTypesResponse

type ListNodeTypesResponse struct {
	// The list of available Spark node types.
	NodeTypes []NodeType `json:"node_types,omitempty"`
}

func (*ListNodeTypesResponse) Smallest

func (ntl *ListNodeTypesResponse) Smallest(r NodeTypeRequest) (string, error)
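
Smallest appears to select the smallest node type that satisfies a NodeTypeRequest filter (see NodeTypeRequest below) and returns its node type ID. A sketch, assuming the Clusters service exposes a ListNodeTypes(ctx) call returning this response type:

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

nodeTypes, err := w.Clusters.ListNodeTypes(ctx)
if err != nil {
	panic(err)
}

// Pick the smallest node type with local disk and at least 16 GB of memory.
nodeTypeId, err := nodeTypes.Smallest(compute.NodeTypeRequest{
	LocalDisk:   true,
	MinMemoryGB: 16,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "selected node type %s", nodeTypeId)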

type ListPoliciesResponse

type ListPoliciesResponse struct {
	// List of policies.
	Policies []Policy `json:"policies,omitempty"`
}

type ListPolicyFamiliesRequest

type ListPolicyFamiliesRequest struct {
	// The max number of policy families to return.
	MaxResults int64 `json:"-" url:"max_results,omitempty"`
	// A token that can be used to get the next page of results.
	PageToken string `json:"-" url:"page_token,omitempty"`

	ForceSendFields []string `json:"-"`
}

List policy families

func (ListPolicyFamiliesRequest) MarshalJSON added in v0.23.0

func (s ListPolicyFamiliesRequest) MarshalJSON() ([]byte, error)

func (*ListPolicyFamiliesRequest) UnmarshalJSON added in v0.23.0

func (s *ListPolicyFamiliesRequest) UnmarshalJSON(b []byte) error

type ListPolicyFamiliesResponse

type ListPolicyFamiliesResponse struct {
	// A token that can be used to get the next page of results. If not present,
	// there are no more results to show.
	NextPageToken string `json:"next_page_token,omitempty"`
	// List of policy families.
	PolicyFamilies []PolicyFamily `json:"policy_families"`

	ForceSendFields []string `json:"-"`
}

func (ListPolicyFamiliesResponse) MarshalJSON added in v0.23.0

func (s ListPolicyFamiliesResponse) MarshalJSON() ([]byte, error)

func (*ListPolicyFamiliesResponse) UnmarshalJSON added in v0.23.0

func (s *ListPolicyFamiliesResponse) UnmarshalJSON(b []byte) error

type ListSortColumn

type ListSortColumn string
const ListSortColumnPolicyCreationTime ListSortColumn = `POLICY_CREATION_TIME`
const ListSortColumnPolicyName ListSortColumn = `POLICY_NAME`

func (*ListSortColumn) Set

func (f *ListSortColumn) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListSortColumn) String

func (f *ListSortColumn) String() string

String representation for fmt.Print

func (*ListSortColumn) Type

func (f *ListSortColumn) Type() string

Type always returns ListSortColumn to satisfy [pflag.Value] interface

type ListSortOrder

type ListSortOrder string
const ListSortOrderAsc ListSortOrder = `ASC`
const ListSortOrderDesc ListSortOrder = `DESC`

func (*ListSortOrder) Set

func (f *ListSortOrder) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListSortOrder) String

func (f *ListSortOrder) String() string

String representation for fmt.Print

func (*ListSortOrder) Type

func (f *ListSortOrder) Type() string

Type always returns ListSortOrder to satisfy [pflag.Value] interface

type LocalFileInfo added in v0.25.0

type LocalFileInfo struct {
	// local file destination, e.g. `file:/my/local/file.sh`
	Destination string `json:"destination"`
}

type LogAnalyticsInfo

type LogAnalyticsInfo struct {
	// <needs content added>
	LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
	// <needs content added>
	LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (LogAnalyticsInfo) MarshalJSON added in v0.23.0

func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error)

func (*LogAnalyticsInfo) UnmarshalJSON added in v0.23.0

func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error

type LogSyncStatus

type LogSyncStatus struct {
	// The timestamp of last attempt. If the last attempt fails,
	// `last_exception` will contain the exception in the last attempt.
	LastAttempted int64 `json:"last_attempted,omitempty"`
	// The exception thrown in the last attempt. It will be null (omitted in
	// the response) if there was no exception in the last attempt.
	LastException string `json:"last_exception,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (LogSyncStatus) MarshalJSON added in v0.23.0

func (s LogSyncStatus) MarshalJSON() ([]byte, error)

func (*LogSyncStatus) UnmarshalJSON added in v0.23.0

func (s *LogSyncStatus) UnmarshalJSON(b []byte) error

type MavenLibrary

type MavenLibrary struct {
	// Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2".
	Coordinates string `json:"coordinates"`
	// List of dependences to exclude. For example: `["slf4j:slf4j",
	// "*:hadoop-client"]`.
	//
	// Maven dependency exclusions:
	// https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.
	Exclusions []string `json:"exclusions,omitempty"`
	// Maven repo to install the Maven package from. If omitted, both Maven
	// Central Repository and Spark Packages are searched.
	Repo string `json:"repo,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (MavenLibrary) MarshalJSON added in v0.23.0

func (s MavenLibrary) MarshalJSON() ([]byte, error)

func (*MavenLibrary) UnmarshalJSON added in v0.23.0

func (s *MavenLibrary) UnmarshalJSON(b []byte) error

type NodeInstanceType

type NodeInstanceType struct {
	InstanceTypeId string `json:"instance_type_id,omitempty"`

	LocalDiskSizeGb int `json:"local_disk_size_gb,omitempty"`

	LocalDisks int `json:"local_disks,omitempty"`

	LocalNvmeDiskSizeGb int `json:"local_nvme_disk_size_gb,omitempty"`

	LocalNvmeDisks int `json:"local_nvme_disks,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (NodeInstanceType) MarshalJSON added in v0.23.0

func (s NodeInstanceType) MarshalJSON() ([]byte, error)

func (*NodeInstanceType) UnmarshalJSON added in v0.23.0

func (s *NodeInstanceType) UnmarshalJSON(b []byte) error

type NodeType

type NodeType struct {
	Category string `json:"category,omitempty"`
	// A string description associated with this node type, e.g., "r3.xlarge".
	Description string `json:"description"`

	DisplayOrder int `json:"display_order,omitempty"`
	// An identifier for the type of hardware that this node runs on, e.g.,
	// "r3.2xlarge" in AWS.
	InstanceTypeId string `json:"instance_type_id"`
	// Whether the node type is deprecated. Non-deprecated node types offer
	// greater performance.
	IsDeprecated bool `json:"is_deprecated,omitempty"`
	// AWS specific, whether this instance supports encryption in transit, used
	// for HIPAA and PCI workloads.
	IsEncryptedInTransit bool `json:"is_encrypted_in_transit,omitempty"`

	IsGraviton bool `json:"is_graviton,omitempty"`

	IsHidden bool `json:"is_hidden,omitempty"`

	IsIoCacheEnabled bool `json:"is_io_cache_enabled,omitempty"`
	// Memory (in MB) available for this node type.
	MemoryMb int `json:"memory_mb"`

	NodeInfo *CloudProviderNodeInfo `json:"node_info,omitempty"`

	NodeInstanceType *NodeInstanceType `json:"node_instance_type,omitempty"`
	// Unique identifier for this node type.
	NodeTypeId string `json:"node_type_id"`
	// Number of CPU cores available for this node type. Note that this can be
	// fractional, e.g., 2.5 cores, if the number of cores on a machine
	// instance is not divisible by the number of Spark nodes on that machine.
	NumCores float64 `json:"num_cores"`

	NumGpus int `json:"num_gpus,omitempty"`

	PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"`

	PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"`

	SupportClusterTags bool `json:"support_cluster_tags,omitempty"`

	SupportEbsVolumes bool `json:"support_ebs_volumes,omitempty"`

	SupportPortForwarding bool `json:"support_port_forwarding,omitempty"`
	// Indicates if this node type can be used for an instance pool or cluster
	// with elastic disk enabled. This is true for most node types.
	SupportsElasticDisk bool `json:"supports_elastic_disk,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (NodeType) MarshalJSON added in v0.23.0

func (s NodeType) MarshalJSON() ([]byte, error)

func (*NodeType) UnmarshalJSON added in v0.23.0

func (s *NodeType) UnmarshalJSON(b []byte) error

type NodeTypeRequest

type NodeTypeRequest struct {
	Id                    string `json:"id,omitempty"`
	MinMemoryGB           int32  `json:"min_memory_gb,omitempty"`
	GBPerCore             int32  `json:"gb_per_core,omitempty"`
	MinCores              int32  `json:"min_cores,omitempty"`
	MinGPUs               int32  `json:"min_gpus,omitempty"`
	LocalDisk             bool   `json:"local_disk,omitempty"`
	LocalDiskMinSize      int32  `json:"local_disk_min_size,omitempty"`
	Category              string `json:"category,omitempty"`
	PhotonWorkerCapable   bool   `json:"photon_worker_capable,omitempty"`
	PhotonDriverCapable   bool   `json:"photon_driver_capable,omitempty"`
	Graviton              bool   `json:"graviton,omitempty"`
	IsIOCacheEnabled      bool   `json:"is_io_cache_enabled,omitempty"`
	SupportPortForwarding bool   `json:"support_port_forwarding,omitempty"`
	Fleet                 bool   `json:"fleet,omitempty"`
}

NodeTypeRequest is a wrapper for local filtering of node types

type PendingInstanceError

type PendingInstanceError struct {
	InstanceId string `json:"instance_id,omitempty"`

	Message string `json:"message,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (PendingInstanceError) MarshalJSON added in v0.23.0

func (s PendingInstanceError) MarshalJSON() ([]byte, error)

func (*PendingInstanceError) UnmarshalJSON added in v0.23.0

func (s *PendingInstanceError) UnmarshalJSON(b []byte) error

type PermanentDeleteCluster

type PermanentDeleteCluster struct {
	// The cluster to be deleted.
	ClusterId string `json:"cluster_id"`
}

type PermanentDeleteClusterResponse added in v0.34.0

type PermanentDeleteClusterResponse struct {
}

type PinCluster

type PinCluster struct {
	// <needs content added>
	ClusterId string `json:"cluster_id"`
}

type PinClusterResponse added in v0.34.0

type PinClusterResponse struct {
}

type Policy

type Policy struct {
	// Creation time. The timestamp (in milliseconds) when this Cluster Policy
	// was created.
	CreatedAtTimestamp int64 `json:"created_at_timestamp,omitempty"`
	// Creator user name. The field won't be included in the response if the
	// user has already been deleted.
	CreatorUserName string `json:"creator_user_name,omitempty"`
	// Policy definition document expressed in [Databricks Cluster Policy
	// Definition Language].
	//
	// [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	Definition string `json:"definition,omitempty"`
	// Additional human-readable description of the cluster policy.
	Description string `json:"description,omitempty"`
	// If true, policy is a default policy created and managed by Databricks.
	// Default policies cannot be deleted, and their policy families cannot be
	// changed.
	IsDefault bool `json:"is_default,omitempty"`
	// A list of libraries to be installed on the next cluster restart that uses
	// this policy. The maximum number of libraries is 500.
	Libraries []Library `json:"libraries,omitempty"`
	// Max number of clusters per user that can be active using this policy. If
	// not present, there is no max limit.
	MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"`
	// Cluster Policy name requested by the user. This has to be unique. Length
	// must be between 1 and 100 characters.
	Name string `json:"name,omitempty"`
	// Policy definition JSON document expressed in [Databricks Policy
	// Definition Language]. The JSON document must be passed as a string and
	// cannot be embedded in the requests.
	//
	// You can use this to customize the policy definition inherited from the
	// policy family. Policy rules specified here are merged into the inherited
	// policy definition.
	//
	// [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
	// ID of the policy family.
	PolicyFamilyId string `json:"policy_family_id,omitempty"`
	// Canonical unique identifier for the Cluster Policy.
	PolicyId string `json:"policy_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Policy) MarshalJSON added in v0.23.0

func (s Policy) MarshalJSON() ([]byte, error)

func (*Policy) UnmarshalJSON added in v0.23.0

func (s *Policy) UnmarshalJSON(b []byte) error

type PolicyFamiliesAPI

type PolicyFamiliesAPI struct {
	// contains filtered or unexported fields
}

View available policy families. A policy family contains a policy definition providing best practices for configuring clusters for a particular use case.

Databricks manages and provides policy families for several common cluster use cases. You cannot create, edit, or delete policy families.

Policy families cannot be used directly to create clusters. Instead, you create cluster policies using a policy family. Cluster policies created using a policy family inherit the policy family's policy definition.

func NewPolicyFamilies

func NewPolicyFamilies(client *client.DatabricksClient) *PolicyFamiliesAPI

func (*PolicyFamiliesAPI) Get

func (a *PolicyFamiliesAPI) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error)

Get policy family information.

Retrieve the information for a policy family based on its identifier.

Example (ClusterPolicyFamilies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.PolicyFamilies.ListAll(ctx, compute.ListPolicyFamiliesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

firstFamily, err := w.PolicyFamilies.Get(ctx, compute.GetPolicyFamilyRequest{
	PolicyFamilyId: all[0].PolicyFamilyId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", firstFamily)
Output:

func (*PolicyFamiliesAPI) GetByPolicyFamilyId

func (a *PolicyFamiliesAPI) GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error)

Get policy family information.

Retrieve the information for a policy family based on its identifier.

func (*PolicyFamiliesAPI) Impl

func (a *PolicyFamiliesAPI) Impl() PolicyFamiliesService

Impl returns the low-level PolicyFamilies API implementation. Deprecated: use MockPolicyFamiliesInterface instead.

func (*PolicyFamiliesAPI) List added in v0.24.0

func (a *PolicyFamiliesAPI) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily]

List policy families.

Retrieve a list of policy families. This API is paginated.

This method is generated by Databricks SDK Code Generator.

func (*PolicyFamiliesAPI) ListAll

func (a *PolicyFamiliesAPI) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error)

List policy families.

Retrieve a list of policy families. This API is paginated.

This method is generated by Databricks SDK Code Generator.

Example (ClusterPolicyFamilies)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.PolicyFamilies.ListAll(ctx, compute.ListPolicyFamiliesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*PolicyFamiliesAPI) WithImpl

func (a *PolicyFamiliesAPI) WithImpl(impl PolicyFamiliesService) PolicyFamiliesInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockPolicyFamiliesInterface instead.

type PolicyFamiliesInterface added in v0.29.0

type PolicyFamiliesInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockPolicyFamiliesInterface instead.
	WithImpl(impl PolicyFamiliesService) PolicyFamiliesInterface

	// Impl returns low-level PolicyFamilies API implementation
	// Deprecated: use MockPolicyFamiliesInterface instead.
	Impl() PolicyFamiliesService

	// Get policy family information.
	//
	// Retrieve the information for a policy family based on its identifier.
	Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error)

	// Get policy family information.
	//
	// Retrieve the information for a policy family based on its identifier.
	GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error)

	// List policy families.
	//
	// Retrieve a list of policy families. This API is paginated.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily]

	// List policy families.
	//
	// Retrieve a list of policy families. This API is paginated.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error)
}

type PolicyFamiliesService

type PolicyFamiliesService interface {

	// Get policy family information.
	//
	// Retrieve the information for a policy family based on its identifier.
	Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error)

	// List policy families.
	//
	// Retrieve a list of policy families. This API is paginated.
	//
	// Use ListAll() to get all PolicyFamily instances, which will iterate over every result page.
	List(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error)
}

View available policy families. A policy family contains a policy definition providing best practices for configuring clusters for a particular use case.

Databricks manages and provides policy families for several common cluster use cases. You cannot create, edit, or delete policy families.

Policy families cannot be used directly to create clusters. Instead, you create cluster policies using a policy family. Cluster policies created using a policy family inherit the policy family's policy definition.

type PolicyFamily

type PolicyFamily struct {
	// Policy definition document expressed in [Databricks Cluster Policy
	// Definition Language].
	//
	// [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
	Definition string `json:"definition"`
	// Human-readable description of the purpose of the policy family.
	Description string `json:"description"`
	// Name of the policy family.
	Name string `json:"name"`
	// ID of the policy family.
	PolicyFamilyId string `json:"policy_family_id"`
}

type PythonPyPiLibrary

type PythonPyPiLibrary struct {
	// The name of the pypi package to install. An optional exact version
	// specification is also supported. Examples: "simplejson" and
	// "simplejson==3.8.0".
	Package string `json:"package"`
	// The repository where the package can be found. If not specified, the
	// default pip index is used.
	Repo string `json:"repo,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (PythonPyPiLibrary) MarshalJSON added in v0.23.0

func (s PythonPyPiLibrary) MarshalJSON() ([]byte, error)

func (*PythonPyPiLibrary) UnmarshalJSON added in v0.23.0

func (s *PythonPyPiLibrary) UnmarshalJSON(b []byte) error

type RCranLibrary

type RCranLibrary struct {
	// The name of the CRAN package to install.
	Package string `json:"package"`
	// The repository where the package can be found. If not specified, the
	// default CRAN repo is used.
	Repo string `json:"repo,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (RCranLibrary) MarshalJSON added in v0.23.0

func (s RCranLibrary) MarshalJSON() ([]byte, error)

func (*RCranLibrary) UnmarshalJSON added in v0.23.0

func (s *RCranLibrary) UnmarshalJSON(b []byte) error

type RemoveInstanceProfile

type RemoveInstanceProfile struct {
	// The ARN of the instance profile to remove. This field is required.
	InstanceProfileArn string `json:"instance_profile_arn"`
}

type RemoveResponse added in v0.34.0

type RemoveResponse struct {
}

type ResizeCluster

type ResizeCluster struct {
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
	Autoscale *AutoScale `json:"autoscale,omitempty"`
	// The cluster to be resized.
	ClusterId string `json:"cluster_id"`
	// Number of worker nodes that this cluster should have. A cluster has one
	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
	// Spark nodes.
	//
	// Note: When reading the properties of a cluster, this field reflects the
	// desired number of workers rather than the actual current number of
	// workers. For instance, if a cluster is resized from 5 to 10 workers, this
	// field will immediately be updated to reflect the target size of 10
	// workers, whereas the workers listed in `spark_info` will gradually
	// increase from 5 to 10 as the new nodes are provisioned.
	NumWorkers int `json:"num_workers,omitempty"`

	ForceSendFields []string `json:"-"`
}
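
A sketch of building a resize request. It assumes the Clusters service exposes a Resize method that accepts ResizeCluster and, like other long-running cluster operations in this package, returns a running-cluster waiter plus an error; the waiter is ignored here and the cluster ID is a placeholder.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Scale the placeholder cluster to a fixed size of 8 workers.
_, err = w.Clusters.Resize(ctx, compute.ResizeCluster{
	ClusterId:  "0123-456789-abcdefgh", // placeholder cluster ID
	NumWorkers: 8,
})
if err != nil {
	panic(err)
}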

func (ResizeCluster) MarshalJSON added in v0.23.0

func (s ResizeCluster) MarshalJSON() ([]byte, error)

func (*ResizeCluster) UnmarshalJSON added in v0.23.0

func (s *ResizeCluster) UnmarshalJSON(b []byte) error

type ResizeClusterResponse added in v0.34.0

type ResizeClusterResponse struct {
}

type RestartCluster

type RestartCluster struct {
	// The cluster to be restarted.
	ClusterId string `json:"cluster_id"`
	// <needs content added>
	RestartUser string `json:"restart_user,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (RestartCluster) MarshalJSON added in v0.23.0

func (s RestartCluster) MarshalJSON() ([]byte, error)

func (*RestartCluster) UnmarshalJSON added in v0.23.0

func (s *RestartCluster) UnmarshalJSON(b []byte) error

type RestartClusterResponse added in v0.34.0

type RestartClusterResponse struct {
}

type ResultType

type ResultType string
const ResultTypeError ResultType = `error`
const ResultTypeImage ResultType = `image`
const ResultTypeImages ResultType = `images`
const ResultTypeTable ResultType = `table`
const ResultTypeText ResultType = `text`

func (*ResultType) Set

func (f *ResultType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ResultType) String

func (f *ResultType) String() string

String representation for fmt.Print

func (*ResultType) Type

func (f *ResultType) Type() string

Type always returns ResultType to satisfy [pflag.Value] interface

type Results

type Results struct {
	// The cause of the error
	Cause string `json:"cause,omitempty"`

	Data any `json:"data,omitempty"`
	// The image filename
	FileName string `json:"fileName,omitempty"`

	FileNames []string `json:"fileNames,omitempty"`
	// true if a JSON schema is returned instead of a string representation of
	// the Hive type.
	IsJsonSchema bool `json:"isJsonSchema,omitempty"`
	// internal field used by SDK
	Pos int `json:"pos,omitempty"`

	ResultType ResultType `json:"resultType,omitempty"`
	// The table schema
	Schema []map[string]any `json:"schema,omitempty"`
	// The summary of the error
	Summary string `json:"summary,omitempty"`
	// true if partial results are returned.
	Truncated bool `json:"truncated,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (*Results) Err

func (r *Results) Err() error

Err returns error type

func (*Results) Error

func (r *Results) Error() string

Error returns error in a bit more friendly way

func (*Results) Failed

func (r *Results) Failed() bool

Failed tells if command execution failed

func (Results) MarshalJSON added in v0.23.0

func (s Results) MarshalJSON() ([]byte, error)

func (*Results) Scan

func (r *Results) Scan(dest ...any) bool

Scan scans for results. TODO: change API, also in terraform (databricks_sql_permissions); for now we're adding the `pos` field artificially. This must be removed before this repo is public.

func (*Results) Text

func (r *Results) Text() string

Text returns plain text results

func (*Results) UnmarshalJSON added in v0.23.0

func (s *Results) UnmarshalJSON(b []byte) error

type RuntimeEngine

type RuntimeEngine string

Decides which runtime engine to use, e.g. Standard vs. Photon. If unspecified, the runtime engine is inferred from spark_version.

const RuntimeEngineNull RuntimeEngine = `NULL`
const RuntimeEnginePhoton RuntimeEngine = `PHOTON`
const RuntimeEngineStandard RuntimeEngine = `STANDARD`

func (*RuntimeEngine) Set

func (f *RuntimeEngine) Set(v string) error

Set raw string value and validate it against allowed values

func (*RuntimeEngine) String

func (f *RuntimeEngine) String() string

String representation for fmt.Print

func (*RuntimeEngine) Type

func (f *RuntimeEngine) Type() string

Type always returns RuntimeEngine to satisfy [pflag.Value] interface

type S3StorageInfo

type S3StorageInfo struct {
	// (Optional) Set canned access control list for the logs, e.g.
	// `bucket-owner-full-control`. If `canned_acl` is set, please make sure the
	// cluster IAM role has `s3:PutObjectAcl` permission on the destination
	// bucket and prefix. The full list of possible canned ACLs can be found at
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
	// Please also note that by default only the object owner gets full
	// control. If you are using a cross-account role for writing data, you may
	// want to set `bucket-owner-full-control` so that the bucket owner is able
	// to read the logs.
	CannedAcl string `json:"canned_acl,omitempty"`
	// S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs will be
	// delivered using the cluster IAM role; please make sure you set the cluster
	// IAM role and that the role has write access to the destination. Please
	// also note that you cannot use AWS keys to deliver logs.
	Destination string `json:"destination"`
	// (Optional) Flag to enable server side encryption, `false` by default.
	EnableEncryption bool `json:"enable_encryption,omitempty"`
	// (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It
	// will be used only when encryption is enabled and the default type is
	// `sse-s3`.
	EncryptionType string `json:"encryption_type,omitempty"`
	// S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or
	// endpoint needs to be set. If both are set, endpoint will be used.
	Endpoint string `json:"endpoint,omitempty"`
	// (Optional) Kms key which will be used if encryption is enabled and
	// encryption type is set to `sse-kms`.
	KmsKey string `json:"kms_key,omitempty"`
	// S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set.
	// If both are set, endpoint will be used.
	Region string `json:"region,omitempty"`

	ForceSendFields []string `json:"-"`
}
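
A hedged sketch of wiring S3StorageInfo into a cluster's log delivery configuration. It assumes the package's ClusterLogConf type has an S3 field of this type and that cluster create/edit requests accept a ClusterLogConf; the bucket, prefix, and region are placeholders.

logConf := &compute.ClusterLogConf{
	S3: &compute.S3StorageInfo{
		Destination:      "s3://my-bucket/cluster-logs", // placeholder bucket/prefix
		Region:           "us-west-2",
		EnableEncryption: true,
		CannedAcl:        "bucket-owner-full-control",
	},
}
// Assign to the ClusterLogConf field of a create or edit cluster request.
_ = logConf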

func (S3StorageInfo) MarshalJSON added in v0.23.0

func (s S3StorageInfo) MarshalJSON() ([]byte, error)

func (*S3StorageInfo) UnmarshalJSON added in v0.23.0

func (s *S3StorageInfo) UnmarshalJSON(b []byte) error

type SparkNode

type SparkNode struct {
	// The private IP address of the host instance.
	HostPrivateIp string `json:"host_private_ip,omitempty"`
	// Globally unique identifier for the host instance from the cloud provider.
	InstanceId string `json:"instance_id,omitempty"`
	// Attributes specific to AWS for a Spark node.
	NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"`
	// Globally unique identifier for this node.
	NodeId string `json:"node_id,omitempty"`
	// Private IP address (typically a 10.x.x.x address) of the Spark node. Note
	// that this is different from the private IP address of the host instance.
	PrivateIp string `json:"private_ip,omitempty"`
	// Public DNS address of this node. This address can be used to access the
	// Spark JDBC server on the driver node. To communicate with the JDBC
	// server, traffic must be manually authorized by adding security group
	// rules to the "worker-unmanaged" security group via the AWS console.
	//
	// Actually it's the public DNS address of the host instance.
	PublicDns string `json:"public_dns,omitempty"`
	// The timestamp (in milliseconds) when the Spark node is launched.
	//
	// The start_timestamp is set right before the container is being launched.
	// The timestamp when the container is placed on the ResourceManager, before
	// its launch and setup by the NodeDaemon. This timestamp is the same as the
	// creation timestamp in the database.
	StartTimestamp int64 `json:"start_timestamp,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (SparkNode) MarshalJSON added in v0.23.0

func (s SparkNode) MarshalJSON() ([]byte, error)

func (*SparkNode) UnmarshalJSON added in v0.23.0

func (s *SparkNode) UnmarshalJSON(b []byte) error

type SparkNodeAwsAttributes

type SparkNodeAwsAttributes struct {
	// Whether this node is on an Amazon spot instance.
	IsSpot bool `json:"is_spot,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (SparkNodeAwsAttributes) MarshalJSON added in v0.23.0

func (s SparkNodeAwsAttributes) MarshalJSON() ([]byte, error)

func (*SparkNodeAwsAttributes) UnmarshalJSON added in v0.23.0

func (s *SparkNodeAwsAttributes) UnmarshalJSON(b []byte) error

type SparkVersion

type SparkVersion struct {
	// Spark version key, for example "2.1.x-scala2.11". This is the value which
	// should be provided as the "spark_version" when creating a new cluster.
	// Note that the exact Spark version may change over time for a "wildcard"
	// version (i.e., "2.1.x-scala2.11" is a "wildcard" version) with minor bug
	// fixes.
	Key string `json:"key,omitempty"`
	// A descriptive name for this Spark version, for example "Spark 2.1".
	Name string `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (SparkVersion) MarshalJSON added in v0.23.0

func (s SparkVersion) MarshalJSON() ([]byte, error)

func (*SparkVersion) UnmarshalJSON added in v0.23.0

func (s *SparkVersion) UnmarshalJSON(b []byte) error

type SparkVersionRequest

type SparkVersionRequest struct {
	Id              string `json:"id,omitempty"`
	LongTermSupport bool   `json:"long_term_support,omitempty" tf:"optional,default:false"`
	Beta            bool   `json:"beta,omitempty" tf:"optional,default:false,conflicts:long_term_support"`
	Latest          bool   `json:"latest,omitempty" tf:"optional,default:true"`
	ML              bool   `json:"ml,omitempty" tf:"optional,default:false"`
	Genomics        bool   `json:"genomics,omitempty" tf:"optional,default:false"`
	GPU             bool   `json:"gpu,omitempty" tf:"optional,default:false"`
	Scala           string `json:"scala,omitempty" tf:"optional,default:2.12"`
	SparkVersion    string `json:"spark_version,omitempty" tf:"optional,default:"`
	Photon          bool   `json:"photon,omitempty" tf:"optional,default:false"`
}

SparkVersionRequest - filtering request
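
A sketch of using this filtering request to pick a Spark version key, assuming the Clusters service exposes a SelectSparkVersion helper that accepts it and returns the matching `spark_version` string.

ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Latest long-term-support runtime, non-ML, non-GPU.
sparkVersion, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "selected spark_version %s", sparkVersion)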

type StartCluster

type StartCluster struct {
	// The cluster to be started.
	ClusterId string `json:"cluster_id"`
}

type StartClusterResponse added in v0.34.0

type StartClusterResponse struct {
}

type State

type State string

Current state of the cluster.

const StateError State = `ERROR`
const StatePending State = `PENDING`
const StateResizing State = `RESIZING`
const StateRestarting State = `RESTARTING`
const StateRunning State = `RUNNING`
const StateTerminated State = `TERMINATED`
const StateTerminating State = `TERMINATING`
const StateUnknown State = `UNKNOWN`

func (*State) Set

func (f *State) Set(v string) error

Set raw string value and validate it against allowed values

func (*State) String

func (f *State) String() string

String representation for fmt.Print

func (*State) Type

func (f *State) Type() string

Type always returns State to satisfy [pflag.Value] interface

type TerminationReason

type TerminationReason struct {
	// status code indicating why the cluster was terminated
	Code TerminationReasonCode `json:"code,omitempty"`
	// list of parameters that provide additional information about why the
	// cluster was terminated
	Parameters map[string]string `json:"parameters,omitempty"`
	// type of the termination
	Type TerminationReasonType `json:"type,omitempty"`
}

type TerminationReasonCode

type TerminationReasonCode string

status code indicating why the cluster was terminated

const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED`
const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE`
const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE`
const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`
const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`
const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`
const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED`
const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE`
const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE`
const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE`
const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE`
const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`
const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION`
const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING`
const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING`
const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`
const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE`
const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE`
const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT`
const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`
const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE`
const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE`
const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT`
const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN`
const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST`
const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE`
const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE`
const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE`
const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY`
const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE`
const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE`
const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE`
const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY`
const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED`
const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED`
const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE`
const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE`
const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED`
const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY`
const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE`
const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE`
const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE`
const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR`
const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT`
const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE`
const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE`
const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED`
const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE`
const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`
const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY`
const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT`
const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE`
const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE`
const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE`
const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE`
const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED`
const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED`
const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR`
const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION`
const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE`
const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES`
const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD`
const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR`
const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE`
const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION`
const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE`
const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE`
const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE`
const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED`
const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE`
const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN`
const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE`
const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE`
const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST`
const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE`
const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR`
const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR`

func (*TerminationReasonCode) Set

func (f *TerminationReasonCode) Set(v string) error

Set raw string value and validate it against allowed values

func (*TerminationReasonCode) String

func (f *TerminationReasonCode) String() string

String representation for fmt.Print

func (*TerminationReasonCode) Type

func (f *TerminationReasonCode) Type() string

Type always returns TerminationReasonCode to satisfy [pflag.Value] interface

type TerminationReasonType

type TerminationReasonType string

type of the termination

const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR`
const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE`
const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT`
const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS`

func (*TerminationReasonType) Set

func (f *TerminationReasonType) Set(v string) error

Set raw string value and validate it against allowed values

func (*TerminationReasonType) String

func (f *TerminationReasonType) String() string

String representation for fmt.Print

func (*TerminationReasonType) Type

func (f *TerminationReasonType) Type() string

Type always returns TerminationReasonType to satisfy [pflag.Value] interface

type UninstallLibraries

type UninstallLibraries struct {
	// Unique identifier for the cluster on which to uninstall these libraries.
	ClusterId string `json:"cluster_id"`
	// The libraries to uninstall.
	Libraries []Library `json:"libraries"`
}

type UninstallLibrariesResponse added in v0.34.0

type UninstallLibrariesResponse struct {
}

type UnpinCluster

type UnpinCluster struct {
	// <needs content added>
	ClusterId string `json:"cluster_id"`
}

type UnpinClusterResponse added in v0.34.0

type UnpinClusterResponse struct {
}

type Update

type Update struct {
	ClusterId string
	// The libraries to install.
	Install []Library
	// The libraries to uninstall.
	Uninstall []Library
}

type UpdateResponse added in v0.34.0

type UpdateResponse struct {
}

type VolumesStorageInfo added in v0.18.0

type VolumesStorageInfo struct {
	// Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`
	Destination string `json:"destination"`
}

type Wait

type Wait struct {
	ClusterID string
	Libraries []Library
	IsRunning bool
	IsRefresh bool
}

func (*Wait) IsNotInScope

func (w *Wait) IsNotInScope(lib *Library) bool

type WaitCommandStatusCommandExecutionCancelled added in v0.10.0

type WaitCommandStatusCommandExecutionCancelled[R any] struct {
	Response  *R
	ClusterId string `json:"clusterId"`
	CommandId string `json:"commandId"`
	ContextId string `json:"contextId"`
	Poll      func(time.Duration, func(*CommandStatusResponse)) (*CommandStatusResponse, error)
	// contains filtered or unexported fields
}

WaitCommandStatusCommandExecutionCancelled is a wrapper that calls CommandExecutionAPI.WaitCommandStatusCommandExecutionCancelled and waits to reach Cancelled state.

func (*WaitCommandStatusCommandExecutionCancelled[R]) Get added in v0.10.0

Get the CommandStatusResponse with the default timeout of 20 minutes.

func (*WaitCommandStatusCommandExecutionCancelled[R]) GetWithTimeout added in v0.10.0

Get the CommandStatusResponse with custom timeout.

func (*WaitCommandStatusCommandExecutionCancelled[R]) OnProgress added in v0.10.0

OnProgress invokes a callback every time it polls for the status update.

type WaitCommandStatusCommandExecutionFinishedOrError added in v0.10.0

type WaitCommandStatusCommandExecutionFinishedOrError[R any] struct {
	Response  *R
	ClusterId string `json:"clusterId"`
	CommandId string `json:"commandId"`
	ContextId string `json:"contextId"`
	Poll      func(time.Duration, func(*CommandStatusResponse)) (*CommandStatusResponse, error)
	// contains filtered or unexported fields
}

WaitCommandStatusCommandExecutionFinishedOrError is a wrapper that calls CommandExecutionAPI.WaitCommandStatusCommandExecutionFinishedOrError and waits to reach Finished or Error state.

func (*WaitCommandStatusCommandExecutionFinishedOrError[R]) Get added in v0.10.0

func (w *WaitCommandStatusCommandExecutionFinishedOrError[R]) Get() (*CommandStatusResponse, error)

Get the CommandStatusResponse with the default timeout of 20 minutes.

func (*WaitCommandStatusCommandExecutionFinishedOrError[R]) GetWithTimeout added in v0.10.0

func (w *WaitCommandStatusCommandExecutionFinishedOrError[R]) GetWithTimeout(timeout time.Duration) (*CommandStatusResponse, error)

Get the CommandStatusResponse with custom timeout.

func (*WaitCommandStatusCommandExecutionFinishedOrError[R]) OnProgress added in v0.10.0

func (w *WaitCommandStatusCommandExecutionFinishedOrError[R]) OnProgress(callback func(*CommandStatusResponse)) *WaitCommandStatusCommandExecutionFinishedOrError[R]

OnProgress invokes a callback every time it polls for the status update.

type WaitContextStatusCommandExecutionRunning added in v0.10.0

type WaitContextStatusCommandExecutionRunning[R any] struct {
	Response  *R
	ClusterId string `json:"clusterId"`
	ContextId string `json:"contextId"`
	Poll      func(time.Duration, func(*ContextStatusResponse)) (*ContextStatusResponse, error)
	// contains filtered or unexported fields
}

WaitContextStatusCommandExecutionRunning is a wrapper that calls CommandExecutionAPI.WaitContextStatusCommandExecutionRunning and waits to reach Running state.

func (*WaitContextStatusCommandExecutionRunning[R]) Get added in v0.10.0

func (w *WaitContextStatusCommandExecutionRunning[R]) Get() (*ContextStatusResponse, error)

Get the ContextStatusResponse with the default timeout of 20 minutes.

func (*WaitContextStatusCommandExecutionRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitContextStatusCommandExecutionRunning[R]) GetWithTimeout(timeout time.Duration) (*ContextStatusResponse, error)

Get the ContextStatusResponse with custom timeout.

func (*WaitContextStatusCommandExecutionRunning[R]) OnProgress added in v0.10.0

func (w *WaitContextStatusCommandExecutionRunning[R]) OnProgress(callback func(*ContextStatusResponse)) *WaitContextStatusCommandExecutionRunning[R]

OnProgress invokes a callback every time it polls for the status update.
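
The command- and context-execution wrappers above follow the same polling pattern as the cluster wrappers below: OnProgress registers a callback for every poll, and Get or GetWithTimeout blocks until the terminal state is reached. A hypothetical helper sketch that relies only on the methods documented above, assuming OnProgress on these wrappers chains the way WaitGetClusterRunning.OnProgress does:

package example

import (
	"log"
	"time"

	"github.com/databricks/databricks-sdk-go/service/compute"
)

// waitForCommand is a hypothetical helper that blocks until the command
// reaches Finished or Error, logging every poll along the way.
func waitForCommand[R any](w *compute.WaitCommandStatusCommandExecutionFinishedOrError[R]) (*compute.CommandStatusResponse, error) {
	return w.OnProgress(func(resp *compute.CommandStatusResponse) {
		// Invoked on every poll until the command reaches a terminal state.
		log.Printf("still waiting for command %s on cluster %s", w.CommandId, w.ClusterId)
	}).GetWithTimeout(10 * time.Minute)
}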

type WaitGetClusterRunning added in v0.10.0

type WaitGetClusterRunning[R any] struct {
	Response  *R
	ClusterId string `json:"cluster_id"`
	Poll      func(time.Duration, func(*ClusterDetails)) (*ClusterDetails, error)
	// contains filtered or unexported fields
}

WaitGetClusterRunning is a wrapper that calls ClustersAPI.WaitGetClusterRunning and waits to reach RUNNING state.

func (*WaitGetClusterRunning[R]) Get added in v0.10.0

func (w *WaitGetClusterRunning[R]) Get() (*ClusterDetails, error)

Get the ClusterDetails with the default timeout of 20 minutes.

func (*WaitGetClusterRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetClusterRunning[R]) GetWithTimeout(timeout time.Duration) (*ClusterDetails, error)

Get the ClusterDetails with custom timeout.

func (*WaitGetClusterRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetClusterRunning[R]) OnProgress(callback func(*ClusterDetails)) *WaitGetClusterRunning[R]

OnProgress invokes a callback every time it polls for the status update.
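
In practice this wrapper is returned by long-running ClustersAPI calls. A sketch of creating a cluster and waiting for it to reach RUNNING, assuming a WorkspaceClient whose Clusters.Create returns a *WaitGetClusterRunning[ClusterDetails]; the cluster settings are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	// Assumption: Clusters.Create returns *WaitGetClusterRunning[ClusterDetails].
	wait, err := w.Clusters.Create(ctx, compute.CreateCluster{
		ClusterName:            "sketch-cluster",   // hypothetical
		SparkVersion:           "14.3.x-scala2.12", // hypothetical
		NodeTypeId:             "i3.xlarge",        // hypothetical
		AutoterminationMinutes: 15,
		NumWorkers:             1,
	})
	if err != nil {
		log.Fatal(err)
	}
	info, err := wait.OnProgress(func(c *compute.ClusterDetails) {
		log.Printf("cluster %s state: %s", c.ClusterId, c.State)
	}).GetWithTimeout(20 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s is RUNNING\n", info.ClusterId)
}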

type WaitGetClusterTerminated added in v0.10.0

type WaitGetClusterTerminated[R any] struct {
	Response  *R
	ClusterId string `json:"cluster_id"`
	Poll      func(time.Duration, func(*ClusterDetails)) (*ClusterDetails, error)
	// contains filtered or unexported fields
}

WaitGetClusterTerminated is a wrapper that calls ClustersAPI.WaitGetClusterTerminated and waits to reach TERMINATED state.

func (*WaitGetClusterTerminated[R]) Get added in v0.10.0

func (w *WaitGetClusterTerminated[R]) Get() (*ClusterDetails, error)

Get the ClusterDetails with the default timeout of 20 minutes.

func (*WaitGetClusterTerminated[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetClusterTerminated[R]) GetWithTimeout(timeout time.Duration) (*ClusterDetails, error)

Get the ClusterDetails with custom timeout.

func (*WaitGetClusterTerminated[R]) OnProgress added in v0.10.0

func (w *WaitGetClusterTerminated[R]) OnProgress(callback func(*ClusterDetails)) *WaitGetClusterTerminated[R]

OnProgress invokes a callback every time it polls for the status update.

type WorkloadType

type WorkloadType struct {
	// Defines what type of clients can use the cluster. E.g. Notebooks, Jobs
	Clients ClientsTypes `json:"clients"`
}
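
WorkloadType is attached to a cluster specification to restrict which clients may use the cluster. A sketch of a jobs-only workload, assuming ClientsTypes exposes Jobs and Notebooks boolean fields plus ForceSendFields, as described earlier in this package:

package example

import "github.com/databricks/databricks-sdk-go/service/compute"

// jobsOnlyWorkload is a hypothetical helper returning a workload type that
// admits jobs but not interactive notebooks. The ClientsTypes field names and
// its ForceSendFields field are assumptions based on that type's docs earlier
// in this package.
func jobsOnlyWorkload() compute.WorkloadType {
	return compute.WorkloadType{
		Clients: compute.ClientsTypes{
			Jobs:      true,
			Notebooks: false,
			// Notebooks is the zero value and would normally be omitted from
			// JSON; listing it here forces the explicit false to be sent.
			ForceSendFields: []string{"Notebooks"},
		},
	}
}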

type WorkspaceStorageInfo added in v0.9.0

type WorkspaceStorageInfo struct {
	// workspace files destination, e.g.
	// `/Users/user1@databricks.com/my-init.sh`
	Destination string `json:"destination"`
}
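
Both storage-info types are typically embedded in an init-script location rather than used on their own. A sketch, assuming the package's InitScriptInfo type has Volumes and Workspace fields; the destinations are hypothetical:

package example

import "github.com/databricks/databricks-sdk-go/service/compute"

// initScripts is a hypothetical list of init-script locations: one stored in a
// Unity Catalog volume and one in the workspace file tree. The InitScriptInfo
// field names are assumptions based on that type's docs earlier in this package.
var initScripts = []compute.InitScriptInfo{
	{Volumes: &compute.VolumesStorageInfo{
		Destination: "/Volumes/main/default/scripts/install-deps.sh",
	}},
	{Workspace: &compute.WorkspaceStorageInfo{
		Destination: "/Users/user1@databricks.com/my-init.sh",
	}},
}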
