provisioning

package
v0.29.1
Published: Jan 16, 2024 License: Apache-2.0 Imports: 8 Imported by: 8

Documentation

Overview

These APIs allow you to manage Credentials, Encryption Keys, Networks, Private Access, Storage, VPC Endpoints, and Workspaces.
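
The services in this package are reached through an account-level client. A minimal sketch, assuming the same environment-based authentication used by the examples below:

ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

// Each provisioning service is exposed as a field on the account client,
// for example the credential configurations API:
all, err := a.Credentials.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)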

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AwsCredentials

type AwsCredentials struct {
	StsRole *StsRole `json:"sts_role,omitempty"`
}

type AwsKeyInfo

type AwsKeyInfo struct {
	// The AWS KMS key alias.
	KeyAlias string `json:"key_alias,omitempty"`
	// The AWS KMS key's Amazon Resource Name (ARN).
	KeyArn string `json:"key_arn"`
	// The AWS KMS key region.
	KeyRegion string `json:"key_region"`
	// This field applies only if the `use_cases` property includes `STORAGE`.
	// If this is set to `true` or omitted, the key is also used to encrypt
	// cluster EBS volumes. If you do not want to use this key for encrypting
	// EBS volumes, set to `false`.
	ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AwsKeyInfo) MarshalJSON added in v0.23.0

func (s AwsKeyInfo) MarshalJSON() ([]byte, error)

func (*AwsKeyInfo) UnmarshalJSON added in v0.23.0

func (s *AwsKeyInfo) UnmarshalJSON(b []byte) error
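
Because `reuse_key_for_cluster_volumes` is tagged `omitempty`, an explicit `false` is dropped from the payload unless the Go field name is listed in ForceSendFields. A minimal sketch (the ARN is a placeholder):

info := provisioning.AwsKeyInfo{
	KeyArn:                    "arn:aws:kms:us-west-2:000000000000:key/placeholder",
	KeyRegion:                 "us-west-2",
	ReuseKeyForClusterVolumes: false,
	// Without this entry, MarshalJSON omits the false value and the key
	// would also be used to encrypt cluster EBS volumes.
	ForceSendFields: []string{"ReuseKeyForClusterVolumes"},
}
b, err := json.Marshal(info)
if err != nil {
	panic(err)
}
fmt.Println(string(b))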

type AzureWorkspaceInfo added in v0.28.0

type AzureWorkspaceInfo struct {
	// Azure Resource Group name
	ResourceGroup string `json:"resource_group,omitempty"`
	// Azure Subscription ID
	SubscriptionId string `json:"subscription_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AzureWorkspaceInfo) MarshalJSON added in v0.28.0

func (s AzureWorkspaceInfo) MarshalJSON() ([]byte, error)

func (*AzureWorkspaceInfo) UnmarshalJSON added in v0.28.0

func (s *AzureWorkspaceInfo) UnmarshalJSON(b []byte) error

type CloudResourceContainer

type CloudResourceContainer struct {
	// The general workspace configurations that are specific to Google Cloud.
	Gcp *CustomerFacingGcpCloudResourceContainer `json:"gcp,omitempty"`
}

The general workspace configurations that are specific to cloud providers.

type CreateAwsKeyInfo

type CreateAwsKeyInfo struct {
	// The AWS KMS key alias.
	KeyAlias string `json:"key_alias,omitempty"`
	// The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS
	// region is inferred from the ARN.
	KeyArn string `json:"key_arn"`
	// This field applies only if the `use_cases` property includes `STORAGE`.
	// If this is set to `true` or omitted, the key is also used to encrypt
	// cluster EBS volumes. If you do not want to use this key for encrypting
	// EBS volumes, set this to `false`.
	ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateAwsKeyInfo) MarshalJSON added in v0.23.0

func (s CreateAwsKeyInfo) MarshalJSON() ([]byte, error)

func (*CreateAwsKeyInfo) UnmarshalJSON added in v0.23.0

func (s *CreateAwsKeyInfo) UnmarshalJSON(b []byte) error

type CreateCredentialAwsCredentials

type CreateCredentialAwsCredentials struct {
	StsRole *CreateCredentialStsRole `json:"sts_role,omitempty"`
}

type CreateCredentialRequest

type CreateCredentialRequest struct {
	AwsCredentials CreateCredentialAwsCredentials `json:"aws_credentials"`
	// The human-readable name of the credential configuration object.
	CredentialsName string `json:"credentials_name"`
}

type CreateCredentialStsRole

type CreateCredentialStsRole struct {
	// The Amazon Resource Name (ARN) of the cross account role.
	RoleArn string `json:"role_arn,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateCredentialStsRole) MarshalJSON added in v0.23.0

func (s CreateCredentialStsRole) MarshalJSON() ([]byte, error)

func (*CreateCredentialStsRole) UnmarshalJSON added in v0.23.0

func (s *CreateCredentialStsRole) UnmarshalJSON(b []byte) error

type CreateCustomerManagedKeyRequest

type CreateCustomerManagedKeyRequest struct {
	AwsKeyInfo *CreateAwsKeyInfo `json:"aws_key_info,omitempty"`

	GcpKeyInfo *CreateGcpKeyInfo `json:"gcp_key_info,omitempty"`
	// The cases that the key can be used for.
	UseCases []KeyUseCase `json:"use_cases"`
}

type CreateGcpKeyInfo added in v0.9.0

type CreateGcpKeyInfo struct {
	// The GCP KMS key's resource name
	KmsKeyId string `json:"kms_key_id"`
}

type CreateNetworkRequest

type CreateNetworkRequest struct {
	// The Google Cloud specific information for this network (for example, the
	// VPC ID, subnet ID, and secondary IP ranges).
	GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"`
	// The human-readable name of the network configuration.
	NetworkName string `json:"network_name"`
	// IDs of one to five security groups associated with this network. Security
	// group IDs **cannot** be used in multiple network configurations.
	SecurityGroupIds []string `json:"security_group_ids,omitempty"`
	// IDs of at least two subnets associated with this network. Subnet IDs
	// **cannot** be used in multiple network configurations.
	SubnetIds []string `json:"subnet_ids,omitempty"`
	// If specified, contains the VPC endpoints used to allow cluster
	// communication from this VPC over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"`
	// The ID of the VPC associated with this network. VPC IDs can be used in
	// multiple network configurations.
	VpcId string `json:"vpc_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateNetworkRequest) MarshalJSON added in v0.23.0

func (s CreateNetworkRequest) MarshalJSON() ([]byte, error)

func (*CreateNetworkRequest) UnmarshalJSON added in v0.23.0

func (s *CreateNetworkRequest) UnmarshalJSON(b []byte) error

type CreateStorageConfigurationRequest

type CreateStorageConfigurationRequest struct {
	// Root S3 bucket information.
	RootBucketInfo RootBucketInfo `json:"root_bucket_info"`
	// The human-readable name of the storage configuration.
	StorageConfigurationName string `json:"storage_configuration_name"`
}
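
A sketch of creating a storage configuration through the account client; the bucket name comes from a hypothetical TEST_ROOT_BUCKET environment variable, and RootBucketInfo is defined elsewhere in this package:

ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)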

type CreateVpcEndpointRequest

type CreateVpcEndpointRequest struct {
	// The ID of the VPC endpoint object in AWS.
	AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"`
	// The Google Cloud specific information for this Private Service Connect
	// endpoint.
	GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"`
	// The AWS region in which this VPC endpoint object exists.
	Region string `json:"region,omitempty"`
	// The human-readable name of the VPC endpoint configuration.
	VpcEndpointName string `json:"vpc_endpoint_name"`

	ForceSendFields []string `json:"-"`
}

func (CreateVpcEndpointRequest) MarshalJSON added in v0.23.0

func (s CreateVpcEndpointRequest) MarshalJSON() ([]byte, error)

func (*CreateVpcEndpointRequest) UnmarshalJSON added in v0.23.0

func (s *CreateVpcEndpointRequest) UnmarshalJSON(b []byte) error

type CreateWorkspaceRequest

type CreateWorkspaceRequest struct {
	// The AWS region of the workspace's data plane.
	AwsRegion string `json:"aws_region,omitempty"`
	// The cloud provider which the workspace uses. For Google Cloud workspaces,
	// always set this field to `gcp`.
	Cloud string `json:"cloud,omitempty"`
	// The general workspace configurations that are specific to cloud
	// providers.
	CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"`
	// ID of the workspace's credential configuration object.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The custom tags attached to this workspace as key-value pairs. Keys and
	// values are UTF-8 strings. The value can be an empty string, with a
	// maximum length of 255 characters. The key cannot be empty and has a
	// maximum length of 127 characters.
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The deployment name defines part of the subdomain for the workspace. The
	// workspace URL for the web application and REST APIs is
	// `<workspace-deployment-name>.cloud.databricks.com`. For example, if the
	// deployment name is `abcsales`, your workspace URL will be
	// `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This
	// property supports only the set of characters that are allowed in a
	// subdomain.
	//
	// To set this value, you must have a deployment name prefix. Contact your
	// Databricks account team to add an account deployment name prefix to your
	// account.
	//
	// Workspace deployment names follow the account prefix and a hyphen. For
	// example, if your account's deployment prefix is `acme` and the workspace
	// deployment name is `workspace-1`, the JSON response for the
	// `deployment_name` field becomes `acme-workspace-1`. The workspace URL
	// would be `acme-workspace-1.cloud.databricks.com`.
	//
	// You can also set the `deployment_name` to the reserved keyword `EMPTY` if
	// you want the deployment name to only include the deployment prefix. For
	// example, if your account's deployment prefix is `acme` and the workspace
	// deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and
	// the workspace URL is `acme.cloud.databricks.com`.
	//
	// This value must be unique across all non-deleted deployments across all
	// AWS regions.
	//
	// If a new workspace omits this property, the server generates a unique
	// deployment name for you with the pattern `dbc-xxxxxxxx-xxxx`.
	DeploymentName string `json:"deployment_name,omitempty"`
	// The network settings for the workspace. The configurations are only for
	// Databricks-managed VPCs. It is ignored if you specify a customer-managed
	// VPC in the `network_id` field. All the IP range configurations must be
	// mutually exclusive. An attempt to create a workspace fails if Databricks
	// detects an IP range overlap.
	//
	// Specify custom IP ranges in CIDR format. The IP ranges for these fields
	// must not overlap, and all IP addresses must be entirely within the
	// following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`,
	// `192.168.0.0/16`, and `240.0.0.0/4`.
	//
	// The sizes of these IP ranges affect the maximum number of nodes for the
	// workspace.
	//
	// **Important**: Confirm the IP ranges used by your Databricks workspace
	// before creating the workspace. You cannot change them after your
	// workspace is deployed. If the IP address ranges for your Databricks
	// workspace are too small, IP exhaustion can occur, causing your
	// Databricks jobs to fail.
	// To determine the address range sizes that you need, Databricks provides a
	// calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes
	// for a new workspace].
	//
	// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
	GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`
	// The configurations for the GKE cluster of a Databricks workspace.
	GkeConfig *GkeConfig `json:"gke_config,omitempty"`
	// The Google Cloud region of the workspace data plane in your Google
	// account. For example, `us-east4`.
	Location string `json:"location,omitempty"`
	// The ID of the workspace's managed services encryption key configuration
	// object. This is used to help protect and control access to the
	// workspace's notebooks, secrets, Databricks SQL queries, and query
	// history. The provided key configuration object property `use_cases` must
	// contain `MANAGED_SERVICES`.
	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`

	NetworkId string `json:"network_id,omitempty"`
	// The pricing tier of the workspace. For pricing tier information, see [AWS
	// Pricing].
	//
	// [AWS Pricing]: https://databricks.com/product/aws-pricing
	PricingTier PricingTier `json:"pricing_tier,omitempty"`
	// ID of the workspace's private access settings object. Only used for
	// PrivateLink. This ID must be specified for customers using [AWS
	// PrivateLink] for either front-end (user-to-workspace connection),
	// back-end (data plane to control plane connection), or both connection
	// types.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].",
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// The ID of the workspace's storage configuration object.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// The ID of the workspace's storage encryption key configuration object.
	// This is used to encrypt the workspace's root S3 bucket (root DBFS and
	// system data) and, optionally, cluster EBS volumes. The provided key
	// configuration object property `use_cases` must contain `STORAGE`.
	StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
	// The workspace's human-readable name.
	WorkspaceName string `json:"workspace_name"`

	ForceSendFields []string `json:"-"`
}

func (CreateWorkspaceRequest) MarshalJSON added in v0.23.0

func (s CreateWorkspaceRequest) MarshalJSON() ([]byte, error)

func (*CreateWorkspaceRequest) UnmarshalJSON added in v0.23.0

func (s *CreateWorkspaceRequest) UnmarshalJSON(b []byte) error
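
A sketch of a minimal AWS workspace request that ties together the credential and storage configuration IDs created by the APIs below; here the request is only marshaled so the wire format can be inspected, with role and storage standing in for earlier Create responses:

req := provisioning.CreateWorkspaceRequest{
	WorkspaceName:          "my-workspace",
	AwsRegion:              "us-west-2",
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
	// deployment_name is omitted, so the server generates a unique
	// name with the pattern dbc-xxxxxxxx-xxxx.
}
b, err := json.Marshal(req)
if err != nil {
	panic(err)
}
fmt.Println(string(b))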

type Credential

type Credential struct {
	// The Databricks account ID that hosts the credential.
	AccountId string `json:"account_id,omitempty"`

	AwsCredentials *AwsCredentials `json:"aws_credentials,omitempty"`
	// Time in epoch milliseconds when the credential was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Databricks credential configuration ID.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The human-readable name of the credential configuration object.
	CredentialsName string `json:"credentials_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Credential) MarshalJSON added in v0.23.0

func (s Credential) MarshalJSON() ([]byte, error)

func (*Credential) UnmarshalJSON added in v0.23.0

func (s *Credential) UnmarshalJSON(b []byte) error

type CredentialsAPI

type CredentialsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.

func NewCredentials

func NewCredentials(client *client.DatabricksClient) *CredentialsAPI

func (*CredentialsAPI) Create

func (a *CredentialsAPI) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)

Create credential configuration.

Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account ID) in the returned credential object, and configure the required access policy.

Save the response's `credentials_id` field, which is the ID for your new credential configuration object.

For information about how to create a new workspace with this API, see Create a new workspace using the Account API.

Example (Credentials)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

// cleanup

err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
Output:

Example (LogDelivery)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

creds, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_LOGDELIVERY_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", creds)

// cleanup

err = a.Credentials.DeleteByCredentialsId(ctx, creds.CredentialsId)
if err != nil {
	panic(err)
}
Output:

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

// cleanup

err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
Output:

func (*CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap

func (a *CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)

CredentialCredentialsNameToCredentialsIdMap calls CredentialsAPI.List and creates a map of results with Credential.CredentialsName as key and Credential.CredentialsId as value.

Returns an error if there's more than one Credential with the same .CredentialsName.

Note: All Credential instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.
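
A sketch of resolving a configuration name to its ID with the generated map; the name is hypothetical:

mapping, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
if err != nil {
	panic(err)
}
credentialsId, ok := mapping["my-credential-config"]
if !ok {
	panic("no credential configuration with that name")
}
logger.Infof(ctx, "found %v", credentialsId)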

func (*CredentialsAPI) Delete

func (a *CredentialsAPI) Delete(ctx context.Context, request DeleteCredentialRequest) error

Delete credential configuration.

Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace.

func (*CredentialsAPI) DeleteByCredentialsId

func (a *CredentialsAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error

Delete credential configuration.

Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace.

func (*CredentialsAPI) Get

func (a *CredentialsAPI) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)

Get credential configuration.

Gets a Databricks credential configuration object for an account, both specified by ID.

Example (Credentials)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

byId, err := a.Credentials.GetByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
Output:

func (*CredentialsAPI) GetByCredentialsId

func (a *CredentialsAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

Get credential configuration.

Gets a Databricks credential configuration object for an account, both specified by ID.

func (*CredentialsAPI) GetByCredentialsName

func (a *CredentialsAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error)

GetByCredentialsName calls CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap and returns a single Credential.

Returns an error if there's more than one Credential with the same .CredentialsName.

Note: All Credential instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*CredentialsAPI) Impl

func (a *CredentialsAPI) Impl() CredentialsService

Impl returns the low-level Credentials API implementation.

Deprecated: use MockCredentialsInterface instead.

func (*CredentialsAPI) List

func (a *CredentialsAPI) List(ctx context.Context) ([]Credential, error)

Get all credential configurations.

Gets all Databricks credential configurations associated with an account specified by ID.

func (*CredentialsAPI) WithImpl

func (a *CredentialsAPI) WithImpl(impl CredentialsService) CredentialsInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

Deprecated: use MockCredentialsInterface instead.

type CredentialsInterface added in v0.29.0

type CredentialsInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockCredentialsInterface instead.
	WithImpl(impl CredentialsService) CredentialsInterface

	// Impl returns low-level Credentials API implementation
	// Deprecated: use MockCredentialsInterface instead.
	Impl() CredentialsService

	// Create credential configuration.
	//
	// Creates a Databricks credential configuration that represents cloud
	// cross-account credentials for a specified account. Databricks uses this to
	// set up network infrastructure properly to host Databricks clusters. For your
	// AWS IAM role, you need to trust the External ID (the Databricks Account API
	// account ID) in the returned credential object, and configure the required
	// access policy.
	//
	// Save the response's `credentials_id` field, which is the ID for your new
	// credential configuration object.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)

	// Delete credential configuration.
	//
	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with any
	// workspace.
	Delete(ctx context.Context, request DeleteCredentialRequest) error

	// Delete credential configuration.
	//
	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with any
	// workspace.
	DeleteByCredentialsId(ctx context.Context, credentialsId string) error

	// Get credential configuration.
	//
	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)

	// Get credential configuration.
	//
	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

	// Get all credential configurations.
	//
	// Gets all Databricks credential configurations associated with an account
	// specified by ID.
	List(ctx context.Context) ([]Credential, error)

	// CredentialCredentialsNameToCredentialsIdMap calls [CredentialsAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value.
	//
	// Returns an error if there's more than one [Credential] with the same .CredentialsName.
	//
	// Note: All [Credential] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)

	// GetByCredentialsName calls [CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential].
	//
	// Returns an error if there's more than one [Credential] with the same .CredentialsName.
	//
	// Note: All [Credential] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByCredentialsName(ctx context.Context, name string) (*Credential, error)
}

type CredentialsService

type CredentialsService interface {

	// Create credential configuration.
	//
	// Creates a Databricks credential configuration that represents cloud
	// cross-account credentials for a specified account. Databricks uses this
	// to set up network infrastructure properly to host Databricks clusters.
	// For your AWS IAM role, you need to trust the External ID (the Databricks
	// Account API account ID) in the returned credential object, and configure
	// the required access policy.
	//
	// Save the response's `credentials_id` field, which is the ID for your new
	// credential configuration object.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)

	// Delete credential configuration.
	//
	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with
	// any workspace.
	Delete(ctx context.Context, request DeleteCredentialRequest) error

	// Get credential configuration.
	//
	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)

	// Get all credential configurations.
	//
	// Gets all Databricks credential configurations associated with an account
	// specified by ID.
	List(ctx context.Context) ([]Credential, error)
}

These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.

type CustomTags added in v0.18.0

type CustomTags map[string]string

The custom tags key-value pairing that is attached to this workspace. The key-value pair is a string of utf-8 characters. The value can be an empty string, with maximum length of 255 characters. The key can be of maximum length of 127 characters, and cannot be empty.

type CustomerFacingGcpCloudResourceContainer

type CustomerFacingGcpCloudResourceContainer struct {
	// The Google Cloud project ID, which the workspace uses to instantiate
	// cloud resources for your workspace.
	ProjectId string `json:"project_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

The general workspace configurations that are specific to Google Cloud.

func (CustomerFacingGcpCloudResourceContainer) MarshalJSON added in v0.23.0

func (s CustomerFacingGcpCloudResourceContainer) MarshalJSON() ([]byte, error)

func (*CustomerFacingGcpCloudResourceContainer) UnmarshalJSON added in v0.23.0

func (s *CustomerFacingGcpCloudResourceContainer) UnmarshalJSON(b []byte) error

type CustomerManagedKey

type CustomerManagedKey struct {
	// The Databricks account ID that holds the customer-managed key.
	AccountId string `json:"account_id,omitempty"`

	AwsKeyInfo *AwsKeyInfo `json:"aws_key_info,omitempty"`
	// Time in epoch milliseconds when the customer key was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// ID of the encryption key configuration object.
	CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"`

	GcpKeyInfo *GcpKeyInfo `json:"gcp_key_info,omitempty"`
	// The cases that the key can be used for.
	UseCases []KeyUseCase `json:"use_cases,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CustomerManagedKey) MarshalJSON added in v0.23.0

func (s CustomerManagedKey) MarshalJSON() ([]byte, error)

func (*CustomerManagedKey) UnmarshalJSON added in v0.23.0

func (s *CustomerManagedKey) UnmarshalJSON(b []byte) error

type DeleteCredentialRequest

type DeleteCredentialRequest struct {
	// Databricks Account API credential configuration ID
	CredentialsId string `json:"-" url:"-"`
}

Delete credential configuration

type DeleteEncryptionKeyRequest

type DeleteEncryptionKeyRequest struct {
	// Databricks encryption key configuration ID.
	CustomerManagedKeyId string `json:"-" url:"-"`
}

Delete encryption key configuration

type DeleteNetworkRequest

type DeleteNetworkRequest struct {
	// Databricks Account API network configuration ID.
	NetworkId string `json:"-" url:"-"`
}

Delete a network configuration

type DeletePrivateAccesRequest

type DeletePrivateAccesRequest struct {
	// Databricks Account API private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
}

Delete a private access settings object

type DeleteStorageRequest

type DeleteStorageRequest struct {
	// Databricks Account API storage configuration ID.
	StorageConfigurationId string `json:"-" url:"-"`
}

Delete storage configuration

type DeleteVpcEndpointRequest

type DeleteVpcEndpointRequest struct {
	// Databricks VPC endpoint ID.
	VpcEndpointId string `json:"-" url:"-"`
}

Delete VPC endpoint configuration

type DeleteWorkspaceRequest

type DeleteWorkspaceRequest struct {
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

Delete a workspace

type EncryptionKeysAPI

type EncryptionKeysAPI struct {
	// contains filtered or unexported fields
}

These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:

* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history.

* Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.

In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.

func NewEncryptionKeys

func NewEncryptionKeys(client *client.DatabricksClient) *EncryptionKeysAPI

func (*EncryptionKeysAPI) Create

func (a *EncryptionKeysAPI) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)

Create encryption key configuration.

Creates a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If the key is assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for workspace storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.

**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions that currently support creation of Databricks workspaces.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

Example (EncryptionKeys)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{
	AwsKeyInfo: &provisioning.CreateAwsKeyInfo{
		KeyArn:   os.Getenv("TEST_MANAGED_KMS_KEY_ARN"),
		KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"),
	},
	UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}
Output:

func (*EncryptionKeysAPI) Delete

func (a *EncryptionKeysAPI) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error

Delete encryption key configuration.

Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.

func (*EncryptionKeysAPI) DeleteByCustomerManagedKeyId

func (a *EncryptionKeysAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error

Delete encryption key configuration.

Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.

func (*EncryptionKeysAPI) Get

func (a *EncryptionKeysAPI) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)

Get encryption key configuration.

Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.

**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.

This operation is available only if your account is on the E2 version of the platform.

Example (EncryptionKeys)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{
	AwsKeyInfo: &provisioning.CreateAwsKeyInfo{
		KeyArn:   os.Getenv("TEST_MANAGED_KMS_KEY_ARN"),
		KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"),
	},
	UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.EncryptionKeys.GetByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}
Output:

func (*EncryptionKeysAPI) GetByCustomerManagedKeyId

func (a *EncryptionKeysAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

Get encryption key configuration.

Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.

**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.

This operation is available only if your account is on the E2 version of the platform.

func (*EncryptionKeysAPI) Impl

func (a *EncryptionKeysAPI) Impl() EncryptionKeysService

Impl returns the low-level EncryptionKeys API implementation.

Deprecated: use MockEncryptionKeysInterface instead.

func (*EncryptionKeysAPI) List

func (a *EncryptionKeysAPI) List(ctx context.Context) ([]CustomerManagedKey, error)

Get all encryption key configurations.

Gets all customer-managed key configuration objects for an account. If the key is specified as a workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS volumes data in the data plane.

**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.

This operation is available only if your account is on the E2 version of the platform.

func (*EncryptionKeysAPI) WithImpl

func (a *EncryptionKeysAPI) WithImpl(impl EncryptionKeysService) EncryptionKeysInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

Deprecated: use MockEncryptionKeysInterface instead.

type EncryptionKeysInterface added in v0.29.0

type EncryptionKeysInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockEncryptionKeysInterface instead.
	WithImpl(impl EncryptionKeysService) EncryptionKeysInterface

	// Impl returns low-level EncryptionKeys API implementation
	// Deprecated: use MockEncryptionKeysInterface instead.
	Impl() EncryptionKeysService

	// Create encryption key configuration.
	//
	// Creates a customer-managed key configuration object for an account, specified
	// by ID. This operation uploads a reference to a customer-managed key to
	// Databricks. If the key is assigned as a workspace's customer-managed key for
	// managed services, Databricks uses the key to encrypt the workspace's notebooks
	// and secrets in the control plane, in addition to Databricks SQL queries and
	// query history. If it is specified as a workspace's customer-managed key for
	// workspace storage, the key encrypts the workspace's root S3 bucket (which
	// contains the workspace's root DBFS and system data) and, optionally, cluster
	// EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions that currently support creation of
	// Databricks workspaces.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)

	// Delete encryption key configuration.
	//
	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running workspace.
	Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error

	// Delete encryption key configuration.
	//
	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running workspace.
	DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error

	// Get encryption key configuration.
	//
	// Gets a customer-managed key configuration object for an account, specified by
	// ID. This operation uploads a reference to a customer-managed key to
	// Databricks. If assigned as a workspace's customer-managed key for managed
	// services, Databricks uses the key to encrypt the workspace's notebooks and
	// secrets in the control plane, in addition to Databricks SQL queries and query
	// history. If it is specified as a workspace's customer-managed key for
	// storage, the key encrypts the workspace's root S3 bucket (which contains the
	// workspace's root DBFS and system data) and, optionally, cluster EBS volume
	// data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.",
	Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Get encryption key configuration.
	//
	// Gets a customer-managed key configuration object for an account, specified by
	// ID. This operation uploads a reference to a customer-managed key to
	// Databricks. If assigned as a workspace's customer-managed key for managed
	// services, Databricks uses the key to encrypt the workspace's notebooks and
	// secrets in the control plane, in addition to Databricks SQL queries and query
	// history. If it is specified as a workspace's customer-managed key for
	// storage, the key encrypts the workspace's root S3 bucket (which contains the
	// workspace's root DBFS and system data) and, optionally, cluster EBS volume
	// data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.",
	GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

	// Get all encryption key configurations.
	//
	// Gets all customer-managed key configuration objects for an account. If the
	// key is specified as a workspace's managed services customer-managed key,
	// Databricks uses the key to encrypt the workspace's notebooks and secrets in
	// the control plane, in addition to Databricks SQL queries and query history.
	// If the key is specified as a workspace's storage customer-managed key, the
	// key is used to encrypt the workspace's root S3 bucket and optionally can
	// encrypt cluster EBS volumes data in the data plane.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	List(ctx context.Context) ([]CustomerManagedKey, error)
}

type EncryptionKeysService

type EncryptionKeysService interface {

	// Create encryption key configuration.
	//
	// Creates a customer-managed key configuration object for an account,
	// specified by ID. This operation uploads a reference to a customer-managed
	// key to Databricks. If the key is assigned as a workspace's
	// customer-managed key for managed services, Databricks uses the key to
	// encrypt the workspace's notebooks and secrets in the control plane, in
	// addition to Databricks SQL queries and query history. If it is specified
	// as a workspace's customer-managed key for workspace storage, the key
	// encrypts the workspace's root S3 bucket (which contains the workspace's
	// root DBFS and system data) and, optionally, cluster EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some
	// deployment types, subscription types, and AWS regions that currently
	// support creation of Databricks workspaces.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)

	// Delete encryption key configuration.
	//
	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running
	// workspace.
	Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error

	// Get encryption key configuration.
	//
	// Gets a customer-managed key configuration object for an account,
	// specified by ID. This operation uploads a reference to a customer-managed
	// key to Databricks. If assigned as a workspace's customer-managed key for
	// managed services, Databricks uses the key to encrypt the workspace's
	// notebooks and secrets in the control plane, in addition to Databricks SQL
	// queries and query history. If it is specified as a workspace's
	// customer-managed key for storage, the key encrypts the workspace's root
	// S3 bucket (which contains the workspace's root DBFS and system data) and,
	// optionally, cluster EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some
	// deployment types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.",
	Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Get all encryption key configurations.
	//
	// Gets all customer-managed key configuration objects for an account. If
	// the key is specified as a workspace's managed services customer-managed
	// key, Databricks uses the key to encrypt the workspace's notebooks and
	// secrets in the control plane, in addition to Databricks SQL queries and
	// query history. If the key is specified as a workspace's storage
	// customer-managed key, the key is used to encrypt the workspace's root S3
	// bucket and optionally can encrypt cluster EBS volumes data in the data
	// plane.
	//
	// **Important**: Customer-managed keys are supported only for some
	// deployment types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.
	List(ctx context.Context) ([]CustomerManagedKey, error)
}

These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:

* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history.

* Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.

In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.

type EndpointUseCase

type EndpointUseCase string

This enumeration represents the type of Databricks VPC endpoint service that was used when creating this VPC endpoint.

const EndpointUseCaseDataplaneRelayAccess EndpointUseCase = `DATAPLANE_RELAY_ACCESS`
const EndpointUseCaseWorkspaceAccess EndpointUseCase = `WORKSPACE_ACCESS`

func (*EndpointUseCase) Set

func (f *EndpointUseCase) Set(v string) error

Set raw string value and validate it against allowed values

func (*EndpointUseCase) String

func (f *EndpointUseCase) String() string

String representation for fmt.Print

func (*EndpointUseCase) Type

func (f *EndpointUseCase) Type() string

Type always returns EndpointUseCase to satisfy [pflag.Value] interface
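
The enum types in this package implement the [pflag.Value] interface, so Set rejects anything outside the allowed values. A small sketch:

var uc provisioning.EndpointUseCase
if err := uc.Set("WORKSPACE_ACCESS"); err != nil {
	panic(err)
}
fmt.Println(uc.String())

// A value outside the enum is rejected with an error:
if err := uc.Set("SOMETHING_ELSE"); err != nil {
	fmt.Println(err)
}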

type ErrorType

type ErrorType string

The AWS resource associated with this error: credentials, VPC, subnet, security group, or network ACL.

const ErrorTypeCredentials ErrorType = `credentials`
const ErrorTypeNetworkAcl ErrorType = `networkAcl`
const ErrorTypeSecurityGroup ErrorType = `securityGroup`
const ErrorTypeSubnet ErrorType = `subnet`
const ErrorTypeVpc ErrorType = `vpc`

func (*ErrorType) Set

func (f *ErrorType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ErrorType) String

func (f *ErrorType) String() string

String representation for fmt.Print

func (*ErrorType) Type

func (f *ErrorType) Type() string

Type always returns ErrorType to satisfy [pflag.Value] interface

type GcpKeyInfo added in v0.9.0

type GcpKeyInfo struct {
	// The GCP KMS key's resource name
	KmsKeyId string `json:"kms_key_id"`
}

type GcpManagedNetworkConfig

type GcpManagedNetworkConfig struct {
	// The IP range from which to allocate GKE cluster pods. No bigger than `/9`
	// and no smaller than `/21`.
	GkeClusterPodIpRange string `json:"gke_cluster_pod_ip_range,omitempty"`
	// The IP range from which to allocate GKE cluster services. No bigger than
	// `/16` and no smaller than `/27`.
	GkeClusterServiceIpRange string `json:"gke_cluster_service_ip_range,omitempty"`
	// The IP range from which to allocate GKE cluster nodes. No bigger than
	// `/9` and no smaller than `/29`.
	SubnetCidr string `json:"subnet_cidr,omitempty"`

	ForceSendFields []string `json:"-"`
}

The network settings for the workspace. The configurations are only for Databricks-managed VPCs. It is ignored if you specify a customer-managed VPC in the `network_id` field. All the IP range configurations must be mutually exclusive. An attempt to create a workspace fails if Databricks detects an IP range overlap.

Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`.

The sizes of these IP ranges affect the maximum number of nodes for the workspace.

**Important**: Confirm the IP ranges used by your Databricks workspace before creating the workspace. You cannot change them after your workspace is deployed. If the IP address ranges for your Databricks workspace are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To determine the address range sizes that you need, Databricks provides a calculator as a Microsoft Excel spreadsheet. See calculate subnet sizes for a new workspace.
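
A sketch of a Databricks-managed network configuration whose three ranges are mutually exclusive, sit inside the allowed blocks, and respect the size bounds above (the CIDRs are illustrative):

netCfg := provisioning.GcpManagedNetworkConfig{
	SubnetCidr:               "10.0.0.0/22", // GKE nodes; between /9 and /29
	GkeClusterPodIpRange:     "10.1.0.0/16", // pods; between /9 and /21
	GkeClusterServiceIpRange: "10.2.0.0/20", // services; between /16 and /27
}
fmt.Printf("%+v\n", netCfg)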

func (GcpManagedNetworkConfig) MarshalJSON added in v0.23.0

func (s GcpManagedNetworkConfig) MarshalJSON() ([]byte, error)

func (*GcpManagedNetworkConfig) UnmarshalJSON added in v0.23.0

func (s *GcpManagedNetworkConfig) UnmarshalJSON(b []byte) error

type GcpNetworkInfo

type GcpNetworkInfo struct {
	// The Google Cloud project ID of the VPC network.
	NetworkProjectId string `json:"network_project_id"`
	// The name of the secondary IP range for pods. A Databricks-managed GKE
	// cluster uses this IP range for its pods. This secondary IP range can be
	// used by only one workspace.
	PodIpRangeName string `json:"pod_ip_range_name"`
	// The name of the secondary IP range for services. A Databricks-managed GKE
	// cluster uses this IP range for its services. This secondary IP range can
	// be used by only one workspace.
	ServiceIpRangeName string `json:"service_ip_range_name"`
	// The ID of the subnet associated with this network.
	SubnetId string `json:"subnet_id"`
	// The Google Cloud region of the workspace data plane (for example,
	// `us-east4`).
	SubnetRegion string `json:"subnet_region"`
	// The ID of the VPC associated with this network. VPC IDs can be used in
	// multiple network configurations.
	VpcId string `json:"vpc_id"`
}

The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and secondary IP ranges).
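
A sketch of registering a customer-managed VPC on Google Cloud with NetworksAPI.Create (documented below); every identifier here is a placeholder:

ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{
	NetworkName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	GcpNetworkInfo: &provisioning.GcpNetworkInfo{
		NetworkProjectId:   "my-gcp-project",
		VpcId:              "my-vpc",
		SubnetId:           "my-subnet",
		SubnetRegion:       "us-east4",
		PodIpRangeName:     "pods",
		ServiceIpRangeName: "services",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", netw)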

type GcpVpcEndpointInfo

type GcpVpcEndpointInfo struct {
	// Region of the PSC endpoint.
	EndpointRegion string `json:"endpoint_region"`
	// The Google Cloud project ID of the VPC network where the PSC connection
	// resides.
	ProjectId string `json:"project_id"`
	// The unique ID of this PSC connection.
	PscConnectionId string `json:"psc_connection_id,omitempty"`
	// The name of the PSC endpoint in the Google Cloud project.
	PscEndpointName string `json:"psc_endpoint_name"`
	// The service attachment this PSC connection connects to.
	ServiceAttachmentId string `json:"service_attachment_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

The Google Cloud specific information for this Private Service Connect endpoint.

func (GcpVpcEndpointInfo) MarshalJSON added in v0.23.0

func (s GcpVpcEndpointInfo) MarshalJSON() ([]byte, error)

func (*GcpVpcEndpointInfo) UnmarshalJSON added in v0.23.0

func (s *GcpVpcEndpointInfo) UnmarshalJSON(b []byte) error

type GetCredentialRequest

type GetCredentialRequest struct {
	// Databricks Account API credential configuration ID
	CredentialsId string `json:"-" url:"-"`
}

Get credential configuration

type GetEncryptionKeyRequest

type GetEncryptionKeyRequest struct {
	// Databricks encryption key configuration ID.
	CustomerManagedKeyId string `json:"-" url:"-"`
}

Get encryption key configuration

type GetNetworkRequest

type GetNetworkRequest struct {
	// Databricks Account API network configuration ID.
	NetworkId string `json:"-" url:"-"`
}

Get a network configuration

type GetPrivateAccesRequest

type GetPrivateAccesRequest struct {
	// Databricks Account API private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
}

Get a private access settings object

type GetStorageRequest

type GetStorageRequest struct {
	// Databricks Account API storage configuration ID.
	StorageConfigurationId string `json:"-" url:"-"`
}

Get storage configuration

type GetVpcEndpointRequest

type GetVpcEndpointRequest struct {
	// Databricks VPC endpoint ID.
	VpcEndpointId string `json:"-" url:"-"`
}

Get a VPC endpoint configuration

type GetWorkspaceRequest

type GetWorkspaceRequest struct {
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

Get a workspace

type GkeConfig

type GkeConfig struct {
	// Specifies the network connectivity types for the GKE nodes and the GKE
	// master network.
	//
	// Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the
	// workspace. The GKE nodes will not have public IPs.
	//
	// Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of
	// a public GKE cluster have public IP addresses.
	ConnectivityType GkeConfigConnectivityType `json:"connectivity_type,omitempty"`
	// The IP range from which to allocate GKE cluster master resources. This
	// field will be ignored if GKE private cluster is not enabled.
	//
	// It must be exactly `/28` in size.
	MasterIpRange string `json:"master_ip_range,omitempty"`

	ForceSendFields []string `json:"-"`
}

The configurations for the GKE cluster of a Databricks workspace.
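
A sketch of a private GKE cluster configuration; the master IP range is illustrative and must be a /28 block that does not overlap the workspace's other ranges:

gke := provisioning.GkeConfig{
	ConnectivityType: provisioning.GkeConfigConnectivityTypePrivateNodePublicMaster,
	MasterIpRange:    "10.3.0.0/28",
}
fmt.Printf("%+v\n", gke)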

func (GkeConfig) MarshalJSON added in v0.23.0

func (s GkeConfig) MarshalJSON() ([]byte, error)

func (*GkeConfig) UnmarshalJSON added in v0.23.0

func (s *GkeConfig) UnmarshalJSON(b []byte) error

type GkeConfigConnectivityType

type GkeConfigConnectivityType string

Specifies the network connectivity types for the GKE nodes and the GKE master network.

Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE nodes will not have public IPs.

Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE cluster have public IP addresses.

const GkeConfigConnectivityTypePrivateNodePublicMaster GkeConfigConnectivityType = `PRIVATE_NODE_PUBLIC_MASTER`
const GkeConfigConnectivityTypePublicNodePublicMaster GkeConfigConnectivityType = `PUBLIC_NODE_PUBLIC_MASTER`

func (*GkeConfigConnectivityType) Set

func (f *GkeConfigConnectivityType) Set(v string) error

Set raw string value and validate it against allowed values

func (*GkeConfigConnectivityType) String

func (f *GkeConfigConnectivityType) String() string

String representation for fmt.Print

func (*GkeConfigConnectivityType) Type

func (f *GkeConfigConnectivityType) Type() string

Type always returns GkeConfigConnectivityType to satisfy [pflag.Value] interface

type KeyUseCase

type KeyUseCase string

Possible values are:

* `MANAGED_SERVICES`: Encrypts notebook and secret data in the control plane.

* `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes.

const KeyUseCaseManagedServices KeyUseCase = `MANAGED_SERVICES`

Encrypts notebook and secret data in the control plane

const KeyUseCaseStorage KeyUseCase = `STORAGE`

Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes.

func (*KeyUseCase) Set

func (f *KeyUseCase) Set(v string) error

Set raw string value and validate it against allowed values

func (*KeyUseCase) String

func (f *KeyUseCase) String() string

String representation for fmt.Print

func (*KeyUseCase) Type

func (f *KeyUseCase) Type() string

Type always returns KeyUseCase to satisfy [pflag.Value] interface

type Network

type Network struct {
	// The Databricks account ID associated with this network configuration.
	AccountId string `json:"account_id,omitempty"`
	// Time in epoch milliseconds when the network was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Array of error messages about the network configuration.
	ErrorMessages []NetworkHealth `json:"error_messages,omitempty"`
	// The Google Cloud specific information for this network (for example, the
	// VPC ID, subnet ID, and secondary IP ranges).
	GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"`
	// The Databricks network configuration ID.
	NetworkId string `json:"network_id,omitempty"`
	// The human-readable name of the network configuration.
	NetworkName string `json:"network_name,omitempty"`

	SecurityGroupIds []string `json:"security_group_ids,omitempty"`

	SubnetIds []string `json:"subnet_ids,omitempty"`
	// If specified, contains the VPC endpoints used to allow cluster
	// communication from this VPC over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"`
	// The ID of the VPC associated with this network configuration. VPC IDs can
	// be used in multiple networks.
	VpcId string `json:"vpc_id,omitempty"`
	// The status of this network configuration object in terms of its use in a
	// workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`:
	// Broken. * `WARNED`: Warned.
	VpcStatus VpcStatus `json:"vpc_status,omitempty"`
	// Array of warning messages about the network configuration.
	WarningMessages []NetworkWarning `json:"warning_messages,omitempty"`
	// Workspace ID associated with this network configuration.
	WorkspaceId int64 `json:"workspace_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Network) MarshalJSON added in v0.23.0

func (s Network) MarshalJSON() ([]byte, error)

func (*Network) UnmarshalJSON added in v0.23.0

func (s *Network) UnmarshalJSON(b []byte) error

type NetworkHealth

type NetworkHealth struct {
	// Details of the error.
	ErrorMessage string `json:"error_message,omitempty"`
	// The AWS resource associated with this error: credentials, VPC, subnet,
	// security group, or network ACL.
	ErrorType ErrorType `json:"error_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (NetworkHealth) MarshalJSON added in v0.23.0

func (s NetworkHealth) MarshalJSON() ([]byte, error)

func (*NetworkHealth) UnmarshalJSON added in v0.23.0

func (s *NetworkHealth) UnmarshalJSON(b []byte) error

type NetworkVpcEndpoints

type NetworkVpcEndpoints struct {
	// The VPC endpoint ID used by this network to access the Databricks secure
	// cluster connectivity relay.
	DataplaneRelay []string `json:"dataplane_relay"`
	// The VPC endpoint ID used by this network to access the Databricks REST
	// API.
	RestApi []string `json:"rest_api"`
}

If specified, contains the VPC endpoints used to allow cluster communication from this VPC over AWS PrivateLink.
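
A sketch of the shape, using placeholder Databricks VPC endpoint IDs (note that both fields lack `omitempty`, so they are always serialized):

endpoints := provisioning.NetworkVpcEndpoints{
	// Databricks VPC endpoint IDs (not AWS IDs); placeholders.
	DataplaneRelay: []string{"placeholder-relay-endpoint-id"},
	RestApi:        []string{"placeholder-rest-api-endpoint-id"},
}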

type NetworkWarning

type NetworkWarning struct {
	// Details of the warning.
	WarningMessage string `json:"warning_message,omitempty"`
	// The AWS resource associated with this warning: a subnet or a security
	// group.
	WarningType WarningType `json:"warning_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (NetworkWarning) MarshalJSON added in v0.23.0

func (s NetworkWarning) MarshalJSON() ([]byte, error)

func (*NetworkWarning) UnmarshalJSON added in v0.23.0

func (s *NetworkWarning) UnmarshalJSON(b []byte) error

type NetworksAPI

type NetworksAPI struct {
	// contains filtered or unexported fields
}

These APIs manage network configurations for customer-managed VPCs (optional). A network configuration's ID is used when creating a new workspace if you use customer-managed VPCs.

func NewNetworks

func NewNetworks(client *client.DatabricksClient) *NetworksAPI

func (*NetworksAPI) Create

func (a *NetworksAPI) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)

Create network configuration.

Creates a Databricks network configuration that represents a VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets.

Example (Networks)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{
	NetworkName:      fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	VpcId:            fmt.Sprintf("%x", time.Now().UnixNano()),
	SubnetIds:        []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())},
	SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", netw)
Output:

func (*NetworksAPI) Delete

func (a *NetworksAPI) Delete(ctx context.Context, request DeleteNetworkRequest) error

Delete a network configuration.

Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.

This operation is available only if your account is on the E2 version of the platform.

func (*NetworksAPI) DeleteByNetworkId

func (a *NetworksAPI) DeleteByNetworkId(ctx context.Context, networkId string) error

Delete a network configuration.

Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.

This operation is available only if your account is on the E2 version of the platform.

func (*NetworksAPI) Get

func (a *NetworksAPI) Get(ctx context.Context, request GetNetworkRequest) (*Network, error)

Get a network configuration.

Gets a Databricks network configuration, which represents a cloud VPC and its resources.

Example (Networks)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{
	NetworkName:      fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	VpcId:            fmt.Sprintf("%x", time.Now().UnixNano()),
	SubnetIds:        []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())},
	SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", netw)

byId, err := a.Networks.GetByNetworkId(ctx, netw.NetworkId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)
Output:

func (*NetworksAPI) GetByNetworkId

func (a *NetworksAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error)

Get a network configuration.

Gets a Databricks network configuration, which represents a cloud VPC and its resources.

func (*NetworksAPI) GetByNetworkName

func (a *NetworksAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error)

GetByNetworkName calls NetworksAPI.NetworkNetworkNameToNetworkIdMap and returns a single Network.

Returns an error if there's more than one Network with the same .NetworkName.

Note: All Network instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*NetworksAPI) Impl

func (a *NetworksAPI) Impl() NetworksService

Impl returns low-level Networks API implementation. Deprecated: use MockNetworksInterface instead.

func (*NetworksAPI) List

func (a *NetworksAPI) List(ctx context.Context) ([]Network, error)

Get all network configurations.

Gets a list of all Databricks network configurations for an account, specified by ID.

This operation is available only if your account is on the E2 version of the platform.

func (*NetworksAPI) NetworkNetworkNameToNetworkIdMap

func (a *NetworksAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)

NetworkNetworkNameToNetworkIdMap calls NetworksAPI.List and creates a map of results with Network.NetworkName as key and Network.NetworkId as value.

Returns an error if there's more than one Network with the same .NetworkName.

Note: All Network instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.
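
For example, the map makes it straightforward to resolve a configuration name to its ID (a sketch; "my-network" is a placeholder name, and the full list is loaded into memory):

ids, err := a.Networks.NetworkNetworkNameToNetworkIdMap(ctx)
if err != nil {
	panic(err)
}
// Look up the network configuration ID for a known name.
networkId, ok := ids["my-network"]
if !ok {
	panic("no such network configuration")
}
logger.Infof(ctx, "found network %s", networkId)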

func (*NetworksAPI) WithImpl

func (a *NetworksAPI) WithImpl(impl NetworksService) NetworksInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockNetworksInterface instead.

type NetworksInterface added in v0.29.0

type NetworksInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockNetworksInterface instead.
	WithImpl(impl NetworksService) NetworksInterface

	// Impl returns low-level Networks API implementation
	// Deprecated: use MockNetworksInterface instead.
	Impl() NetworksService

	// Create network configuration.
	//
	// Creates a Databricks network configuration that represents a VPC and its
	// resources. The VPC will be used for new Databricks clusters. This requires a
	// pre-existing VPC and subnets.
	Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)

	// Delete a network configuration.
	//
	// Deletes a Databricks network configuration, which represents a cloud VPC and
	// its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	Delete(ctx context.Context, request DeleteNetworkRequest) error

	// Delete a network configuration.
	//
	// Deletes a Databricks network configuration, which represents a cloud VPC and
	// its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	DeleteByNetworkId(ctx context.Context, networkId string) error

	// Get a network configuration.
	//
	// Gets a Databricks network configuration, which represents a cloud VPC and its
	// resources.
	Get(ctx context.Context, request GetNetworkRequest) (*Network, error)

	// Get a network configuration.
	//
	// Gets a Databricks network configuration, which represents a cloud VPC and its
	// resources.
	GetByNetworkId(ctx context.Context, networkId string) (*Network, error)

	// Get all network configurations.
	//
	// Gets a list of all Databricks network configurations for an account,
	// specified by ID.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	List(ctx context.Context) ([]Network, error)

	// NetworkNetworkNameToNetworkIdMap calls [NetworksAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value.
	//
	// Returns an error if there's more than one [Network] with the same .NetworkName.
	//
	// Note: All [Network] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)

	// GetByNetworkName calls [NetworksAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network].
	//
	// Returns an error if there's more than one [Network] with the same .NetworkName.
	//
	// Note: All [Network] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByNetworkName(ctx context.Context, name string) (*Network, error)
}

type NetworksService

type NetworksService interface {

	// Create network configuration.
	//
	// Creates a Databricks network configuration that represents a VPC and its
	// resources. The VPC will be used for new Databricks clusters. This
	// requires a pre-existing VPC and subnets.
	Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)

	// Delete a network configuration.
	//
	// Deletes a Databricks network configuration, which represents a cloud VPC
	// and its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.
	Delete(ctx context.Context, request DeleteNetworkRequest) error

	// Get a network configuration.
	//
	// Gets a Databricks network configuration, which represents a cloud VPC and
	// its resources.
	Get(ctx context.Context, request GetNetworkRequest) (*Network, error)

	// Get all network configurations.
	//
	// Gets a list of all Databricks network configurations for an account,
	// specified by ID.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.
	List(ctx context.Context) ([]Network, error)
}

These APIs manage network configurations for customer-managed VPCs (optional). A network configuration's ID is used when creating a new workspace if you use customer-managed VPCs.

type PricingTier

type PricingTier string

The pricing tier of the workspace. For pricing tier information, see AWS Pricing.

const PricingTierCommunityEdition PricingTier = `COMMUNITY_EDITION`
const PricingTierDedicated PricingTier = `DEDICATED`
const PricingTierEnterprise PricingTier = `ENTERPRISE`
const PricingTierPremium PricingTier = `PREMIUM`
const PricingTierStandard PricingTier = `STANDARD`
const PricingTierUnknown PricingTier = `UNKNOWN`

func (*PricingTier) Set

func (f *PricingTier) Set(v string) error

Set raw string value and validate it against allowed values

func (*PricingTier) String

func (f *PricingTier) String() string

String representation for fmt.Print

func (*PricingTier) Type

func (f *PricingTier) Type() string

Type always returns PricingTier to satisfy [pflag.Value] interface
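
Because PricingTier and the other enum types here satisfy pflag.Value, they can be bound directly to command-line flags; a minimal sketch using github.com/spf13/pflag:

var tier provisioning.PricingTier
flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
// Set validates the raw value against the allowed pricing tiers.
flags.Var(&tier, "pricing-tier", "workspace pricing tier")
if err := flags.Parse([]string{"--pricing-tier", "PREMIUM"}); err != nil {
	panic(err)
}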

type PrivateAccessAPI

type PrivateAccessAPI struct {
	// contains filtered or unexported fields
}

These APIs manage private access settings for this account.

func NewPrivateAccess

func NewPrivateAccess(client *client.DatabricksClient) *PrivateAccessAPI

func (*PrivateAccessAPI) Create

func (a *PrivateAccessAPI) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)

Create private access settings.

Creates a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property.

You can share one private access settings object with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}
Output:

func (*PrivateAccessAPI) Delete

func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAccesRequest) error

Delete a private access settings object.

Deletes a private access settings object, which determines how your workspace is accessed over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

func (*PrivateAccessAPI) DeleteByPrivateAccessSettingsId

func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error

Delete a private access settings object.

Deletes a private access settings object, which determines how your workspace is accessed over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

func (*PrivateAccessAPI) Get

func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)

Get a private access settings object.

Gets a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.PrivateAccess.GetByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}
Output:

func (*PrivateAccessAPI) GetByPrivateAccessSettingsId

func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

Get a private access settings object.

Gets a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

func (*PrivateAccessAPI) GetByPrivateAccessSettingsName

func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)

GetByPrivateAccessSettingsName calls PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap and returns a single PrivateAccessSettings.

Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.

Note: All PrivateAccessSettings instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*PrivateAccessAPI) Impl

func (a *PrivateAccessAPI) Impl() PrivateAccessService

Impl returns low-level PrivateAccess API implementation. Deprecated: use MockPrivateAccessInterface instead.

func (*PrivateAccessAPI) List

func (a *PrivateAccessAPI) List(ctx context.Context) ([]PrivateAccessSettings, error)

Get all private access settings objects.

Gets a list of all private access settings objects for an account, specified by ID.

func (*PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap

func (a *PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)

PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls PrivateAccessAPI.List and creates a map of results with PrivateAccessSettings.PrivateAccessSettingsName as key and PrivateAccessSettings.PrivateAccessSettingsId as value.

Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.

Note: All PrivateAccessSettings instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*PrivateAccessAPI) Replace

func (a *PrivateAccessAPI) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error

Replace private access settings.

Updates an existing private access settings object, which specifies how your workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property.

This operation completely overwrites your existing private access settings object attached to your workspaces. All workspaces attached to the private access settings are affected by any change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of these changes might take several minutes to propagate to the workspace API.

You can share one private access settings object with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = a.PrivateAccess.Replace(ctx, provisioning.UpsertPrivateAccessSettingsRequest{
	PrivateAccessSettingsId:   created.PrivateAccessSettingsId,
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}

// cleanup

err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}
Output:

func (*PrivateAccessAPI) WithImpl

func (a *PrivateAccessAPI) WithImpl(impl PrivateAccessService) PrivateAccessInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockPrivateAccessInterface instead.

type PrivateAccessInterface added in v0.29.0

type PrivateAccessInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockPrivateAccessInterface instead.
	WithImpl(impl PrivateAccessService) PrivateAccessInterface

	// Impl returns low-level PrivateAccess API implementation
	// Deprecated: use MockPrivateAccessInterface instead.
	Impl() PrivateAccessService

	// Create private access settings.
	//
	// Creates a private access settings object, which specifies how your workspace
	// is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must
	// have a private access settings object referenced by ID in the workspace's
	// `private_access_settings_id` property.
	//
	// You can share one private access settings object with multiple workspaces
	// in a single account. However, private access settings are specific to AWS
	// regions, so only workspaces in the same AWS region can use a given private
	// access settings object.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)

	// Delete a private access settings object.
	//
	// Deletes a private access settings object, which determines how your workspace
	// is accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Delete(ctx context.Context, request DeletePrivateAccesRequest) error

	// Delete a private access settings object.
	//
	// Deletes a private access settings object, which determines how your workspace
	// is accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error

	// Get a private access settings object.
	//
	// Gets a private access settings object, which specifies how your workspace is
	// accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)

	// Get a private access settings object.
	//
	// Gets a private access settings object, which specifies how your workspace is
	// accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

	// Get all private access settings objects.
	//
	// Gets a list of all private access settings objects for an account, specified
	// by ID.
	List(ctx context.Context) ([]PrivateAccessSettings, error)

	// PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value.
	//
	// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName.
	//
	// Note: All [PrivateAccessSettings] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)

	// GetByPrivateAccessSettingsName calls [PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings].
	//
	// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName.
	//
	// Note: All [PrivateAccessSettings] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)

	// Replace private access settings.
	//
	// Updates an existing private access settings object, which specifies how your
	// workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a
	// workspace must have a private access settings object referenced by ID in the
	// workspace's `private_access_settings_id` property.
	//
	// This operation completely overwrites your existing private access settings
	// object attached to your workspaces. All workspaces attached to the private
	// access settings are affected by any change. If `public_access_enabled`,
	// `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of
	// these changes might take several minutes to propagate to the workspace API.
	//
	// You can share one private access settings object with multiple workspaces in
	// a single account. However, private access settings are specific to AWS
	// regions, so only workspaces in the same AWS region can use a given private
	// access settings object.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error
}

type PrivateAccessLevel

type PrivateAccessLevel string

The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. * `ACCOUNT` level access (the default) allows only VPC endpoints that are registered in your Databricks account to connect to your workspace. * `ENDPOINT` level access allows only specified VPC endpoints to connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.

const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT`
const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT`
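
For instance, `ENDPOINT` level access is paired with an explicit allow list of registered VPC endpoint IDs (a sketch; the name, region, and endpoint ID are placeholders):

req := provisioning.UpsertPrivateAccessSettingsRequest{
	PrivateAccessSettingsName: "sdk-example",
	Region:                    "us-west-2",
	PrivateAccessLevel:        provisioning.PrivateAccessLevelEndpoint,
	// Databricks VPC endpoint IDs (not AWS IDs) allowed to connect.
	AllowedVpcEndpointIds: []string{"placeholder-endpoint-id"},
}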

func (*PrivateAccessLevel) Set

func (f *PrivateAccessLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*PrivateAccessLevel) String

func (f *PrivateAccessLevel) String() string

String representation for fmt.Print

func (*PrivateAccessLevel) Type

func (f *PrivateAccessLevel) Type() string

Type always returns PrivateAccessLevel to satisfy [pflag.Value] interface

type PrivateAccessService

type PrivateAccessService interface {

	// Create private access settings.
	//
	// Creates a private access settings object, which specifies how your
	// workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a
	// workspace must have a private access settings object referenced by ID in
	// the workspace's `private_access_settings_id` property.
	//
	// You can share one private access settings object with multiple
	// workspaces in a single account. However, private access settings are
	// specific to AWS regions, so only workspaces in the same AWS region can
	// use a given private access settings object.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)

	// Delete a private access settings object.
	//
	// Deletes a private access settings object, which determines how your
	// workspace is accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Delete(ctx context.Context, request DeletePrivateAccesRequest) error

	// Get a private access settings object.
	//
	// Gets a private access settings object, which specifies how your workspace
	// is accessed over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)

	// Get all private access settings objects.
	//
	// Gets a list of all private access settings objects for an account,
	// specified by ID.
	List(ctx context.Context) ([]PrivateAccessSettings, error)

	// Replace private access settings.
	//
	// Updates an existing private access settings object, which specifies how
	// your workspace is accessed over [AWS PrivateLink]. To use AWS
	// PrivateLink, a workspace must have a private access settings object
	// referenced by ID in the workspace's `private_access_settings_id`
	// property.
	//
	// This operation completely overwrites your existing private access
	// settings object attached to your workspaces. All workspaces attached to
	// the private access settings are affected by any change. If
	// `public_access_enabled`, `private_access_level`, or
	// `allowed_vpc_endpoint_ids` are updated, effects of these changes might
	// take several minutes to propagate to the workspace API.
	//
	// You can share one private access settings object with multiple workspaces
	// in a single account. However, private access settings are specific to AWS
	// regions, so only workspaces in the same AWS region can use a given
	// private access settings object.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error
}

These APIs manage private access settings for this account.

type PrivateAccessSettings

type PrivateAccessSettings struct {
	// The Databricks account ID that hosts the private access settings object.
	AccountId string `json:"account_id,omitempty"`
	// An array of Databricks VPC endpoint IDs.
	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
	// The private access level controls which VPC endpoints can connect to the
	// UI or API of any workspace that attaches this private access settings
	// object. * `ACCOUNT` level access (the default) allows only VPC endpoints
	// that are registered in your Databricks account to connect to your
	// workspace. * `ENDPOINT` level access allows only specified VPC endpoints
	// to connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.
	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
	// Databricks private access settings ID.
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// The human-readable name of the private access settings object.
	PrivateAccessSettingsName string `json:"private_access_settings_name,omitempty"`
	// Determines if the workspace can be accessed over the public internet. For
	// fully private workspaces, you can optionally specify `false`, but only if
	// you implement both the front-end and the back-end PrivateLink
	// connections. Otherwise, specify `true`, which means that public access is
	// enabled.
	PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
	// The cloud region for workspaces attached to this private access settings
	// object.
	Region string `json:"region,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (PrivateAccessSettings) MarshalJSON added in v0.23.0

func (s PrivateAccessSettings) MarshalJSON() ([]byte, error)

func (*PrivateAccessSettings) UnmarshalJSON added in v0.23.0

func (s *PrivateAccessSettings) UnmarshalJSON(b []byte) error

type RootBucketInfo

type RootBucketInfo struct {
	// The name of the S3 bucket.
	BucketName string `json:"bucket_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

Root S3 bucket information.

func (RootBucketInfo) MarshalJSON added in v0.23.0

func (s RootBucketInfo) MarshalJSON() ([]byte, error)

func (*RootBucketInfo) UnmarshalJSON added in v0.23.0

func (s *RootBucketInfo) UnmarshalJSON(b []byte) error

type StorageAPI

type StorageAPI struct {
	// contains filtered or unexported fields
}

These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.

func NewStorage

func NewStorage(client *client.DatabricksClient) *StorageAPI

func (*StorageAPI) Create

func (a *StorageAPI) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)

Create new storage configuration.

Creates new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the required bucket policy.

For information about how to create a new workspace with this API, see Create a new workspace using the Account API.

Example (LogDelivery)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

bucket, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", bucket)

// cleanup

err = a.Storage.DeleteByStorageConfigurationId(ctx, bucket.StorageConfigurationId)
if err != nil {
	panic(err)
}
Output:

Example (Storage)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)
Output:

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

// cleanup

err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
Output:

func (*StorageAPI) Delete

func (a *StorageAPI) Delete(ctx context.Context, request DeleteStorageRequest) error

Delete storage configuration.

Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace.

func (*StorageAPI) DeleteByStorageConfigurationId

func (a *StorageAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error

Delete storage configuration.

Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace.

func (*StorageAPI) Get

func (a *StorageAPI) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)

Get storage configuration.

Gets a Databricks storage configuration for an account, both specified by ID.

Example (Storage)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

byId, err := a.Storage.GetByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)
Output:

func (*StorageAPI) GetByStorageConfigurationId

func (a *StorageAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

Get storage configuration.

Gets a Databricks storage configuration for an account, both specified by ID.

func (*StorageAPI) GetByStorageConfigurationName

func (a *StorageAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)

GetByStorageConfigurationName calls StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap and returns a single StorageConfiguration.

Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.

Note: All StorageConfiguration instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*StorageAPI) Impl

func (a *StorageAPI) Impl() StorageService

Impl returns low-level Storage API implementation. Deprecated: use MockStorageInterface instead.

func (*StorageAPI) List

func (a *StorageAPI) List(ctx context.Context) ([]StorageConfiguration, error)

Get all storage configurations.

Gets a list of all Databricks storage configurations for your account, specified by ID.

func (*StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap

func (a *StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)

StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls StorageAPI.List and creates a map of results with StorageConfiguration.StorageConfigurationName as key and StorageConfiguration.StorageConfigurationId as value.

Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.

Note: All StorageConfiguration instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*StorageAPI) WithImpl

func (a *StorageAPI) WithImpl(impl StorageService) StorageInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks. Deprecated: use MockStorageInterface instead.

type StorageConfiguration

type StorageConfiguration struct {
	// The Databricks account ID that hosts the storage configuration.
	AccountId string `json:"account_id,omitempty"`
	// Time in epoch milliseconds when the storage configuration was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Root S3 bucket information.
	RootBucketInfo *RootBucketInfo `json:"root_bucket_info,omitempty"`
	// Databricks storage configuration ID.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// The human-readable name of the storage configuration.
	StorageConfigurationName string `json:"storage_configuration_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (StorageConfiguration) MarshalJSON added in v0.23.0

func (s StorageConfiguration) MarshalJSON() ([]byte, error)

func (*StorageConfiguration) UnmarshalJSON added in v0.23.0

func (s *StorageConfiguration) UnmarshalJSON(b []byte) error

type StorageInterface added in v0.29.0

type StorageInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockStorageInterface instead.
	WithImpl(impl StorageService) StorageInterface

	// Impl returns low-level Storage API implementation
	// Deprecated: use MockStorageInterface instead.
	Impl() StorageService

	// Create new storage configuration.
	//
	// Creates new storage configuration for an account, specified by ID. Uploads a
	// storage configuration object that represents the root AWS S3 bucket in your
	// account. Databricks stores related workspace assets including DBFS, cluster
	// logs, and job results. For the AWS S3 bucket, you need to configure the
	// required bucket policy.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)

	// Delete storage configuration.
	//
	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteStorageRequest) error

	// Delete storage configuration.
	//
	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error

	// Get storage configuration.
	//
	// Gets a Databricks storage configuration for an account, both specified by ID.
	Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)

	// Get storage configuration.
	//
	// Gets a Databricks storage configuration for an account, both specified by ID.
	GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

	// Get all storage configurations.
	//
	// Gets a list of all Databricks storage configurations for your account,
	// specified by ID.
	List(ctx context.Context) ([]StorageConfiguration, error)

	// StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StorageAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value.
	//
	// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName.
	//
	// Note: All [StorageConfiguration] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)

	// GetByStorageConfigurationName calls [StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration].
	//
	// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName.
	//
	// Note: All [StorageConfiguration] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)
}

type StorageService

type StorageService interface {

	// Create new storage configuration.
	//
	// Creates new storage configuration for an account, specified by ID.
	// Uploads a storage configuration object that represents the root AWS S3
	// bucket in your account. Databricks stores related workspace assets
	// including DBFS, cluster logs, and job results. For the AWS S3 bucket, you
	// need to configure the required bucket policy.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)

	// Delete storage configuration.
	//
	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteStorageRequest) error

	// Get storage configuration.
	//
	// Gets a Databricks storage configuration for an account, both specified by
	// ID.
	Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)

	// Get all storage configurations.
	//
	// Gets a list of all Databricks storage configurations for your account,
	// specified by ID.
	List(ctx context.Context) ([]StorageConfiguration, error)
}

These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.

type StsRole

type StsRole struct {
	// The external ID that needs to be trusted by the cross-account role. This
	// is always your Databricks account ID.
	ExternalId string `json:"external_id,omitempty"`
	// The Amazon Resource Name (ARN) of the cross-account role.
	RoleArn string `json:"role_arn,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (StsRole) MarshalJSON added in v0.23.0

func (s StsRole) MarshalJSON() ([]byte, error)

func (*StsRole) UnmarshalJSON added in v0.23.0

func (s *StsRole) UnmarshalJSON(b []byte) error

type UpdateWorkspaceRequest

type UpdateWorkspaceRequest struct {
	// The AWS region of the workspace's data plane (for example, `us-west-2`).
	// This parameter is available only for updating failed workspaces.
	AwsRegion string `json:"aws_region,omitempty"`
	// ID of the workspace's credential configuration object. This parameter is
	// available for updating both failed and running workspaces.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The custom tags key-value pairing that is attached to this workspace. The
	// key-value pair is a string of utf-8 characters. The value can be an empty
	// string, with maximum length of 255 characters. The key can be of maximum
	// length of 127 characters, and cannot be empty.
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The ID of the workspace's managed services encryption key configuration
	// object. This parameter is available only for updating failed workspaces.
	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
	// The ID of the network connectivity configuration object, which is the
	// parent resource of this private endpoint rule object.
	NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
	// The ID of the workspace's network configuration object. Used only if you
	// already use a customer-managed VPC. For failed workspaces only, you can
	// switch from a Databricks-managed VPC to a customer-managed VPC by
	// updating the workspace to add a network configuration ID.
	NetworkId string `json:"network_id,omitempty"`
	// The ID of the workspace's storage configuration object. This parameter is
	// available only for updating failed workspaces.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// The ID of the key configuration object for workspace storage. This
	// parameter is available for updating both failed and running workspaces.
	StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`

	ForceSendFields []string `json:"-"`
}
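
As a sketch, updating a running workspace touches only the fields documented above as available for running workspaces; the IDs below are placeholders, and the request would be passed to the account Workspaces service's update method:

update := provisioning.UpdateWorkspaceRequest{
	// Placeholder workspace ID.
	WorkspaceId: 123456789,
	// Both fields below can be updated on failed and running workspaces.
	CredentialsId:               "placeholder-credentials-id",
	StorageCustomerManagedKeyId: "placeholder-storage-key-id",
}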

func (UpdateWorkspaceRequest) MarshalJSON added in v0.23.0

func (s UpdateWorkspaceRequest) MarshalJSON() ([]byte, error)

func (*UpdateWorkspaceRequest) UnmarshalJSON added in v0.23.0

func (s *UpdateWorkspaceRequest) UnmarshalJSON(b []byte) error

type UpsertPrivateAccessSettingsRequest

type UpsertPrivateAccessSettingsRequest struct {
	// An array of Databricks VPC endpoint IDs. This is the Databricks ID that
	// is returned when registering the VPC endpoint configuration in your
	// Databricks account. This is not the ID of the VPC endpoint in AWS.
	//
	// Only used when `private_access_level` is set to `ENDPOINT`. This is an
	// allow list of VPC endpoints in your account that can connect to your
	// workspace over AWS PrivateLink.
	//
	// If hybrid access to your workspace is enabled by setting
	// `public_access_enabled` to `true`, this control only works for
	// PrivateLink connections. To control how your workspace is accessed via
	// the public internet, see [IP access lists].
	//
	// [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
	// The private access level controls which VPC endpoints can connect to the
	// UI or API of any workspace that attaches this private access settings
	// object. * `ACCOUNT` level access (the default) allows only VPC endpoints
	// that are registered in your Databricks account to connect to your
	// workspace. * `ENDPOINT` level access allows only specified VPC endpoints
	// to connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.
	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
	// Databricks Account API private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
	// The human-readable name of the private access settings object.
	PrivateAccessSettingsName string `json:"private_access_settings_name"`
	// Determines if the workspace can be accessed over the public internet. For
	// fully private workspaces, you can optionally specify `false`, but only if
	// you implement both the front-end and the back-end PrivateLink
	// connections. Otherwise, specify `true`, which means that public access is
	// enabled.
	PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
	// The cloud region for workspaces associated with this private access
	// settings object.
	Region string `json:"region"`

	ForceSendFields []string `json:"-"`
}

func (UpsertPrivateAccessSettingsRequest) MarshalJSON added in v0.23.0

func (s UpsertPrivateAccessSettingsRequest) MarshalJSON() ([]byte, error)

func (*UpsertPrivateAccessSettingsRequest) UnmarshalJSON added in v0.23.0

func (s *UpsertPrivateAccessSettingsRequest) UnmarshalJSON(b []byte) error

type VpcEndpoint

type VpcEndpoint struct {
	// The Databricks account ID that hosts the VPC endpoint configuration.
	AccountId string `json:"account_id,omitempty"`
	// The AWS Account in which the VPC endpoint object exists.
	AwsAccountId string `json:"aws_account_id,omitempty"`
	// The ID of the Databricks [endpoint service] that this VPC endpoint is
	// connected to. For a list of endpoint service IDs for each supported AWS
	// region, see the [Databricks PrivateLink documentation].
	//
	// [Databricks PrivateLink documentation]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
	AwsEndpointServiceId string `json:"aws_endpoint_service_id,omitempty"`
	// The ID of the VPC endpoint object in AWS.
	AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"`
	// The Google Cloud specific information for this Private Service Connect
	// endpoint.
	GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"`
	// The AWS region in which this VPC endpoint object exists.
	Region string `json:"region,omitempty"`
	// The current state (such as `available` or `rejected`) of the VPC
	// endpoint. Derived from AWS. For the full set of values, see [AWS
	// DescribeVpcEndpoint documentation].
	//
	// [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html
	State string `json:"state,omitempty"`
	// This enumeration represents the type of Databricks VPC [endpoint service]
	// that was used when creating this VPC endpoint.
	//
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
	UseCase EndpointUseCase `json:"use_case,omitempty"`
	// Databricks VPC endpoint ID. This is the Databricks-specific name of the
	// VPC endpoint. Do not confuse this with the `aws_vpc_endpoint_id`, which
	// is the ID within AWS of the VPC endpoint.
	VpcEndpointId string `json:"vpc_endpoint_id,omitempty"`
	// The human-readable name of the VPC endpoint configuration.
	VpcEndpointName string `json:"vpc_endpoint_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (VpcEndpoint) MarshalJSON added in v0.23.0

func (s VpcEndpoint) MarshalJSON() ([]byte, error)

func (*VpcEndpoint) UnmarshalJSON added in v0.23.0

func (s *VpcEndpoint) UnmarshalJSON(b []byte) error

type VpcEndpointsAPI

type VpcEndpointsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage VPC endpoint configurations for this account.

func NewVpcEndpoints

func NewVpcEndpoints(client *client.DatabricksClient) *VpcEndpointsAPI

func (*VpcEndpointsAPI) Create

func (a *VpcEndpointsAPI) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)

Create VPC endpoint configuration.

Creates a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.

After you create the VPC endpoint configuration, the Databricks endpoint service automatically accepts the VPC endpoint.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

Example (VpcEndpoints)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{
	AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"),
	Region:           os.Getenv("AWS_REGION"),
	VpcEndpointName:  fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}
Output:

func (*VpcEndpointsAPI) Delete

func (a *VpcEndpointsAPI) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error

Delete VPC endpoint configuration.

Deletes a VPC endpoint configuration, which represents an AWS VPC endpoint that can communicate privately with Databricks over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

func (*VpcEndpointsAPI) DeleteByVpcEndpointId

func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error

Delete VPC endpoint configuration.

Deletes a VPC endpoint configuration, which represents an AWS VPC endpoint that can communicate privately with Databricks over AWS PrivateLink.

Before configuring PrivateLink, read the Databricks article about PrivateLink.

func (*VpcEndpointsAPI) Get

func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)

Get a VPC endpoint configuration.

Gets a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.

Example (VpcEndpoints)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{
	AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"),
	Region:           os.Getenv("AWS_REGION"),
	VpcEndpointName:  fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.VpcEndpoints.GetByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}
Output:

func (*VpcEndpointsAPI) GetByVpcEndpointId

func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

Get a VPC endpoint configuration.

Gets a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.

func (*VpcEndpointsAPI) GetByVpcEndpointName

func (a *VpcEndpointsAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)

GetByVpcEndpointName calls VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap and returns a single VpcEndpoint.

Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.

Note: All VpcEndpoint instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.
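
For illustration only (this sketch is not one of the generated examples), a name-based lookup; `sdk-example-endpoint` is a hypothetical endpoint name, and `ctx` and the account client `a` are assumed to be set up as in the examples above:

// The lookup lists every endpoint and matches on name, so prefer
// GetByVpcEndpointId when the ID is already known.
byName, err := a.VpcEndpoints.GetByVpcEndpointName(ctx, "sdk-example-endpoint")
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byName)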

func (*VpcEndpointsAPI) Impl

func (a *VpcEndpointsAPI) Impl() VpcEndpointsService

Impl returns the low-level VpcEndpoints API implementation.

Deprecated: use MockVpcEndpointsInterface instead.

func (*VpcEndpointsAPI) List

func (a *VpcEndpointsAPI) List(ctx context.Context) ([]VpcEndpoint, error)

Get all VPC endpoint configurations.

Gets a list of all VPC endpoints for an account, specified by ID.

Before configuring PrivateLink, read the Databricks article about PrivateLink.
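
A minimal sketch, assuming `ctx` and the account client `a` from the examples above:

endpoints, err := a.VpcEndpoints.List(ctx)
if err != nil {
	panic(err)
}
for _, ep := range endpoints {
	// State is derived from AWS, e.g. `available` or `rejected`.
	logger.Infof(ctx, "endpoint %s (%s) is %s", ep.VpcEndpointName, ep.VpcEndpointId, ep.State)
}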

func (*VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap

func (a *VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)

VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls VpcEndpointsAPI.List and creates a map of results with VpcEndpoint.VpcEndpointName as key and VpcEndpoint.VpcEndpointId as value.

Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.

Note: All VpcEndpoint instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.
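
A sketch of the generated map helper, under the same client setup as above:

nameToId, err := a.VpcEndpoints.VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx)
if err != nil {
	// Duplicate endpoint names also surface here as an error.
	panic(err)
}
for name, id := range nameToId {
	logger.Infof(ctx, "%s -> %s", name, id)
}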

func (*VpcEndpointsAPI) WithImpl

func (a *VpcEndpointsAPI) WithImpl(impl VpcEndpointsService) VpcEndpointsInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

Deprecated: use MockVpcEndpointsInterface instead.

type VpcEndpointsInterface added in v0.29.0

type VpcEndpointsInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockVpcEndpointsInterface instead.
	WithImpl(impl VpcEndpointsService) VpcEndpointsInterface

	// Impl returns low-level VpcEndpoints API implementation
	// Deprecated: use MockVpcEndpointsInterface instead.
	Impl() VpcEndpointsService

	// Create VPC endpoint configuration.
	//
	// Creates a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// After you create the VPC endpoint configuration, the Databricks [endpoint
	// service] automatically accepts the VPC endpoint.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html
	Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)

	// Delete VPC endpoint configuration.
	//
	// Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint]
	// that can communicate privately with Databricks over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Delete(ctx context.Context, request DeleteVpcEndpointRequest) error

	// Delete VPC endpoint configuration.
	//
	// Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint]
	// that can communicate privately with Databricks over [AWS PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error

	// Get a VPC endpoint configuration.
	//
	// Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
	// in AWS used to communicate privately with Databricks over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)

	// Get a VPC endpoint configuration.
	//
	// Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
	// in AWS used to communicate privately with Databricks over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

	// Get all VPC endpoint configurations.
	//
	// Gets a list of all VPC endpoints for an account, specified by ID.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	List(ctx context.Context) ([]VpcEndpoint, error)

	// VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value.
	//
	// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName.
	//
	// Note: All [VpcEndpoint] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)

	// GetByVpcEndpointName calls [VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint].
	//
	// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName.
	//
	// Note: All [VpcEndpoint] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)
}

type VpcEndpointsService

type VpcEndpointsService interface {

	// Create VPC endpoint configuration.
	//
	// Creates a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// After you create the VPC endpoint configuration, the Databricks [endpoint
	// service] automatically accepts the VPC endpoint.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html
	Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)

	// Delete VPC endpoint configuration.
	//
	// Deletes a VPC endpoint configuration, which represents an [AWS VPC
	// endpoint] that can communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	Delete(ctx context.Context, request DeleteVpcEndpointRequest) error

	// Get a VPC endpoint configuration.
	//
	// Gets a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)

	// Get all VPC endpoint configurations.
	//
	// Gets a list of all VPC endpoints for an account, specified by ID.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	List(ctx context.Context) ([]VpcEndpoint, error)
}

These APIs manage VPC endpoint configurations for this account.

type VpcStatus

type VpcStatus string

The status of this network configuration object in terms of its use in a workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. * `WARNED`: Warned.

const VpcStatusBroken VpcStatus = `BROKEN`

Broken.

const VpcStatusUnattached VpcStatus = `UNATTACHED`

Unattached.

const VpcStatusValid VpcStatus = `VALID`

Valid.

const VpcStatusWarned VpcStatus = `WARNED`

Warned.

func (*VpcStatus) Set

func (f *VpcStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*VpcStatus) String

func (f *VpcStatus) String() string

String representation for fmt.Print

func (*VpcStatus) Type

func (f *VpcStatus) Type() string

Type always returns VpcStatus to satisfy the [pflag.Value] interface.
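
Because VpcStatus implements the pflag.Value interface, it can be bound directly to a command-line flag. A minimal sketch using github.com/spf13/pflag (the flag set and flag name are illustrative):

var status provisioning.VpcStatus
fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
fs.Var(&status, "vpc-status", "one of UNATTACHED, VALID, BROKEN, WARNED")
// Set validates the raw value, so an unknown status fails at parse time.
if err := fs.Parse([]string{"--vpc-status", "VALID"}); err != nil {
	panic(err)
}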

type WaitGetWorkspaceRunning added in v0.10.0

type WaitGetWorkspaceRunning[R any] struct {
	Response    *R
	WorkspaceId int64 `json:"workspace_id"`
	Poll        func(time.Duration, func(*Workspace)) (*Workspace, error)
	// contains filtered or unexported fields
}

WaitGetWorkspaceRunning is a wrapper that calls WorkspacesAPI.WaitGetWorkspaceRunning and waits to reach RUNNING state.

func (*WaitGetWorkspaceRunning[R]) Get added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) Get() (*Workspace, error)

Get the Workspace with the default timeout of 20 minutes.

func (*WaitGetWorkspaceRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) GetWithTimeout(timeout time.Duration) (*Workspace, error)

Get the Workspace with custom timeout.

func (*WaitGetWorkspaceRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) OnProgress(callback func(*Workspace)) *WaitGetWorkspaceRunning[R]

OnProgress invokes a callback every time it polls for the status update.
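
A sketch of consuming the waiter returned by WorkspacesAPI.Create; `req` stands in for a populated CreateWorkspaceRequest, and `ctx` and the account client `a` are assumed from the examples below:

wait, err := a.Workspaces.Create(ctx, req)
if err != nil {
	panic(err)
}
ws, err := wait.OnProgress(func(w *provisioning.Workspace) {
	// Invoked on every poll until the workspace reaches RUNNING.
	logger.Infof(ctx, "workspace status: %s", w.WorkspaceStatus)
}).GetWithTimeout(60 * time.Minute)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "workspace %d is running", ws.WorkspaceId)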

type WarningType

type WarningType string

The AWS resource associated with this warning: a subnet or a security group.

const WarningTypeSecurityGroup WarningType = `securityGroup`
const WarningTypeSubnet WarningType = `subnet`

func (*WarningType) Set

func (f *WarningType) Set(v string) error

Set raw string value and validate it against allowed values

func (*WarningType) String

func (f *WarningType) String() string

String representation for fmt.Print

func (*WarningType) Type

func (f *WarningType) Type() string

Type always returns WarningType to satisfy the [pflag.Value] interface.

type Workspace

type Workspace struct {
	// Databricks account ID.
	AccountId string `json:"account_id,omitempty"`
	// The AWS region of the workspace data plane (for example, `us-west-2`).
	AwsRegion string `json:"aws_region,omitempty"`

	AzureWorkspaceInfo *AzureWorkspaceInfo `json:"azure_workspace_info,omitempty"`
	// The cloud name. This field always has the value `gcp`.
	Cloud string `json:"cloud,omitempty"`
	// The general workspace configurations that are specific to cloud
	// providers.
	CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"`
	// Time in epoch milliseconds when the workspace was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// ID of the workspace's credential configuration object.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The custom tags key-value pairing that is attached to this workspace. The
	// key-value pair is a string of utf-8 characters. The value can be an empty
	// string, with maximum length of 255 characters. The key can be of maximum
	// length of 127 characters, and cannot be empty.
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The deployment name defines part of the subdomain for the workspace. The
	// workspace URL for web application and REST APIs is
	// `<deployment-name>.cloud.databricks.com`.
	//
	// This value must be unique across all non-deleted deployments across all
	// AWS regions.
	DeploymentName string `json:"deployment_name,omitempty"`
	// The network settings for the workspace. The configurations are only for
	// Databricks-managed VPCs. It is ignored if you specify a customer-managed
	// VPC in the `network_id` field. All the IP range configurations must be
	// mutually exclusive. An attempt to create a workspace fails if Databricks
	// detects an IP range overlap.
	//
	// Specify custom IP ranges in CIDR format. The IP ranges for these fields
	// must not overlap, and all IP addresses must be entirely within the
	// following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`,
	// `192.168.0.0/16`, and `240.0.0.0/4`.
	//
	// The sizes of these IP ranges affect the maximum number of nodes for the
	// workspace.
	//
	// **Important**: Confirm the IP ranges used by your Databricks workspace
	// before creating the workspace. You cannot change them after your
	// workspace is deployed. If the IP address ranges for your Databricks are
	// too small, IP exhaustion can occur, causing your Databricks jobs to fail.
	// To determine the address range sizes that you need, Databricks provides a
	// calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes
	// for a new workspace].
	//
	// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
	GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`
	// The configurations for the GKE cluster of a Databricks workspace.
	GkeConfig *GkeConfig `json:"gke_config,omitempty"`
	// The Google Cloud region of the workspace data plane in your Google
	// account (for example, `us-east4`).
	Location string `json:"location,omitempty"`
	// ID of the key configuration for encrypting managed services.
	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
	// The network configuration ID that is attached to the workspace. This
	// field is available only if the network is a customer-managed network.
	NetworkId string `json:"network_id,omitempty"`
	// The pricing tier of the workspace. For pricing tier information, see [AWS
	// Pricing].
	//
	// [AWS Pricing]: https://databricks.com/product/aws-pricing
	PricingTier PricingTier `json:"pricing_tier,omitempty"`
	// ID of the workspace's private access settings object. Only used for
	// PrivateLink. You must specify this ID if you are using [AWS PrivateLink]
	// for either front-end (user-to-workspace connection), back-end (data plane
	// to control plane connection), or both connection types.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// ID of the workspace's storage configuration object.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// ID of the key configuration for encrypting workspace storage.
	StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
	// A unique integer ID for the workspace.
	WorkspaceId int64 `json:"workspace_id,omitempty"`
	// The human-readable name of the workspace.
	WorkspaceName string `json:"workspace_name,omitempty"`
	// The status of the workspace. For workspace creation, usually it is set to
	// `PROVISIONING` initially. Continue to check the status until the status
	// is `RUNNING`.
	WorkspaceStatus WorkspaceStatus `json:"workspace_status,omitempty"`
	// Message describing the current workspace status.
	WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Workspace) MarshalJSON added in v0.23.0

func (s Workspace) MarshalJSON() ([]byte, error)

func (*Workspace) UnmarshalJSON added in v0.23.0

func (s *Workspace) UnmarshalJSON(b []byte) error

type WorkspaceStatus

type WorkspaceStatus string

The status of the workspace. For workspace creation, usually it is set to `PROVISIONING` initially. Continue to check the status until the status is `RUNNING`.

const WorkspaceStatusBanned WorkspaceStatus = `BANNED`
const WorkspaceStatusCancelling WorkspaceStatus = `CANCELLING`
const WorkspaceStatusFailed WorkspaceStatus = `FAILED`
const WorkspaceStatusNotProvisioned WorkspaceStatus = `NOT_PROVISIONED`
const WorkspaceStatusProvisioning WorkspaceStatus = `PROVISIONING`
const WorkspaceStatusRunning WorkspaceStatus = `RUNNING`

func (*WorkspaceStatus) Set

func (f *WorkspaceStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*WorkspaceStatus) String

func (f *WorkspaceStatus) String() string

String representation for fmt.Print

func (*WorkspaceStatus) Type

func (f *WorkspaceStatus) Type() string

Type always returns WorkspaceStatus to satisfy the [pflag.Value] interface.

type WorkspacesAPI

type WorkspacesAPI struct {
	// contains filtered or unexported fields
}

These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.

These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

func NewWorkspaces

func NewWorkspaces(client *client.DatabricksClient) *WorkspacesAPI

func (*WorkspacesAPI) Create

func (a *WorkspacesAPI) Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

Create a new workspace.

Creates a new workspace.

**Important**: This operation is asynchronous. A response with HTTP status code 200 means the request has been accepted and is in progress, but does not mean that the workspace deployed successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}
Output:

func (*WorkspacesAPI) CreateAndWait deprecated

func (a *WorkspacesAPI) CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

Calls WorkspacesAPI.Create and waits to reach RUNNING state.

You can override the default timeout of 20 minutes by adding the retries.Timeout[Workspace](60*time.Minute) functional option.

Deprecated: use WorkspacesAPI.Create.Get() or WorkspacesAPI.WaitGetWorkspaceRunning
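
For reference, a sketch of the deprecated call with an overridden timeout; `req` is assumed to be a CreateWorkspaceRequest populated as in the example above:

created, err := a.Workspaces.CreateAndWait(ctx, req,
	retries.Timeout[provisioning.Workspace](60*time.Minute))
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)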

func (*WorkspacesAPI) Delete

func (a *WorkspacesAPI) Delete(ctx context.Context, request DeleteWorkspaceRequest) error

Delete a workspace.

Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspace resources to be deleted, depending on the size and number of workspace resources.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

func (*WorkspacesAPI) DeleteByWorkspaceId

func (a *WorkspacesAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error

Delete a workspace.

Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspace resources to be deleted, depending on the size and number of workspace resources.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

func (*WorkspacesAPI) Get

func (a *WorkspacesAPI) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

Get a workspace.

Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.

For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.Workspaces.GetByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}
Output:

func (*WorkspacesAPI) GetByWorkspaceId

func (a *WorkspacesAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

Get a workspace.

Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.

For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

func (*WorkspacesAPI) GetByWorkspaceName

func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error)

GetByWorkspaceName calls WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap and returns a single Workspace.

Returns an error if there's more than one Workspace with the same .WorkspaceName.

Note: All Workspace instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.
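
Illustrative only; `sdk-example-workspace` is a hypothetical workspace name:

// Loads all workspaces before matching, so prefer GetByWorkspaceId
// when the ID is already known.
byName, err := a.Workspaces.GetByWorkspaceName(ctx, "sdk-example-workspace")
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byName)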

func (*WorkspacesAPI) Impl

func (a *WorkspacesAPI) Impl() WorkspacesService

Impl returns the low-level Workspaces API implementation.

Deprecated: use MockWorkspacesInterface instead.

func (*WorkspacesAPI) List

func (a *WorkspacesAPI) List(ctx context.Context) ([]Workspace, error)

Get all workspaces.

Gets a list of all workspaces associated with an account, specified by ID.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
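
A minimal sketch, assuming the account client `a` from the examples above:

all, err := a.Workspaces.List(ctx)
if err != nil {
	panic(err)
}
for _, w := range all {
	logger.Infof(ctx, "workspace %s (%d) is %s", w.WorkspaceName, w.WorkspaceId, w.WorkspaceStatus)
}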

func (*WorkspacesAPI) Update

func (a *WorkspacesAPI) Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[struct{}], error)

Update workspace configuration.

Updates a workspace configuration for either a running workspace or a failed workspace. The elements that can be updated vary between these two use cases.

### Update a failed workspace

You can update a Databricks workspace configuration for a failed workspace deployment for some fields, but not all fields. For a failed workspace, this request supports updates to the following fields only:

- Credential configuration ID.

- Storage configuration ID.

- Network configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a failed workspace only, you can convert a workspace with a Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the network configuration for a failed or running workspace to add PrivateLink support, though you must also add a private access settings object.

- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Used only if you use customer-managed keys for managed services.

- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID for workspace storage.

- Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace.

- Custom tags. If you provide empty custom tags, the update is not applied.

After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status. The update is successful if the status changes to `RUNNING`.

For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.

### Update a running workspace

You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only:

- Credential configuration ID

- Network configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in this API for a failed or running workspace to add support for PrivateLink, although you also need to add a private access settings object.

- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK for managed services, adding this ID enables managed services encryption for new or updated data. Existing managed services data that existed before adding the key remains not encrypted with the DEK until it is modified. If the workspace already has customer-managed keys for managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK.

- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key configuration for workspace storage.

- Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace.

- Custom tags. If you provide empty custom tags, the update is not applied.

**Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not terminate all cluster instances in the workspace before calling this API, the request will fail.

### Wait until changes take effect

After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status and the status of the fields.

* For workspaces with a Databricks-managed VPC, the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the workspace update is successful, the workspace status changes to `RUNNING`. Note that you can also check the workspace status in the Account Console. However, you cannot use or create clusters for another 20 minutes after that status change. This results in a total of up to 40 minutes in which you cannot create clusters. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.

* For workspaces with a customer-managed VPC, the workspace status stays at status `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key configuration ID might take a few minutes to update, so continue to check the workspace until you observe that it has been updated. If the update fails, the workspace might revert silently to its original configuration. After the workspace has been updated, you cannot use or create clusters for another 20 minutes. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.

If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20 minute wait.

**Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment types and subscription types. If you have questions about availability, contact your Databricks representative.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

updateRole, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", updateRole)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = a.Workspaces.UpdateAndWait(ctx, provisioning.UpdateWorkspaceRequest{
	WorkspaceId:   created.WorkspaceId,
	CredentialsId: updateRole.CredentialsId,
})
if err != nil {
	panic(err)
}

// cleanup

err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
err = a.Credentials.DeleteByCredentialsId(ctx, updateRole.CredentialsId)
if err != nil {
	panic(err)
}
err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}
Output:

func (*WorkspacesAPI) UpdateAndWait deprecated

func (a *WorkspacesAPI) UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

Calls WorkspacesAPI.Update and waits to reach RUNNING state.

You can override the default timeout of 20 minutes by adding the retries.Timeout[Workspace](60*time.Minute) functional option.

Deprecated: use WorkspacesAPI.Update.Get() or WorkspacesAPI.WaitGetWorkspaceRunning

func (*WorkspacesAPI) WaitGetWorkspaceRunning added in v0.10.0

func (a *WorkspacesAPI) WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64,
	timeout time.Duration, callback func(*Workspace)) (*Workspace, error)

WaitGetWorkspaceRunning repeatedly calls WorkspacesAPI.Get and waits to reach RUNNING state.
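
A sketch of waiting on an existing workspace; `workspaceId` is assumed to identify a workspace that is still provisioning:

w, err := a.Workspaces.WaitGetWorkspaceRunning(ctx, workspaceId,
	30*time.Minute, func(ws *provisioning.Workspace) {
		logger.Infof(ctx, "still waiting: %s", ws.WorkspaceStatus)
	})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", w)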

func (*WorkspacesAPI) WithImpl

func (a *WorkspacesAPI) WithImpl(impl WorkspacesService) WorkspacesInterface

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

Deprecated: use MockWorkspacesInterface instead.

func (*WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap

func (a *WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)

WorkspaceWorkspaceNameToWorkspaceIdMap calls WorkspacesAPI.List and creates a map of results with Workspace.WorkspaceName as key and Workspace.WorkspaceId as value.

Returns an error if there's more than one Workspace with the same .WorkspaceName.

Note: All Workspace instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.
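
A sketch of the generated map helper, under the same setup as the examples above:

nameToId, err := a.Workspaces.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
if err != nil {
	// Duplicate workspace names also surface here as an error.
	panic(err)
}
for name, id := range nameToId {
	logger.Infof(ctx, "%s -> %d", name, id)
}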

type WorkspacesInterface added in v0.29.0

type WorkspacesInterface interface {
	// WithImpl could be used to override low-level API implementations for unit
	// testing purposes with [github.com/golang/mock] or other mocking frameworks.
	// Deprecated: use MockWorkspacesInterface instead.
	WithImpl(impl WorkspacesService) WorkspacesInterface

	// Impl returns low-level Workspaces API implementation
	// Deprecated: use MockWorkspacesInterface instead.
	Impl() WorkspacesService

	// WaitGetWorkspaceRunning repeatedly calls [WorkspacesAPI.Get] and waits to reach RUNNING state
	WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64,
		timeout time.Duration, callback func(*Workspace)) (*Workspace, error)

	// Create a new workspace.
	//
	// Creates a new workspace.
	//
	// **Important**: This operation is asynchronous. A response with HTTP status
	// code 200 means the request has been accepted and is in progress, but does not
	// mean that the workspace deployed successfully and is running. The initial
	// workspace status is typically `PROVISIONING`. Use the workspace ID
	// (`workspace_id`) field in the response to identify the new workspace and make
	// repeated `GET` requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to `RUNNING`.
	Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

	// Calls [WorkspacesAPIInterface.Create] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[Workspace](60*time.Minute) functional option.
	//
	// Deprecated: use [WorkspacesAPIInterface.Create].Get() or [WorkspacesAPIInterface.WaitGetWorkspaceRunning]
	CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

	// Delete a workspace.
	//
	// Terminates and deletes a Databricks workspace. From an API perspective,
	// deletion is immediate. However, it might take a few minutes for all
	// workspace resources to be deleted, depending on the size and number of
	// workspace resources.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	Delete(ctx context.Context, request DeleteWorkspaceRequest) error

	// Delete a workspace.
	//
	// Terminates and deletes a Databricks workspace. From an API perspective,
	// deletion is immediate. However, it might take a few minutes for all
	// workspace resources to be deleted, depending on the size and number of
	// workspace resources.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error

	// Get a workspace.
	//
	// Gets information including status for a Databricks workspace, specified by
	// ID. In the response, the `workspace_status` field indicates the current
	// status. After initial workspace creation (which is asynchronous), make
	// repeated `GET` requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

	// Get a workspace.
	//
	// Gets information including status for a Databricks workspace, specified by
	// ID. In the response, the `workspace_status` field indicates the current
	// status. After initial workspace creation (which is asynchronous), make
	// repeated `GET` requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

	// Get all workspaces.
	//
	// Gets a list of all workspaces associated with an account, specified by ID.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	List(ctx context.Context) ([]Workspace, error)

	// WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value.
	//
	// Returns an error if there's more than one [Workspace] with the same .WorkspaceName.
	//
	// Note: All [Workspace] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)

	// GetByWorkspaceName calls [WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace].
	//
	// Returns an error if there's more than one [Workspace] with the same .WorkspaceName.
	//
	// Note: All [Workspace] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error)

	// Update workspace configuration.
	//
	// Updates a workspace configuration for either a running workspace or a failed
	// workspace. The elements that can be updated vary between these two use
	// cases.
	//
	// ### Update a failed workspace You can update a Databricks workspace
	// configuration for failed workspace deployment for some fields, but not all
	// fields. For a failed workspace, this request supports updates to the
	// following fields only: - Credential configuration ID - Storage configuration
	// ID - Network configuration ID. Used only to add or change a network
	// configuration for a customer-managed VPC. For a failed workspace only, you
	// can convert a workspace with Databricks-managed VPC to use a customer-managed
	// VPC by adding this ID. You cannot downgrade a workspace with a
	// customer-managed VPC to be a Databricks-managed VPC. You can update the
	// network configuration for a failed or running workspace to add PrivateLink
	// support, though you must also add a private access settings object. - Key
	// configuration ID for managed services (control plane storage, such as
	// notebook source and Databricks SQL queries). Used only if you use
	// customer-managed keys for managed services. - Key configuration ID for
	// workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if
	// you use customer-managed keys for workspace storage. **Important**: If the
	// workspace was ever in the running state, even if briefly before becoming a
	// failed workspace, you cannot add a new key configuration ID for workspace
	// storage. - Private access settings ID to add PrivateLink support. You can add
	// or update the private access settings ID to upgrade a workspace to add
	// support for front-end, back-end, or both types of connectivity. You cannot
	// remove (downgrade) any existing front-end or back-end PrivateLink support on
	// a workspace. - Custom tags. If you provide empty custom tags, the
	// update is not applied.
	//
	// After calling the `PATCH` operation to update the workspace configuration,
	// make repeated `GET` requests with the workspace ID and check the workspace
	// status. The workspace is successful if the status changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// ### Update a running workspace You can update a Databricks workspace
	// configuration for running workspaces for some fields, but not all fields. For
	// a running workspace, this request supports updating the following fields
	// only: - Credential configuration ID
	//
	// - Network configuration ID. Used only if you already use a customer-managed
	// VPC. You cannot convert a running workspace from a Databricks-managed VPC to
	// a customer-managed VPC. You can use a network configuration update in this
	// API for a failed or running workspace to add support for PrivateLink,
	// although you also need to add a private access settings object.
	//
	// - Key configuration ID for managed services (control plane storage, such as
	// notebook source and Databricks SQL queries). Databricks does not directly
	// encrypt the data with the customer-managed key (CMK). Databricks uses both
	// the CMK and the Databricks managed key (DMK) that is unique to your workspace
	// to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt
	// your workspace's managed services persisted data. If the workspace does not
	// already have a CMK for managed services, adding this ID enables managed
	// services encryption for new or updated data. Existing managed services data
	// that existed before adding the key remains not encrypted with the DEK until
	// it is modified. If the workspace already has customer-managed keys for
	// managed services, this request rotates (changes) the CMK keys and the DEK is
	// re-encrypted with the DMK and the new CMK. - Key configuration ID for
	// workspace storage (root S3 bucket and, optionally, EBS volumes). You can set
	// this only if the workspace does not already have a customer-managed key
	// configuration for workspace storage. - Private access settings ID to add
	// PrivateLink support. You can add or update the private access settings ID to
	// upgrade a workspace to add support for front-end, back-end, or both types of
	// connectivity. You cannot remove (downgrade) any existing front-end or
	// back-end PrivateLink support on a workspace. - Custom tags. If you provide
	// empty custom tags, the update is not applied.
	//
	// **Important**: To update a running workspace, your workspace must have no
	// running compute resources that run in your workspace's VPC in the Classic
	// data plane. For example, stop all all-purpose clusters, job clusters, pools
	// with running clusters, and Classic SQL warehouses. If you do not terminate
	// all cluster instances in the workspace before calling this API, the request
	// will fail.
	//
	// ### Wait until changes take effect. After calling the `PATCH` operation to
	// update the workspace configuration, make repeated `GET` requests with the
	// workspace ID and check the workspace status and the status of the fields. *
	// For workspaces with a Databricks-managed VPC, the workspace status becomes
	// `PROVISIONING` temporarily (typically under 20 minutes). If the workspace
	// update is successful, the workspace status changes to `RUNNING`. Note that
	// you can also check the workspace status in the [Account Console]. However,
	// you cannot use or create clusters for another 20 minutes after that status
	// change. This results in a total of up to 40 minutes in which you cannot
	// create clusters. If you create or use clusters before this time interval
	// elapses, clusters do not launch successfully, fail, or could cause other
	// unexpected behavior.
	//
	// * For workspaces with a customer-managed VPC, the workspace status stays at
	// status `RUNNING` and the VPC change happens immediately. A change to the
	// storage customer-managed key configuration ID might take a few minutes to
	// update, so continue to check the workspace until you observe that it has been
	// updated. If the update fails, the workspace might revert silently to its
	// original configuration. After the workspace has been updated, you cannot use
	// or create clusters for another 20 minutes. If you create or use clusters
	// before this time interval elapses, clusters do not launch successfully, fail,
	// or could cause other unexpected behavior.
	//
	// If you update the _storage_ customer-managed key configurations, it takes 20
	// minutes for the changes to fully take effect. During the 20 minute wait, it
	// is important that you stop all REST API calls to the DBFS API. If you are
	// modifying _only the managed services key configuration_, you can omit the 20
	// minute wait.
	//
	// **Important**: Customer-managed keys and customer-managed VPCs are supported
	// by only some deployment types and subscription types. If you have questions
	// about availability, contact your Databricks representative.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	//
	// [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[struct{}], error)

	// Calls [WorkspacesAPIInterface.Update] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[Workspace](60*time.Minute) functional option.
	//
	// Deprecated: use [WorkspacesAPIInterface.Update].Get() or [WorkspacesAPIInterface.WaitGetWorkspaceRunning]
	UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)
}

type WorkspacesService

type WorkspacesService interface {

	// Create a new workspace.
	//
	// Creates a new workspace.
	//
	// **Important**: This operation is asynchronous. A response with HTTP
	// status code 200 means the request has been accepted and is in progress,
	// but does not mean that the workspace deployed successfully and is
	// running. The initial workspace status is typically `PROVISIONING`. Use
	// the workspace ID (`workspace_id`) field in the response to identify the
	// new workspace and make repeated `GET` requests with the workspace ID and
	// check its status. The workspace becomes available when the status changes
	// to `RUNNING`.
	Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error)

	// Delete a workspace.
	//
	// Terminates and deletes a Databricks workspace. From an API perspective,
	// deletion is immediate. However, it might take a few minutes for all
	// workspace resources to be deleted, depending on the size and number of
	// workspace resources.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	Delete(ctx context.Context, request DeleteWorkspaceRequest) error

	// Get a workspace.
	//
	// Gets information including status for a Databricks workspace, specified
	// by ID. In the response, the `workspace_status` field indicates the
	// current status. After initial workspace creation (which is asynchronous),
	// make repeated `GET` requests with the workspace ID and check its status.
	// The workspace becomes available when the status changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API
	// **including error handling**, see [Create a new workspace using the
	// Account API].
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

	// Get all workspaces.
	//
	// Gets a list of all workspaces associated with an account, specified by
	// ID.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	List(ctx context.Context) ([]Workspace, error)

	// Update workspace configuration.
	//
	// Updates a workspace configuration for either a running workspace or a
	// failed workspace. The elements that can be updated vary between these
	// two use cases.
	//
	// ### Update a failed workspace
	//
	// You can update a Databricks workspace configuration for a failed
	// workspace deployment for some fields, but not all fields. For a failed
	// workspace, this request supports updates to the following fields only:
	//
	// - Credential configuration ID.
	//
	// - Storage configuration ID.
	//
	// - Network configuration ID. Used only to add or change a network
	// configuration for a customer-managed VPC. For a failed workspace only,
	// you can convert a workspace with a Databricks-managed VPC to use a
	// customer-managed VPC by adding this ID. You cannot downgrade a
	// workspace with a customer-managed VPC to a Databricks-managed VPC. You
	// can update the network configuration for a failed or running workspace
	// to add PrivateLink support, though you must also add a private access
	// settings object.
	//
	// - Key configuration ID for managed services (control plane storage,
	// such as notebook source and Databricks SQL queries). Used only if you
	// use customer-managed keys for managed services.
	//
	// - Key configuration ID for workspace storage (root S3 bucket and,
	// optionally, EBS volumes). Used only if you use customer-managed keys
	// for workspace storage. **Important**: If the workspace was ever in the
	// running state, even if briefly before becoming a failed workspace, you
	// cannot add a new key configuration ID for workspace storage.
	//
	// - Private access settings ID to add PrivateLink support. You can add or
	// update the private access settings ID to upgrade a workspace to add
	// support for front-end, back-end, or both types of connectivity. You
	// cannot remove (downgrade) any existing front-end or back-end
	// PrivateLink support on a workspace.
	//
	// - Custom tags. If you provide empty custom tags, the update is not
	// applied.
	//
	// After calling the `PATCH` operation to update the workspace
	// configuration, make repeated `GET` requests with the workspace ID and
	// check the workspace status. The update is successful if the status
	// changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API
	// **including error handling**, see [Create a new workspace using the
	// Account API].
	//
	// ### Update a running workspace
	//
	// You can update a Databricks workspace configuration for a running
	// workspace for some fields, but not all fields. For a running workspace,
	// this request supports updating the following fields only:
	//
	// - Credential configuration ID.
	//
	// - Network configuration ID. Used only if you already use a
	// customer-managed VPC. You cannot convert a running workspace from a
	// Databricks-managed VPC to a customer-managed VPC. You can use a network
	// configuration update in this API for a failed or running workspace to
	// add support for PrivateLink, although you also need to add a private
	// access settings object.
	//
	// - Key configuration ID for managed services (control plane storage,
	// such as notebook source and Databricks SQL queries). Databricks does
	// not encrypt the data directly with the customer-managed key (CMK).
	// Databricks uses both the CMK and a Databricks-managed key (DMK) that is
	// unique to your workspace to encrypt a data encryption key (DEK), and
	// uses the DEK to encrypt your workspace's persisted managed services
	// data. If the workspace does not already have a CMK for managed
	// services, adding this ID enables managed services encryption for new or
	// updated data. Managed services data that existed before the key was
	// added remains unencrypted by the DEK until it is modified. If the
	// workspace already has customer-managed keys for managed services, this
	// request rotates (changes) the CMK, and the DEK is re-encrypted with the
	// DMK and the new CMK.
	//
	// - Key configuration ID for workspace storage (root S3 bucket and,
	// optionally, EBS volumes). You can set this only if the workspace does
	// not already have a customer-managed key configuration for workspace
	// storage.
	//
	// - Private access settings ID to add PrivateLink support. You can add or
	// update the private access settings ID to upgrade a workspace to add
	// support for front-end, back-end, or both types of connectivity. You
	// cannot remove (downgrade) any existing front-end or back-end
	// PrivateLink support on a workspace.
	//
	// - Custom tags. If you provide empty custom tags, the update is not
	// applied.
	//
	// **Important**: To update a running workspace, your workspace must have no
	// running compute resources that run in your workspace's VPC in the Classic
	// data plane. For example, stop all all-purpose clusters, job clusters,
	// pools with running clusters, and Classic SQL warehouses. If you do not
	// terminate all cluster instances in the workspace before calling this API,
	// the request will fail.
	//
	// ### Wait until changes take effect
	//
	// After calling the `PATCH` operation to update the workspace
	// configuration, make repeated `GET` requests with the workspace ID and
	// check the workspace status and the status of the fields (see the
	// polling sketch at the end of this page).
	//
	// * For workspaces with a Databricks-managed VPC, the workspace status
	// becomes `PROVISIONING` temporarily (typically under 20 minutes). If the
	// workspace update is successful, the workspace status changes to
	// `RUNNING`. Note that you can also check the workspace status in the
	// [Account Console]. However, you cannot use or create clusters for
	// another 20 minutes after that status change, for a total of up to 40
	// minutes in which you cannot create clusters. If you create or use
	// clusters before this interval elapses, the clusters may fail to launch
	// or cause other unexpected behavior.
	//
	// * For workspaces with a customer-managed VPC, the workspace status
	// stays at `RUNNING` and the VPC change happens immediately. A change to
	// the storage customer-managed key configuration ID might take a few
	// minutes to apply, so continue to check the workspace until you observe
	// that it has been updated. If the update fails, the workspace might
	// revert silently to its original configuration. After the workspace has
	// been updated, you cannot use or create clusters for another 20 minutes.
	// If you create or use clusters before this interval elapses, the
	// clusters may fail to launch or cause other unexpected behavior.
	//
	// If you update the _storage_ customer-managed key configuration, it
	// takes 20 minutes for the change to fully take effect. During the
	// 20-minute wait, it is important that you stop all REST API calls to the
	// DBFS API. If you are modifying _only the managed services key
	// configuration_, you can omit the 20-minute wait.
	//
	// **Important**: Customer-managed keys and customer-managed VPCs are
	// supported by only some deployment types and subscription types. If you
	// have questions about availability, contact your Databricks
	// representative.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	//
	// [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Update(ctx context.Context, request UpdateWorkspaceRequest) error
}
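Taken together, the Create and Get documentation above describes a simple polling loop. The sketch below shows one way to write it, assuming the Workspace struct exposes WorkspaceId and WorkspaceStatus fields matching the `workspace_id` and `workspace_status` JSON fields mentioned above; treat those names, the `RUNNING` comparison, and the poll interval as illustrative rather than authoritative.

package provisioningexample

import (
	"context"
	"time"

	"github.com/databricks/databricks-sdk-go/service/provisioning"
)

// createAndPoll creates a workspace, then polls Get until the status reads
// RUNNING, as the documentation above recommends. The WorkspaceId and
// WorkspaceStatus field names are assumptions drawn from the JSON field
// names in this documentation.
func createAndPoll(ctx context.Context, svc provisioning.WorkspacesService,
	req provisioning.CreateWorkspaceRequest) (*provisioning.Workspace, error) {
	ws, err := svc.Create(ctx, req)
	if err != nil {
		return nil, err
	}
	for {
		cur, err := svc.Get(ctx, provisioning.GetWorkspaceRequest{
			WorkspaceId: ws.WorkspaceId, // assumed field name
		})
		if err != nil {
			return nil, err
		}
		if string(cur.WorkspaceStatus) == "RUNNING" { // assumed status value
			return cur, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err() // caller bounds the wait via the context
		case <-time.After(30 * time.Second): // illustrative poll interval
		}
	}
}

Bounding the loop with a context deadline mirrors what the UpdateAndWait helper shown earlier does for updates.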

These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.

These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
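Finally, the "Wait until changes take effect" guidance in the Update documentation above reduces to the same polling pattern. A minimal sketch, under the same assumed field and status names as the previous example:

package provisioningexample

import (
	"context"
	"time"

	"github.com/databricks/databricks-sdk-go/service/provisioning"
)

// updateAndConfirm submits an Update and polls Get until the workspace
// reports RUNNING again. Per the documentation above, storage key changes
// need a further 20-minute quiet period (no cluster creation and no DBFS
// API calls) after the status flips back to RUNNING; that wait is left to
// the caller. Field names are assumptions.
func updateAndConfirm(ctx context.Context, svc provisioning.WorkspacesService,
	req provisioning.UpdateWorkspaceRequest) error {
	if err := svc.Update(ctx, req); err != nil {
		return err
	}
	for {
		ws, err := svc.Get(ctx, provisioning.GetWorkspaceRequest{
			WorkspaceId: req.WorkspaceId, // assumed field name
		})
		if err != nil {
			return err
		}
		if string(ws.WorkspaceStatus) == "RUNNING" { // assumed status value
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Minute): // illustrative poll interval
		}
	}
}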
