Documentation ¶
Overview ¶
These APIs allow you to manage Credentials, Encryption Keys, Networks, Private Access, Storage, Vpc Endpoints, Workspaces, etc.
Index ¶
- type AwsCredentials
- type AwsKeyInfo
- type CloudResourceContainer
- type CreateAwsKeyInfo
- type CreateCredentialAwsCredentials
- type CreateCredentialRequest
- type CreateCredentialStsRole
- type CreateCustomerManagedKeyRequest
- type CreateGcpKeyInfo
- type CreateNetworkRequest
- type CreateStorageConfigurationRequest
- type CreateVpcEndpointRequest
- type CreateWorkspaceRequest
- type Credential
- type CredentialsAPI
- func (a *CredentialsAPI) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)
- func (a *CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)
- func (a *CredentialsAPI) Delete(ctx context.Context, request DeleteCredentialRequest) error
- func (a *CredentialsAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error
- func (a *CredentialsAPI) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)
- func (a *CredentialsAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)
- func (a *CredentialsAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error)
- func (a *CredentialsAPI) Impl() CredentialsService
- func (a *CredentialsAPI) List(ctx context.Context) ([]Credential, error)
- func (a *CredentialsAPI) WithImpl(impl CredentialsService) *CredentialsAPI
- type CredentialsService
- type CustomerFacingGcpCloudResourceContainer
- type CustomerManagedKey
- type DeleteCredentialRequest
- type DeleteEncryptionKeyRequest
- type DeleteNetworkRequest
- type DeletePrivateAccesRequest
- type DeleteStorageRequest
- type DeleteVpcEndpointRequest
- type DeleteWorkspaceRequest
- type EncryptionKeysAPI
- func (a *EncryptionKeysAPI) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)
- func (a *EncryptionKeysAPI) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error
- func (a *EncryptionKeysAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error
- func (a *EncryptionKeysAPI) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)
- func (a *EncryptionKeysAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)
- func (a *EncryptionKeysAPI) Impl() EncryptionKeysService
- func (a *EncryptionKeysAPI) List(ctx context.Context) ([]CustomerManagedKey, error)
- func (a *EncryptionKeysAPI) WithImpl(impl EncryptionKeysService) *EncryptionKeysAPI
- type EncryptionKeysService
- type EndpointUseCase
- type ErrorType
- type GcpKeyInfo
- type GcpManagedNetworkConfig
- type GcpNetworkInfo
- type GcpVpcEndpointInfo
- type GetCredentialRequest
- type GetEncryptionKeyRequest
- type GetNetworkRequest
- type GetPrivateAccesRequest
- type GetStorageRequest
- type GetVpcEndpointRequest
- type GetWorkspaceRequest
- type GkeConfig
- type GkeConfigConnectivityType
- type KeyUseCase
- type Network
- type NetworkHealth
- type NetworkVpcEndpoints
- type NetworkWarning
- type NetworksAPI
- func (a *NetworksAPI) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)
- func (a *NetworksAPI) Delete(ctx context.Context, request DeleteNetworkRequest) error
- func (a *NetworksAPI) DeleteByNetworkId(ctx context.Context, networkId string) error
- func (a *NetworksAPI) Get(ctx context.Context, request GetNetworkRequest) (*Network, error)
- func (a *NetworksAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error)
- func (a *NetworksAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error)
- func (a *NetworksAPI) Impl() NetworksService
- func (a *NetworksAPI) List(ctx context.Context) ([]Network, error)
- func (a *NetworksAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)
- func (a *NetworksAPI) WithImpl(impl NetworksService) *NetworksAPI
- type NetworksService
- type PricingTier
- type PrivateAccessAPI
- func (a *PrivateAccessAPI) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
- func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAccesRequest) error
- func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error
- func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)
- func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)
- func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)
- func (a *PrivateAccessAPI) Impl() PrivateAccessService
- func (a *PrivateAccessAPI) List(ctx context.Context) ([]PrivateAccessSettings, error)
- func (a *PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)
- func (a *PrivateAccessAPI) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error
- func (a *PrivateAccessAPI) WithImpl(impl PrivateAccessService) *PrivateAccessAPI
- type PrivateAccessLevel
- type PrivateAccessService
- type PrivateAccessSettings
- type RootBucketInfo
- type StorageAPI
- func (a *StorageAPI) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)
- func (a *StorageAPI) Delete(ctx context.Context, request DeleteStorageRequest) error
- func (a *StorageAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error
- func (a *StorageAPI) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)
- func (a *StorageAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)
- func (a *StorageAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)
- func (a *StorageAPI) Impl() StorageService
- func (a *StorageAPI) List(ctx context.Context) ([]StorageConfiguration, error)
- func (a *StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)
- func (a *StorageAPI) WithImpl(impl StorageService) *StorageAPI
- type StorageConfiguration
- type StorageService
- type StsRole
- type UpdateWorkspaceRequest
- type UpsertPrivateAccessSettingsRequest
- type VpcEndpoint
- type VpcEndpointsAPI
- func (a *VpcEndpointsAPI) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)
- func (a *VpcEndpointsAPI) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error
- func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error
- func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)
- func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)
- func (a *VpcEndpointsAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)
- func (a *VpcEndpointsAPI) Impl() VpcEndpointsService
- func (a *VpcEndpointsAPI) List(ctx context.Context) ([]VpcEndpoint, error)
- func (a *VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)
- func (a *VpcEndpointsAPI) WithImpl(impl VpcEndpointsService) *VpcEndpointsAPI
- type VpcEndpointsService
- type VpcStatus
- type WaitGetWorkspaceRunning
- type WarningType
- type Workspace
- type WorkspaceStatus
- type WorkspacesAPI
- func (a *WorkspacesAPI) Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)
- func (a *WorkspacesAPI) CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, ...) (*Workspace, error) deprecated
- func (a *WorkspacesAPI) Delete(ctx context.Context, request DeleteWorkspaceRequest) error
- func (a *WorkspacesAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error
- func (a *WorkspacesAPI) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)
- func (a *WorkspacesAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)
- func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error)
- func (a *WorkspacesAPI) Impl() WorkspacesService
- func (a *WorkspacesAPI) List(ctx context.Context) ([]Workspace, error)
- func (a *WorkspacesAPI) Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[any], error)
- func (a *WorkspacesAPI) UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, ...) (*Workspace, error) deprecated
- func (a *WorkspacesAPI) WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64, timeout time.Duration, ...) (*Workspace, error)
- func (a *WorkspacesAPI) WithImpl(impl WorkspacesService) *WorkspacesAPI
- func (a *WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)
- type WorkspacesService
Examples ¶
- CredentialsAPI.Create (Credentials)
- CredentialsAPI.Create (LogDelivery)
- CredentialsAPI.Create (Workspaces)
- CredentialsAPI.Get (Credentials)
- EncryptionKeysAPI.Create (EncryptionKeys)
- EncryptionKeysAPI.Get (EncryptionKeys)
- NetworksAPI.Create (Networks)
- NetworksAPI.Get (Networks)
- PrivateAccessAPI.Create (PrivateAccess)
- PrivateAccessAPI.Get (PrivateAccess)
- PrivateAccessAPI.Replace (PrivateAccess)
- StorageAPI.Create (LogDelivery)
- StorageAPI.Create (Storage)
- StorageAPI.Create (Workspaces)
- StorageAPI.Get (Storage)
- VpcEndpointsAPI.Create (VpcEndpoints)
- VpcEndpointsAPI.Get (VpcEndpoints)
- WorkspacesAPI.Create (Workspaces)
- WorkspacesAPI.Get (Workspaces)
- WorkspacesAPI.Update (Workspaces)
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AwsCredentials ¶
type AwsCredentials struct {
StsRole *StsRole `json:"sts_role,omitempty"`
}
type AwsKeyInfo ¶
type AwsKeyInfo struct { // The AWS KMS key alias. KeyAlias string `json:"key_alias,omitempty"` // The AWS KMS key's Amazon Resource Name (ARN). KeyArn string `json:"key_arn"` // The AWS KMS key region. KeyRegion string `json:"key_region"` // This field applies only if the `use_cases` property includes `STORAGE`. // If this is set to `true` or omitted, the key is also used to encrypt // cluster EBS volumes. If you do not want to use this key for encrypting // EBS volumes, set to `false`. ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` }
type CloudResourceContainer ¶
type CloudResourceContainer struct { // The general workspace configurations that are specific to Google Cloud. Gcp *CustomerFacingGcpCloudResourceContainer `json:"gcp,omitempty"` }
The general workspace configurations that are specific to cloud providers.
type CreateAwsKeyInfo ¶
type CreateAwsKeyInfo struct { // The AWS KMS key alias. KeyAlias string `json:"key_alias,omitempty"` // The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS // region is inferred from the ARN. KeyArn string `json:"key_arn"` // This field applies only if the `use_cases` property includes `STORAGE`. // If this is set to `true` or omitted, the key is also used to encrypt // cluster EBS volumes. To not use this key also for encrypting EBS volumes, // set this to `false`. ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` }
type CreateCredentialAwsCredentials ¶
type CreateCredentialAwsCredentials struct {
StsRole *CreateCredentialStsRole `json:"sts_role,omitempty"`
}
type CreateCredentialRequest ¶
type CreateCredentialRequest struct { AwsCredentials CreateCredentialAwsCredentials `json:"aws_credentials"` // The human-readable name of the credential configuration object. CredentialsName string `json:"credentials_name"` }
type CreateCredentialStsRole ¶
type CreateCredentialStsRole struct { // The Amazon Resource Name (ARN) of the cross account role. RoleArn string `json:"role_arn,omitempty"` }
type CreateCustomerManagedKeyRequest ¶
type CreateCustomerManagedKeyRequest struct { AwsKeyInfo *CreateAwsKeyInfo `json:"aws_key_info,omitempty"` GcpKeyInfo *CreateGcpKeyInfo `json:"gcp_key_info,omitempty"` // The cases that the key can be used for. UseCases []KeyUseCase `json:"use_cases"` }
type CreateGcpKeyInfo ¶ added in v0.9.0
type CreateGcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId string `json:"kms_key_id"` }
type CreateNetworkRequest ¶
type CreateNetworkRequest struct { // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"` // The human-readable name of the network configuration. NetworkName string `json:"network_name"` // IDs of one to five security groups associated with this network. Security // group IDs **cannot** be used in multiple network configurations. SecurityGroupIds []string `json:"security_group_ids,omitempty"` // IDs of at least two subnets associated with this network. Subnet IDs // **cannot** be used in multiple network configurations. SubnetIds []string `json:"subnet_ids,omitempty"` // If specified, contains the VPC endpoints used to allow cluster // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"` // The ID of the VPC associated with this network. VPC IDs can be used in // multiple network configurations. VpcId string `json:"vpc_id,omitempty"` }
type CreateStorageConfigurationRequest ¶
type CreateStorageConfigurationRequest struct { // Root S3 bucket information. RootBucketInfo RootBucketInfo `json:"root_bucket_info"` // The human-readable name of the storage configuration. StorageConfigurationName string `json:"storage_configuration_name"` }
type CreateVpcEndpointRequest ¶
type CreateVpcEndpointRequest struct { // The ID of the VPC endpoint object in AWS. AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"` // The Google Cloud specific information for this Private Service Connect // endpoint. GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"` // The AWS region in which this VPC endpoint object exists. Region string `json:"region,omitempty"` // The human-readable name of the VPC endpoint configuration. VpcEndpointName string `json:"vpc_endpoint_name"` }
type CreateWorkspaceRequest ¶
type CreateWorkspaceRequest struct { // The AWS region of the workspace's data plane. AwsRegion string `json:"aws_region,omitempty"` // The cloud provider which the workspace uses. For Google Cloud workspaces, // always set this field to `gcp`. Cloud string `json:"cloud,omitempty"` // The general workspace configurations that are specific to cloud // providers. CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"` // ID of the workspace's credential configuration object. CredentialsId string `json:"credentials_id,omitempty"` // The deployment name defines part of the subdomain for the workspace. The // workspace URL for web application and REST APIs is // `<workspace-deployment-name>.cloud.databricks.com`. For example, if the // deployment name is `abcsales`, your workspace URL will be // `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This // property supports only the set of characters that are allowed in a // subdomain. // // If your account has a non-empty deployment name prefix at workspace // creation time, the workspace deployment name changes so that the // beginning has the account prefix and a hyphen. For example, if your // account's deployment prefix is `acme` and the workspace deployment name // is `workspace-1`, the `deployment_name` field becomes `acme-workspace-1` // and that is the value that is returned in JSON responses for the // `deployment_name` field. The workspace URL is // `acme-workspace-1.cloud.databricks.com`. // // If your account has a non-empty deployment name prefix and you set // `deployment_name` to the reserved keyword `EMPTY`, `deployment_name` is // just the account prefix only. For example, if your account's deployment // prefix is `acme` and the workspace deployment name is `EMPTY`, // `deployment_name` becomes `acme` only and the workspace URL is // `acme.cloud.databricks.com`. 
// // Contact your Databricks representatives to add an account deployment name // prefix to your account. If you do not have a deployment name prefix, the // special deployment name value `EMPTY` is invalid. // // This value must be unique across all non-deleted deployments across all // AWS regions. // // If a new workspace omits this property, the server generates a unique // deployment name for you with the pattern `dbc-xxxxxxxx-xxxx`. DeploymentName string `json:"deployment_name,omitempty"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location string `json:"location,omitempty"` // The ID of the workspace's managed services encryption key configuration // object. This is used to help protect and control access to the // workspace's notebooks, secrets, Databricks SQL queries, and query // history. The provided key configuration object property `use_cases` must // contain `MANAGED_SERVICES`. ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"` NetworkId string `json:"network_id,omitempty"` // The pricing tier of the workspace. For pricing tier information, see [AWS // Pricing]. // // [AWS Pricing]: https://databricks.com/product/aws-pricing PricingTier PricingTier `json:"pricing_tier,omitempty"` // ID of the workspace's private access settings object. Only used for // PrivateLink. This ID must be specified for customers using [AWS // PrivateLink] for either front-end (user-to-workspace connection), // back-end (data plane to control plane connection), or both connection // types. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. 
// // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"` // The ID of the workspace's storage configuration object. StorageConfigurationId string `json:"storage_configuration_id,omitempty"` // The ID of the workspace's storage encryption key configuration object. // This is used to encrypt the workspace's root S3 bucket (root DBFS and // system data) and, optionally, cluster EBS volumes. The provided key // configuration object property `use_cases` must contain `STORAGE`. StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"` // The workspace's human-readable name. WorkspaceName string `json:"workspace_name"` }
type Credential ¶
type Credential struct { // The Databricks account ID that hosts the credential. AccountId string `json:"account_id,omitempty"` AwsCredentials *AwsCredentials `json:"aws_credentials,omitempty"` // Time in epoch milliseconds when the credential was created. CreationTime int64 `json:"creation_time,omitempty"` // Databricks credential configuration ID. CredentialsId string `json:"credentials_id,omitempty"` // The human-readable name of the credential configuration object. CredentialsName string `json:"credentials_name,omitempty"` }
type CredentialsAPI ¶
type CredentialsAPI struct {
// contains filtered or unexported fields
}
These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.
func NewCredentials ¶
func NewCredentials(client *client.DatabricksClient) *CredentialsAPI
func (*CredentialsAPI) Create ¶
func (a *CredentialsAPI) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)
Create credential configuration.
Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account ID) in the returned credential object, and configure the required access policy.
Save the response's `credentials_id` field, which is the ID for your new credential configuration object.
For information about how to create a new workspace with this API, see Create a new workspace using the Account API.
Example (Credentials) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) // cleanup err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) }
Output:
Example (LogDelivery) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } creds, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_LOGDELIVERY_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", creds) // cleanup err = a.Credentials.DeleteByCredentialsId(ctx, creds.CredentialsId) if err != nil { panic(err) }
Output:
Example (Workspaces) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) // cleanup err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) }
Output:
func (*CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap ¶
func (a *CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)
CredentialCredentialsNameToCredentialsIdMap calls CredentialsAPI.List and creates a map of results with Credential.CredentialsName as key and Credential.CredentialsId as value.
Returns an error if there's more than one Credential with the same .CredentialsName.
Note: All Credential instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
func (*CredentialsAPI) Delete ¶
func (a *CredentialsAPI) Delete(ctx context.Context, request DeleteCredentialRequest) error
Delete credential configuration.
Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace.
func (*CredentialsAPI) DeleteByCredentialsId ¶
func (a *CredentialsAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error
Delete credential configuration.
Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace.
func (*CredentialsAPI) Get ¶
func (a *CredentialsAPI) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)
Get credential configuration.
Gets a Databricks credential configuration object for an account, both specified by ID.
Example (Credentials) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) byId, err := a.Credentials.GetByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) }
Output:
func (*CredentialsAPI) GetByCredentialsId ¶
func (a *CredentialsAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)
Get credential configuration.
Gets a Databricks credential configuration object for an account, both specified by ID.
func (*CredentialsAPI) GetByCredentialsName ¶
func (a *CredentialsAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error)
GetByCredentialsName calls CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap and returns a single Credential.
Returns an error if there's more than one Credential with the same .CredentialsName.
Note: All Credential instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*CredentialsAPI) Impl ¶
func (a *CredentialsAPI) Impl() CredentialsService
Impl returns low-level Credentials API implementation
func (*CredentialsAPI) List ¶
func (a *CredentialsAPI) List(ctx context.Context) ([]Credential, error)
Get all credential configurations.
Gets all Databricks credential configurations associated with an account specified by ID.
func (*CredentialsAPI) WithImpl ¶
func (a *CredentialsAPI) WithImpl(impl CredentialsService) *CredentialsAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type CredentialsService ¶
type CredentialsService interface { // Create credential configuration. // // Creates a Databricks credential configuration that represents cloud // cross-account credentials for a specified account. Databricks uses this // to set up network infrastructure properly to host Databricks clusters. // For your AWS IAM role, you need to trust the External ID (the Databricks // Account API account ID) in the returned credential object, and configure // the required access policy. // // Save the response's `credentials_id` field, which is the ID for your new // credential configuration object. // // For information about how to create a new workspace with this API, see // [Create a new workspace using the Account API] // // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) // Delete credential configuration. // // Deletes a Databricks credential configuration object for an account, both // specified by ID. You cannot delete a credential that is associated with // any workspace. Delete(ctx context.Context, request DeleteCredentialRequest) error // Get credential configuration. // // Gets a Databricks credential configuration object for an account, both // specified by ID. Get(ctx context.Context, request GetCredentialRequest) (*Credential, error) // Get all credential configurations. // // Gets all Databricks credential configurations associated with an account // specified by ID. List(ctx context.Context) ([]Credential, error) }
These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.
type CustomerFacingGcpCloudResourceContainer ¶
type CustomerFacingGcpCloudResourceContainer struct { // The Google Cloud project ID, which the workspace uses to instantiate // cloud resources for your workspace. ProjectId string `json:"project_id,omitempty"` }
The general workspace configurations that are specific to Google Cloud.
type CustomerManagedKey ¶
type CustomerManagedKey struct { // The Databricks account ID that holds the customer-managed key. AccountId string `json:"account_id,omitempty"` AwsKeyInfo *AwsKeyInfo `json:"aws_key_info,omitempty"` // Time in epoch milliseconds when the customer key was created. CreationTime int64 `json:"creation_time,omitempty"` // ID of the encryption key configuration object. CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` GcpKeyInfo *GcpKeyInfo `json:"gcp_key_info,omitempty"` // The cases that the key can be used for. UseCases []KeyUseCase `json:"use_cases,omitempty"` }
type DeleteCredentialRequest ¶
type DeleteCredentialRequest struct { // Databricks Account API credential configuration ID CredentialsId string `json:"-" url:"-"` }
Delete credential configuration
type DeleteEncryptionKeyRequest ¶
type DeleteEncryptionKeyRequest struct { // Databricks encryption key configuration ID. CustomerManagedKeyId string `json:"-" url:"-"` }
Delete encryption key configuration
type DeleteNetworkRequest ¶
type DeleteNetworkRequest struct { // Databricks Account API network configuration ID. NetworkId string `json:"-" url:"-"` }
Delete a network configuration
type DeletePrivateAccesRequest ¶
type DeletePrivateAccesRequest struct { // Databricks Account API private access settings ID. PrivateAccessSettingsId string `json:"-" url:"-"` }
Delete a private access settings object
type DeleteStorageRequest ¶
type DeleteStorageRequest struct { // Databricks Account API storage configuration ID. StorageConfigurationId string `json:"-" url:"-"` }
Delete storage configuration
type DeleteVpcEndpointRequest ¶
type DeleteVpcEndpointRequest struct { // Databricks VPC endpoint ID. VpcEndpointId string `json:"-" url:"-"` }
Delete VPC endpoint configuration
type DeleteWorkspaceRequest ¶
type DeleteWorkspaceRequest struct { // Workspace ID. WorkspaceId int64 `json:"-" url:"-"` }
Delete a workspace
type EncryptionKeysAPI ¶
type EncryptionKeysAPI struct {
// contains filtered or unexported fields
}
These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:
* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history. * Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.
In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.
func NewEncryptionKeys ¶
func NewEncryptionKeys(client *client.DatabricksClient) *EncryptionKeysAPI
func (*EncryptionKeysAPI) Create ¶
func (a *EncryptionKeysAPI) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)
Create encryption key configuration.
Creates a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If the key is assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for workspace storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.
**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions that currently support creation of Databricks workspaces.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
Example (EncryptionKeys) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{ AwsKeyInfo: &provisioning.CreateAwsKeyInfo{ KeyArn: os.Getenv("TEST_MANAGED_KMS_KEY_ARN"), KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"), }, UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) // cleanup err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId) if err != nil { panic(err) }
Output:
func (*EncryptionKeysAPI) Delete ¶
func (a *EncryptionKeysAPI) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error
Delete encryption key configuration.
Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.
func (*EncryptionKeysAPI) DeleteByCustomerManagedKeyId ¶
func (a *EncryptionKeysAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error
Delete encryption key configuration.
Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.
func (*EncryptionKeysAPI) Get ¶
func (a *EncryptionKeysAPI) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)
Get encryption key configuration.
Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.
**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
This operation is available only if your account is on the E2 version of the platform.
Example (EncryptionKeys) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{ AwsKeyInfo: &provisioning.CreateAwsKeyInfo{ KeyArn: os.Getenv("TEST_MANAGED_KMS_KEY_ARN"), KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"), }, UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) byId, err := a.EncryptionKeys.GetByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId) if err != nil { panic(err) }
Output:
func (*EncryptionKeysAPI) GetByCustomerManagedKeyId ¶
func (a *EncryptionKeysAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)
Get encryption key configuration.
Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.
**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
This operation is available only if your account is on the E2 version of the platform.
func (*EncryptionKeysAPI) Impl ¶
func (a *EncryptionKeysAPI) Impl() EncryptionKeysService
Impl returns low-level EncryptionKeys API implementation
func (*EncryptionKeysAPI) List ¶
func (a *EncryptionKeysAPI) List(ctx context.Context) ([]CustomerManagedKey, error)
Get all encryption key configurations.
Gets all customer-managed key configuration objects for an account. If the key is specified as a workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS volumes data in the data plane.
**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
This operation is available only if your account is on the E2 version of the platform.
func (*EncryptionKeysAPI) WithImpl ¶
func (a *EncryptionKeysAPI) WithImpl(impl EncryptionKeysService) *EncryptionKeysAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type EncryptionKeysService ¶
type EncryptionKeysService interface { // Create encryption key configuration. // // Creates a customer-managed key configuration object for an account, // specified by ID. This operation uploads a reference to a customer-managed // key to Databricks. If the key is assigned as a workspace's // customer-managed key for managed services, Databricks uses the key to // encrypt the workspaces notebooks and secrets in the control plane, in // addition to Databricks SQL queries and query history. If it is specified // as a workspace's customer-managed key for workspace storage, the key // encrypts the workspace's root S3 bucket (which contains the workspace's // root DBFS and system data) and, optionally, cluster EBS volume data. // // **Important**: Customer-managed keys are supported only for some // deployment types, subscription types, and AWS regions that currently // support creation of Databricks workspaces. // // This operation is available only if your account is on the E2 version of // the platform or on a select custom plan that allows multiple workspaces // per account. Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) // Delete encryption key configuration. // // Deletes a customer-managed key configuration object for an account. You // cannot delete a configuration that is associated with a running // workspace. Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error // Get encryption key configuration. // // Gets a customer-managed key configuration object for an account, // specified by ID. This operation uploads a reference to a customer-managed // key to Databricks. If assigned as a workspace's customer-managed key for // managed services, Databricks uses the key to encrypt the workspaces // notebooks and secrets in the control plane, in addition to Databricks SQL // queries and query history. 
If it is specified as a workspace's // customer-managed key for storage, the key encrypts the workspace's root // S3 bucket (which contains the workspace's root DBFS and system data) and, // optionally, cluster EBS volume data. // // **Important**: Customer-managed keys are supported only for some // deployment types, subscription types, and AWS regions. // // This operation is available only if your account is on the E2 version of // the platform.", Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) // Get all encryption key configurations. // // Gets all customer-managed key configuration objects for an account. If // the key is specified as a workspace's managed services customer-managed // key, Databricks uses the key to encrypt the workspace's notebooks and // secrets in the control plane, in addition to Databricks SQL queries and // query history. If the key is specified as a workspace's storage // customer-managed key, the key is used to encrypt the workspace's root S3 // bucket and optionally can encrypt cluster EBS volumes data in the data // plane. // // **Important**: Customer-managed keys are supported only for some // deployment types, subscription types, and AWS regions. // // This operation is available only if your account is on the E2 version of // the platform. List(ctx context.Context) ([]CustomerManagedKey, error) }
These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:
* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history. * Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.
In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.
type EndpointUseCase ¶
type EndpointUseCase string
This enumeration represents the type of Databricks VPC endpoint service that was used when creating this VPC endpoint.
const EndpointUseCaseDataplaneRelayAccess EndpointUseCase = `DATAPLANE_RELAY_ACCESS`
const EndpointUseCaseWorkspaceAccess EndpointUseCase = `WORKSPACE_ACCESS`
func (*EndpointUseCase) Set ¶
func (f *EndpointUseCase) Set(v string) error
Set raw string value and validate it against allowed values
func (*EndpointUseCase) String ¶
func (f *EndpointUseCase) String() string
String representation for fmt.Print
func (*EndpointUseCase) Type ¶
func (f *EndpointUseCase) Type() string
Type always returns EndpointUseCase to satisfy [pflag.Value] interface
type ErrorType ¶
type ErrorType string
The AWS resource associated with this error: credentials, VPC, subnet, security group, or network ACL.
const ErrorTypeCredentials ErrorType = `credentials`
const ErrorTypeNetworkAcl ErrorType = `networkAcl`
const ErrorTypeSecurityGroup ErrorType = `securityGroup`
const ErrorTypeSubnet ErrorType = `subnet`
const ErrorTypeVpc ErrorType = `vpc`
type GcpKeyInfo ¶ added in v0.9.0
type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId string `json:"kms_key_id"` }
type GcpManagedNetworkConfig ¶
type GcpManagedNetworkConfig struct { // The IP range from which to allocate GKE cluster pods. No bigger than `/9` // and no smaller than `/21`. GkeClusterPodIpRange string `json:"gke_cluster_pod_ip_range,omitempty"` // The IP range from which to allocate GKE cluster services. No bigger than // `/16` and no smaller than `/27`. GkeClusterServiceIpRange string `json:"gke_cluster_service_ip_range,omitempty"` // The IP range from which to allocate GKE cluster nodes. No bigger than // `/9` and no smaller than `/29`. SubnetCidr string `json:"subnet_cidr,omitempty"` }
The network settings for the workspace. The configurations are only for Databricks-managed VPCs. It is ignored if you specify a customer-managed VPC in the `network_id` field. All the IP range configurations must be mutually exclusive. An attempt to create a workspace fails if Databricks detects an IP range overlap.
Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`.
The sizes of these IP ranges affect the maximum number of nodes for the workspace.
**Important**: Confirm the IP ranges used by your Databricks workspace before creating the workspace. You cannot change them after your workspace is deployed. If the IP address ranges for your Databricks are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To determine the address range sizes that you need, Databricks provides a calculator as a Microsoft Excel spreadsheet. See calculate subnet sizes for a new workspace.
type GcpNetworkInfo ¶
type GcpNetworkInfo struct { // The Google Cloud project ID of the VPC network. NetworkProjectId string `json:"network_project_id"` // The name of the secondary IP range for pods. A Databricks-managed GKE // cluster uses this IP range for its pods. This secondary IP range can be // used by only one workspace. PodIpRangeName string `json:"pod_ip_range_name"` // The name of the secondary IP range for services. A Databricks-managed GKE // cluster uses this IP range for its services. This secondary IP range can // be used by only one workspace. ServiceIpRangeName string `json:"service_ip_range_name"` // The ID of the subnet associated with this network. SubnetId string `json:"subnet_id"` // The Google Cloud region of the workspace data plane (for example, // `us-east4`). SubnetRegion string `json:"subnet_region"` // The ID of the VPC associated with this network. VPC IDs can be used in // multiple network configurations. VpcId string `json:"vpc_id"` }
The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and secondary IP ranges).
type GcpVpcEndpointInfo ¶
type GcpVpcEndpointInfo struct { // Region of the PSC endpoint. EndpointRegion string `json:"endpoint_region"` // The Google Cloud project ID of the VPC network where the PSC connection // resides. ProjectId string `json:"project_id"` // The unique ID of this PSC connection. PscConnectionId string `json:"psc_connection_id,omitempty"` // The name of the PSC endpoint in the Google Cloud project. PscEndpointName string `json:"psc_endpoint_name"` // The service attachment this PSC connection connects to. ServiceAttachmentId string `json:"service_attachment_id,omitempty"` }
The Google Cloud specific information for this Private Service Connect endpoint.
type GetCredentialRequest ¶
type GetCredentialRequest struct { // Databricks Account API credential configuration ID CredentialsId string `json:"-" url:"-"` }
Get credential configuration
type GetEncryptionKeyRequest ¶
type GetEncryptionKeyRequest struct { // Databricks encryption key configuration ID. CustomerManagedKeyId string `json:"-" url:"-"` }
Get encryption key configuration
type GetNetworkRequest ¶
type GetNetworkRequest struct { // Databricks Account API network configuration ID. NetworkId string `json:"-" url:"-"` }
Get a network configuration
type GetPrivateAccesRequest ¶
type GetPrivateAccesRequest struct { // Databricks Account API private access settings ID. PrivateAccessSettingsId string `json:"-" url:"-"` }
Get a private access settings object
type GetStorageRequest ¶
type GetStorageRequest struct { // Databricks Account API storage configuration ID. StorageConfigurationId string `json:"-" url:"-"` }
Get storage configuration
type GetVpcEndpointRequest ¶
type GetVpcEndpointRequest struct { // Databricks VPC endpoint ID. VpcEndpointId string `json:"-" url:"-"` }
Get a VPC endpoint configuration
type GetWorkspaceRequest ¶
type GetWorkspaceRequest struct { // Workspace ID. WorkspaceId int64 `json:"-" url:"-"` }
Get a workspace
type GkeConfig ¶
type GkeConfig struct { // Specifies the network connectivity types for the GKE nodes and the GKE // master network. // // Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the // workspace. The GKE nodes will not have public IPs. // // Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of // a public GKE cluster have public IP addresses. ConnectivityType GkeConfigConnectivityType `json:"connectivity_type,omitempty"` // The IP range from which to allocate GKE cluster master resources. This // field will be ignored if GKE private cluster is not enabled. // // It must be exactly as big as `/28`. MasterIpRange string `json:"master_ip_range,omitempty"` }
The configurations for the GKE cluster of a Databricks workspace.
type GkeConfigConnectivityType ¶
type GkeConfigConnectivityType string
Specifies the network connectivity types for the GKE nodes and the GKE master network.
Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE nodes will not have public IPs.
Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE cluster have public IP addresses.
const GkeConfigConnectivityTypePrivateNodePublicMaster GkeConfigConnectivityType = `PRIVATE_NODE_PUBLIC_MASTER`
const GkeConfigConnectivityTypePublicNodePublicMaster GkeConfigConnectivityType = `PUBLIC_NODE_PUBLIC_MASTER`
func (*GkeConfigConnectivityType) Set ¶
func (f *GkeConfigConnectivityType) Set(v string) error
Set raw string value and validate it against allowed values
func (*GkeConfigConnectivityType) String ¶
func (f *GkeConfigConnectivityType) String() string
String representation for fmt.Print
func (*GkeConfigConnectivityType) Type ¶
func (f *GkeConfigConnectivityType) Type() string
Type always returns GkeConfigConnectivityType to satisfy [pflag.Value] interface
type KeyUseCase ¶
type KeyUseCase string
The intended use case for a customer-managed encryption key: `MANAGED_SERVICES` or `STORAGE`.
const KeyUseCaseManagedServices KeyUseCase = `MANAGED_SERVICES`
Encrypts notebook and secret data in the control plane
const KeyUseCaseStorage KeyUseCase = `STORAGE`
Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes.
func (*KeyUseCase) Set ¶
func (f *KeyUseCase) Set(v string) error
Set raw string value and validate it against allowed values
func (*KeyUseCase) String ¶
func (f *KeyUseCase) String() string
String representation for fmt.Print
func (*KeyUseCase) Type ¶
func (f *KeyUseCase) Type() string
Type always returns KeyUseCase to satisfy [pflag.Value] interface
type Network ¶
type Network struct { // The Databricks account ID associated with this network configuration. AccountId string `json:"account_id,omitempty"` // Time in epoch milliseconds when the network was created. CreationTime int64 `json:"creation_time,omitempty"` // Array of error messages about the network configuration. ErrorMessages []NetworkHealth `json:"error_messages,omitempty"` // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"` // The Databricks network configuration ID. NetworkId string `json:"network_id,omitempty"` // The human-readable name of the network configuration. NetworkName string `json:"network_name,omitempty"` SecurityGroupIds []string `json:"security_group_ids,omitempty"` SubnetIds []string `json:"subnet_ids,omitempty"` // If specified, contains the VPC endpoints used to allow cluster // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"` // The ID of the VPC associated with this network configuration. VPC IDs can // be used in multiple networks. VpcId string `json:"vpc_id,omitempty"` // This describes an enum VpcStatus VpcStatus `json:"vpc_status,omitempty"` // Array of warning messages about the network configuration. WarningMessages []NetworkWarning `json:"warning_messages,omitempty"` // Workspace ID associated with this network configuration. WorkspaceId int64 `json:"workspace_id,omitempty"` }
type NetworkHealth ¶
type NetworkVpcEndpoints ¶
type NetworkVpcEndpoints struct { // The VPC endpoint ID used by this network to access the Databricks secure // cluster connectivity relay. DataplaneRelay []string `json:"dataplane_relay"` // The VPC endpoint ID used by this network to access the Databricks REST // API. RestApi []string `json:"rest_api"` }
If specified, contains the VPC endpoints used to allow cluster communication from this VPC over AWS PrivateLink.
type NetworkWarning ¶
type NetworkWarning struct { // Details of the warning. WarningMessage string `json:"warning_message,omitempty"` // The AWS resource associated with this warning: a subnet or a security // group. WarningType WarningType `json:"warning_type,omitempty"` }
type NetworksAPI ¶
type NetworksAPI struct {
// contains filtered or unexported fields
}
These APIs manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs.
func NewNetworks ¶
func NewNetworks(client *client.DatabricksClient) *NetworksAPI
func (*NetworksAPI) Create ¶
func (a *NetworksAPI) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)
Create network configuration.
Creates a Databricks network configuration that represents a VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets.
Example (Networks) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{ NetworkName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), VpcId: fmt.Sprintf("%x", time.Now().UnixNano()), SubnetIds: []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())}, SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", netw)
Output:
func (*NetworksAPI) Delete ¶
func (a *NetworksAPI) Delete(ctx context.Context, request DeleteNetworkRequest) error
Delete a network configuration.
Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.
This operation is available only if your account is on the E2 version of the platform.
func (*NetworksAPI) DeleteByNetworkId ¶
func (a *NetworksAPI) DeleteByNetworkId(ctx context.Context, networkId string) error
Delete a network configuration.
Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.
This operation is available only if your account is on the E2 version of the platform.
func (*NetworksAPI) Get ¶
func (a *NetworksAPI) Get(ctx context.Context, request GetNetworkRequest) (*Network, error)
Get a network configuration.
Gets a Databricks network configuration, which represents a cloud VPC and its resources.
Example (Networks) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{ NetworkName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), VpcId: fmt.Sprintf("%x", time.Now().UnixNano()), SubnetIds: []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())}, SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())}, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", netw) byId, err := a.Networks.GetByNetworkId(ctx, netw.NetworkId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId)
Output:
func (*NetworksAPI) GetByNetworkId ¶
func (a *NetworksAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error)
Get a network configuration.
Gets a Databricks network configuration, which represents a cloud VPC and its resources.
func (*NetworksAPI) GetByNetworkName ¶
func (a *NetworksAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error)
GetByNetworkName calls NetworksAPI.NetworkNetworkNameToNetworkIdMap and returns a single Network.
Returns an error if there's more than one Network with the same .NetworkName.
Note: All Network instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*NetworksAPI) Impl ¶
func (a *NetworksAPI) Impl() NetworksService
Impl returns low-level Networks API implementation
func (*NetworksAPI) List ¶
func (a *NetworksAPI) List(ctx context.Context) ([]Network, error)
Get all network configurations.
Gets a list of all Databricks network configurations for an account, specified by ID.
This operation is available only if your account is on the E2 version of the platform.
func (*NetworksAPI) NetworkNetworkNameToNetworkIdMap ¶
func (a *NetworksAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)
NetworkNetworkNameToNetworkIdMap calls NetworksAPI.List and creates a map of results with Network.NetworkName as key and Network.NetworkId as value.
Returns an error if there's more than one Network with the same .NetworkName.
Note: All Network instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
func (*NetworksAPI) WithImpl ¶
func (a *NetworksAPI) WithImpl(impl NetworksService) *NetworksAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type NetworksService ¶
type NetworksService interface { // Create network configuration. // // Creates a Databricks network configuration that represents an VPC and its // resources. The VPC will be used for new Databricks clusters. This // requires a pre-existing VPC and subnets. Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) // Delete a network configuration. // // Deletes a Databricks network configuration, which represents a cloud VPC // and its resources. You cannot delete a network that is associated with a // workspace. // // This operation is available only if your account is on the E2 version of // the platform. Delete(ctx context.Context, request DeleteNetworkRequest) error // Get a network configuration. // // Gets a Databricks network configuration, which represents a cloud VPC and // its resources. Get(ctx context.Context, request GetNetworkRequest) (*Network, error) // Get all network configurations. // // Gets a list of all Databricks network configurations for an account, // specified by ID. // // This operation is available only if your account is on the E2 version of // the platform. List(ctx context.Context) ([]Network, error) }
These APIs manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs.
type PricingTier ¶
type PricingTier string
The pricing tier of the workspace. For pricing tier information, see AWS Pricing.
const PricingTierCommunityEdition PricingTier = `COMMUNITY_EDITION`
const PricingTierDedicated PricingTier = `DEDICATED`
const PricingTierEnterprise PricingTier = `ENTERPRISE`
const PricingTierPremium PricingTier = `PREMIUM`
const PricingTierStandard PricingTier = `STANDARD`
const PricingTierUnknown PricingTier = `UNKNOWN`
func (*PricingTier) Set ¶
func (f *PricingTier) Set(v string) error
Set raw string value and validate it against allowed values
func (*PricingTier) String ¶
func (f *PricingTier) String() string
String representation for fmt.Print
func (*PricingTier) Type ¶
func (f *PricingTier) Type() string
Type always returns PricingTier to satisfy [pflag.Value] interface
type PrivateAccessAPI ¶
type PrivateAccessAPI struct {
// contains filtered or unexported fields
}
These APIs manage private access settings for this account.
func NewPrivateAccess ¶
func NewPrivateAccess(client *client.DatabricksClient) *PrivateAccessAPI
func (*PrivateAccessAPI) Create ¶
func (a *PrivateAccessAPI) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
Create private access settings.
Creates a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property.
You can share one private access settings with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
Example (PrivateAccess) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{ PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Region: os.Getenv("AWS_REGION"), }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) // cleanup err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId) if err != nil { panic(err) }
Output:
func (*PrivateAccessAPI) Delete ¶
func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAccesRequest) error
Delete a private access settings object.
Deletes a private access settings object, which determines how your workspace is accessed over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*PrivateAccessAPI) DeleteByPrivateAccessSettingsId ¶
func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error
Delete a private access settings object.
Deletes a private access settings object, which determines how your workspace is accessed over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*PrivateAccessAPI) Get ¶
func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)
Get a private access settings object.
Gets a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
Example (PrivateAccess) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{ PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Region: os.Getenv("AWS_REGION"), }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) byId, err := a.PrivateAccess.GetByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId) if err != nil { panic(err) }
Output:
func (*PrivateAccessAPI) GetByPrivateAccessSettingsId ¶
func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)
Get a private access settings object.
Gets a private access settings object, which specifies how your workspace is accessed over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*PrivateAccessAPI) GetByPrivateAccessSettingsName ¶
func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)
GetByPrivateAccessSettingsName calls PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap and returns a single PrivateAccessSettings.
Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.
Note: All PrivateAccessSettings instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*PrivateAccessAPI) Impl ¶
func (a *PrivateAccessAPI) Impl() PrivateAccessService
Impl returns low-level PrivateAccess API implementation
func (*PrivateAccessAPI) List ¶
func (a *PrivateAccessAPI) List(ctx context.Context) ([]PrivateAccessSettings, error)
Get all private access settings objects.
Gets a list of all private access settings objects for an account, specified by ID.
func (*PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap ¶
func (a *PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)
PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls PrivateAccessAPI.List and creates a map of results with PrivateAccessSettings.PrivateAccessSettingsName as key and PrivateAccessSettings.PrivateAccessSettingsId as value.
Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.
Note: All PrivateAccessSettings instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
func (*PrivateAccessAPI) Replace ¶
func (a *PrivateAccessAPI) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error
Replace private access settings.
Updates an existing private access settings object, which specifies how your workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property.
This operation completely overwrites your existing private access settings object attached to your workspaces. All workspaces attached to the private access settings are affected by any change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of these changes might take several minutes to propagate to the workspace API.
You can share one private access settings object with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
Example (PrivateAccess) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.PrivateAccess.Create(ctx, provisioning.UpsertPrivateAccessSettingsRequest{ PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Region: os.Getenv("AWS_REGION"), }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) err = a.PrivateAccess.Replace(ctx, provisioning.UpsertPrivateAccessSettingsRequest{ PrivateAccessSettingsId: created.PrivateAccessSettingsId, PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), Region: os.Getenv("AWS_REGION"), }) if err != nil { panic(err) } // cleanup err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId) if err != nil { panic(err) }
Output:
func (*PrivateAccessAPI) WithImpl ¶
func (a *PrivateAccessAPI) WithImpl(impl PrivateAccessService) *PrivateAccessAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type PrivateAccessLevel ¶
type PrivateAccessLevel string
The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. * `ACCOUNT` level access (the default) allows only VPC endpoints that are registered in your Databricks account to connect to your workspace. * `ENDPOINT` level access allows only specified VPC endpoints to connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.
const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT`
const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT`
func (*PrivateAccessLevel) Set ¶
func (f *PrivateAccessLevel) Set(v string) error
Set raw string value and validate it against allowed values
func (*PrivateAccessLevel) String ¶
func (f *PrivateAccessLevel) String() string
String representation for fmt.Print
func (*PrivateAccessLevel) Type ¶
func (f *PrivateAccessLevel) Type() string
Type always returns PrivateAccessLevel to satisfy [pflag.Value] interface
type PrivateAccessService ¶
type PrivateAccessService interface { // Create private access settings. // // Creates a private access settings object, which specifies how your // workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a // workspace must have a private access settings object referenced by ID in // the workspace's `private_access_settings_id` property. // // You can share one private access settings with multiple workspaces in a // single account. However, private access settings are specific to AWS // regions, so only workspaces in the same AWS region can use a given // private access settings object. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) // Delete a private access settings object. // // Deletes a private access settings object, which determines how your // workspace is accessed over [AWS PrivateLink]. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html Delete(ctx context.Context, request DeletePrivateAccesRequest) error // Get a private access settings object. // // Gets a private access settings object, which specifies how your workspace // is accessed over [AWS PrivateLink]. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. 
// // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) // Get all private access settings objects. // // Gets a list of all private access settings objects for an account, // specified by ID. List(ctx context.Context) ([]PrivateAccessSettings, error) // Replace private access settings. // // Updates an existing private access settings object, which specifies how // your workspace is accessed over [AWS PrivateLink]. To use AWS // PrivateLink, a workspace must have a private access settings object // referenced by ID in the workspace's `private_access_settings_id` // property. // // This operation completely overwrites your existing private access // settings object attached to your workspaces. All workspaces attached to // the private access settings are affected by any change. If // `public_access_enabled`, `private_access_level`, or // `allowed_vpc_endpoint_ids` are updated, effects of these changes might // take several minutes to propagate to the workspace API. // // You can share one private access settings object with multiple workspaces // in a single account. However, private access settings are specific to AWS // regions, so only workspaces in the same AWS region can use a given // private access settings object. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error }
These APIs manage private access settings for this account.
type PrivateAccessSettings ¶
type PrivateAccessSettings struct { // The Databricks account ID that hosts the credential. AccountId string `json:"account_id,omitempty"` // An array of Databricks VPC endpoint IDs. AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"` // The private access level controls which VPC endpoints can connect to the // UI or API of any workspace that attaches this private access settings // object. * `ACCOUNT` level access (the default) allows only VPC endpoints // that are registered in your Databricks account connect to your workspace. // * `ENDPOINT` level access allows only specified VPC endpoints connect to // your workspace. For details, see `allowed_vpc_endpoint_ids`. PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"` // Databricks private access settings ID. PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"` // The human-readable name of the private access settings object. PrivateAccessSettingsName string `json:"private_access_settings_name,omitempty"` // Determines if the workspace can be accessed over public internet. For // fully private workspaces, you can optionally specify `false`, but only if // you implement both the front-end and the back-end PrivateLink // connections. Otherwise, specify `true`, which means that public access is // enabled. PublicAccessEnabled bool `json:"public_access_enabled,omitempty"` // The cloud region for workspaces attached to this private access settings // object. Region string `json:"region,omitempty"` }
type RootBucketInfo ¶
type RootBucketInfo struct { // The name of the S3 bucket. BucketName string `json:"bucket_name,omitempty"` }
Root S3 bucket information.
type StorageAPI ¶
type StorageAPI struct {
// contains filtered or unexported fields
}
These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.
func NewStorage ¶
func NewStorage(client *client.DatabricksClient) *StorageAPI
func (*StorageAPI) Create ¶
func (a *StorageAPI) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)
Create new storage configuration.
Creates new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the required bucket policy.
For information about how to create a new workspace with this API, see Create a new workspace using the Account API
Example (LogDelivery) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } bucket, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", bucket) // cleanup err = a.Storage.DeleteByStorageConfigurationId(ctx, bucket.StorageConfigurationId) if err != nil { panic(err) }
Output:
Example (Storage) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage)
Output:
Example (Workspaces) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: os.Getenv("TEST_ROOT_BUCKET"), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage) // cleanup err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId) if err != nil { panic(err) }
Output:
func (*StorageAPI) Delete ¶
func (a *StorageAPI) Delete(ctx context.Context, request DeleteStorageRequest) error
Delete storage configuration.
Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace.
func (*StorageAPI) DeleteByStorageConfigurationId ¶
func (a *StorageAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error
Delete storage configuration.
Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace.
func (*StorageAPI) Get ¶
func (a *StorageAPI) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)
Get storage configuration.
Gets a Databricks storage configuration for an account, both specified by ID.
Example (Storage) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage) byId, err := a.Storage.GetByStorageConfigurationId(ctx, storage.StorageConfigurationId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId)
Output:
func (*StorageAPI) GetByStorageConfigurationId ¶
func (a *StorageAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)
Get storage configuration.
Gets a Databricks storage configuration for an account, both specified by ID.
func (*StorageAPI) GetByStorageConfigurationName ¶
func (a *StorageAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)
GetByStorageConfigurationName calls StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap and returns a single StorageConfiguration.
Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.
Note: All StorageConfiguration instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*StorageAPI) Impl ¶
func (a *StorageAPI) Impl() StorageService
Impl returns low-level Storage API implementation
func (*StorageAPI) List ¶
func (a *StorageAPI) List(ctx context.Context) ([]StorageConfiguration, error)
Get all storage configurations.
Gets a list of all Databricks storage configurations for your account, specified by ID.
func (*StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap ¶
func (a *StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)
StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls StorageAPI.List and creates a map of results with StorageConfiguration.StorageConfigurationName as key and StorageConfiguration.StorageConfigurationId as value.
Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.
Note: All StorageConfiguration instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
func (*StorageAPI) WithImpl ¶
func (a *StorageAPI) WithImpl(impl StorageService) *StorageAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type StorageConfiguration ¶
type StorageConfiguration struct { // The Databricks account ID that hosts the credential. AccountId string `json:"account_id,omitempty"` // Time in epoch milliseconds when the storage configuration was created. CreationTime int64 `json:"creation_time,omitempty"` // Root S3 bucket information. RootBucketInfo *RootBucketInfo `json:"root_bucket_info,omitempty"` // Databricks storage configuration ID. StorageConfigurationId string `json:"storage_configuration_id,omitempty"` // The human-readable name of the storage configuration. StorageConfigurationName string `json:"storage_configuration_name,omitempty"` }
type StorageService ¶
type StorageService interface { // Create new storage configuration. // // Creates new storage configuration for an account, specified by ID. // Uploads a storage configuration object that represents the root AWS S3 // bucket in your account. Databricks stores related workspace assets // including DBFS, cluster logs, and job results. For the AWS S3 bucket, you // need to configure the required bucket policy. // // For information about how to create a new workspace with this API, see // [Create a new workspace using the Account API] // // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) // Delete storage configuration. // // Deletes a Databricks storage configuration. You cannot delete a storage // configuration that is associated with any workspace. Delete(ctx context.Context, request DeleteStorageRequest) error // Get storage configuration. // // Gets a Databricks storage configuration for an account, both specified by // ID. Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) // Get all storage configurations. // // Gets a list of all Databricks storage configurations for your account, // specified by ID. List(ctx context.Context) ([]StorageConfiguration, error) }
These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.
type UpdateWorkspaceRequest ¶
type UpdateWorkspaceRequest struct { // The AWS region of the workspace's data plane (for example, `us-west-2`). // This parameter is available only for updating failed workspaces. AwsRegion string `json:"aws_region,omitempty"` // ID of the workspace's credential configuration object. This parameter is // available for updating both failed and running workspaces. CredentialsId string `json:"credentials_id,omitempty"` // The ID of the workspace's managed services encryption key configuration // object. This parameter is available only for updating failed workspaces. ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"` // The ID of the workspace's network configuration object. Used only if you // already use a customer-managed VPC. For failed workspaces only, you can // switch from a Databricks-managed VPC to a customer-managed VPC by // updating the workspace to add a network configuration ID. NetworkId string `json:"network_id,omitempty"` // The ID of the workspace's storage configuration object. This parameter is // available only for updating failed workspaces. StorageConfigurationId string `json:"storage_configuration_id,omitempty"` // The ID of the key configuration object for workspace storage. This // parameter is available for updating both failed and running workspaces. StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"` // Workspace ID. WorkspaceId int64 `json:"-" url:"-"` }
type UpsertPrivateAccessSettingsRequest ¶
type UpsertPrivateAccessSettingsRequest struct { // An array of Databricks VPC endpoint IDs. This is the Databricks ID that // is returned when registering the VPC endpoint configuration in your // Databricks account. This is not the ID of the VPC endpoint in AWS. // // Only used when `private_access_level` is set to `ENDPOINT`. This is an // allow list of VPC endpoints in your account that can connect to your // workspace over AWS PrivateLink. // // If hybrid access to your workspace is enabled by setting // `public_access_enabled` to `true`, this control only works for // PrivateLink connections. To control how your workspace is accessed via // public internet, see [IP access lists]. // // [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"` // The private access level controls which VPC endpoints can connect to the // UI or API of any workspace that attaches this private access settings // object. * `ACCOUNT` level access (the default) allows only VPC endpoints // that are registered in your Databricks account to connect to your // workspace. * `ENDPOINT` level access allows only specified VPC endpoints // to connect to your workspace. For details, see `allowed_vpc_endpoint_ids`. PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"` // Databricks Account API private access settings ID. PrivateAccessSettingsId string `json:"-" url:"-"` // The human-readable name of the private access settings object. PrivateAccessSettingsName string `json:"private_access_settings_name"` // Determines if the workspace can be accessed over public internet. For // fully private workspaces, you can optionally specify `false`, but only if // you implement both the front-end and the back-end PrivateLink // connections. Otherwise, specify `true`, which means that public access is // enabled. 
PublicAccessEnabled bool `json:"public_access_enabled,omitempty"` // The cloud region for workspaces associated with this private access // settings object. Region string `json:"region"` }
type VpcEndpoint ¶
type VpcEndpoint struct { // The Databricks account ID that hosts the VPC endpoint configuration. AccountId string `json:"account_id,omitempty"` // The AWS Account in which the VPC endpoint object exists. AwsAccountId string `json:"aws_account_id,omitempty"` // The ID of the Databricks [endpoint service] that this VPC endpoint is // connected to. For a list of endpoint service IDs for each supported AWS // region, see the [Databricks PrivateLink documentation]. // // [Databricks PrivateLink documentation]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html AwsEndpointServiceId string `json:"aws_endpoint_service_id,omitempty"` // The ID of the VPC endpoint object in AWS. AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"` // The Google Cloud specific information for this Private Service Connect // endpoint. GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"` // The AWS region in which this VPC endpoint object exists. Region string `json:"region,omitempty"` // The current state (such as `available` or `rejected`) of the VPC // endpoint. Derived from AWS. For the full set of values, see [AWS // DescribeVpcEndpoint documentation]. // // [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html State string `json:"state,omitempty"` // This enumeration represents the type of Databricks VPC [endpoint service] // that was used when creating this VPC endpoint. // // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html UseCase EndpointUseCase `json:"use_case,omitempty"` // Databricks VPC endpoint ID. This is the Databricks-specific name of the // VPC endpoint. Do not confuse this with the `aws_vpc_endpoint_id`, which // is the ID within AWS of the VPC endpoint. 
VpcEndpointId string `json:"vpc_endpoint_id,omitempty"` // The human-readable name of the VPC endpoint. VpcEndpointName string `json:"vpc_endpoint_name,omitempty"` }
type VpcEndpointsAPI ¶
type VpcEndpointsAPI struct {
// contains filtered or unexported fields
}
These APIs manage VPC endpoint configurations for this account.
func NewVpcEndpoints ¶
func NewVpcEndpoints(client *client.DatabricksClient) *VpcEndpointsAPI
func (*VpcEndpointsAPI) Create ¶
func (a *VpcEndpointsAPI) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)
Create VPC endpoint configuration.
Creates a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.
After you create the VPC endpoint configuration, the Databricks endpoint service automatically accepts the VPC endpoint.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
Example (VpcEndpoints) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{ AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"), Region: os.Getenv("AWS_REGION"), VpcEndpointName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) // cleanup err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId) if err != nil { panic(err) }
Output:
func (*VpcEndpointsAPI) Delete ¶
func (a *VpcEndpointsAPI) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error
Delete VPC endpoint configuration.
Deletes a VPC endpoint configuration, which represents an AWS VPC endpoint that can communicate privately with Databricks over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*VpcEndpointsAPI) DeleteByVpcEndpointId ¶
func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error
Delete VPC endpoint configuration.
Deletes a VPC endpoint configuration, which represents an AWS VPC endpoint that can communicate privately with Databricks over AWS PrivateLink.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*VpcEndpointsAPI) Get ¶
func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)
Get a VPC endpoint configuration.
Gets a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.
Example (VpcEndpoints) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{ AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"), Region: os.Getenv("AWS_REGION"), VpcEndpointName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) byId, err := a.VpcEndpoints.GetByVpcEndpointId(ctx, created.VpcEndpointId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId) if err != nil { panic(err) }
Output:
func (*VpcEndpointsAPI) GetByVpcEndpointId ¶
func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)
Get a VPC endpoint configuration.
Gets a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.
func (*VpcEndpointsAPI) GetByVpcEndpointName ¶
func (a *VpcEndpointsAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)
GetByVpcEndpointName calls VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap and returns a single VpcEndpoint.
Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.
Note: All VpcEndpoint instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*VpcEndpointsAPI) Impl ¶
func (a *VpcEndpointsAPI) Impl() VpcEndpointsService
Impl returns low-level VpcEndpoints API implementation
func (*VpcEndpointsAPI) List ¶
func (a *VpcEndpointsAPI) List(ctx context.Context) ([]VpcEndpoint, error)
Get all VPC endpoint configurations.
Gets a list of all VPC endpoints for an account, specified by ID.
Before configuring PrivateLink, read the Databricks article about PrivateLink.
func (*VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap ¶
func (a *VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)
VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls VpcEndpointsAPI.List and creates a map of results with VpcEndpoint.VpcEndpointName as key and VpcEndpoint.VpcEndpointId as value.
Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.
Note: All VpcEndpoint instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
func (*VpcEndpointsAPI) WithImpl ¶
func (a *VpcEndpointsAPI) WithImpl(impl VpcEndpointsService) *VpcEndpointsAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
type VpcEndpointsService ¶
type VpcEndpointsService interface { // Create VPC endpoint configuration. // // Creates a VPC endpoint configuration, which represents a [VPC endpoint] // object in AWS used to communicate privately with Databricks over [AWS // PrivateLink]. // // After you create the VPC endpoint configuration, the Databricks [endpoint // service] automatically accepts the VPC endpoint. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) // Delete VPC endpoint configuration. // // Deletes a VPC endpoint configuration, which represents an [AWS VPC // endpoint] that can communicate privately with Databricks over [AWS // PrivateLink]. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html Delete(ctx context.Context, request DeleteVpcEndpointRequest) error // Get a VPC endpoint configuration. // // Gets a VPC endpoint configuration, which represents a [VPC endpoint] // object in AWS used to communicate privately with Databricks over [AWS // PrivateLink]. 
// // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) // Get all VPC endpoint configurations. // // Gets a list of all VPC endpoints for an account, specified by ID. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. // // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html List(ctx context.Context) ([]VpcEndpoint, error) }
These APIs manage VPC endpoint configurations for this account.
type VpcStatus ¶
type VpcStatus string
VpcStatus is an enum describing the status of a VPC configuration (for example `VALID`, `BROKEN`, `WARNED`, or `UNATTACHED`).
const VpcStatusBroken VpcStatus = `BROKEN`
Broken.
const VpcStatusUnattached VpcStatus = `UNATTACHED`
Unattached.
const VpcStatusValid VpcStatus = `VALID`
Valid.
const VpcStatusWarned VpcStatus = `WARNED`
Warned.
type WaitGetWorkspaceRunning ¶ added in v0.10.0
type WaitGetWorkspaceRunning[R any] struct { Response *R WorkspaceId int64 `json:"workspace_id"` // contains filtered or unexported fields }
WaitGetWorkspaceRunning is a wrapper that calls WorkspacesAPI.WaitGetWorkspaceRunning and waits to reach RUNNING state.
func (*WaitGetWorkspaceRunning[R]) Get ¶ added in v0.10.0
func (w *WaitGetWorkspaceRunning[R]) Get() (*Workspace, error)
Get the Workspace with the default timeout of 20 minutes.
func (*WaitGetWorkspaceRunning[R]) GetWithTimeout ¶ added in v0.10.0
func (w *WaitGetWorkspaceRunning[R]) GetWithTimeout(timeout time.Duration) (*Workspace, error)
Get the Workspace with custom timeout.
func (*WaitGetWorkspaceRunning[R]) OnProgress ¶ added in v0.10.0
func (w *WaitGetWorkspaceRunning[R]) OnProgress(callback func(*Workspace)) *WaitGetWorkspaceRunning[R]
OnProgress invokes a callback every time it polls for the status update.
type WarningType ¶
type WarningType string
The AWS resource associated with this warning: a subnet or a security group.
const WarningTypeSecurityGroup WarningType = `securityGroup`
const WarningTypeSubnet WarningType = `subnet`
func (*WarningType) Set ¶
func (f *WarningType) Set(v string) error
Set raw string value and validate it against allowed values
func (*WarningType) String ¶
func (f *WarningType) String() string
String representation for fmt.Print
func (*WarningType) Type ¶
func (f *WarningType) Type() string
Type always returns WarningType to satisfy [pflag.Value] interface
type Workspace ¶
type Workspace struct { // Databricks account ID. AccountId string `json:"account_id,omitempty"` // The AWS region of the workspace data plane (for example, `us-west-2`). AwsRegion string `json:"aws_region,omitempty"` // The cloud name. This field always has the value `gcp`. Cloud string `json:"cloud,omitempty"` // The general workspace configurations that are specific to cloud // providers. CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"` // Time in epoch milliseconds when the workspace was created. CreationTime int64 `json:"creation_time,omitempty"` // ID of the workspace's credential configuration object. CredentialsId string `json:"credentials_id,omitempty"` // The deployment name defines part of the subdomain for the workspace. The // workspace URL for web application and REST APIs is // `<deployment-name>.cloud.databricks.com`. // // This value must be unique across all non-deleted deployments across all // AWS regions. DeploymentName string `json:"deployment_name,omitempty"` // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed // VPC in the `network_id` field.", All the IP range configurations must be // mutually exclusive. An attempt to create a workspace fails if Databricks // detects an IP range overlap. // // Specify custom IP ranges in CIDR format. The IP ranges for these fields // must not overlap, and all IP addresses must be entirely within the // following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, // `192.168.0.0/16`, and `240.0.0.0/4`. // // The sizes of these IP ranges affect the maximum number of nodes for the // workspace. // // **Important**: Confirm the IP ranges used by your Databricks workspace // before creating the workspace. You cannot change them after your // workspace is deployed. 
If the IP address ranges for your Databricks are // too small, IP exhaustion can occur, causing your Databricks jobs to fail. // To determine the address range sizes that you need, Databricks provides a // calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes // for a new workspace]. // // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig *GkeConfig `json:"gke_config,omitempty"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). Location string `json:"location,omitempty"` // ID of the key configuration for encrypting managed services. ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"` // The network configuration ID that is attached to the workspace. This // field is available only if the network is a customer-managed network. NetworkId string `json:"network_id,omitempty"` // The pricing tier of the workspace. For pricing tier information, see [AWS // Pricing]. // // [AWS Pricing]: https://databricks.com/product/aws-pricing PricingTier PricingTier `json:"pricing_tier,omitempty"` // ID of the workspace's private access settings object. Only used for // PrivateLink. You must specify this ID if you are using [AWS PrivateLink] // for either front-end (user-to-workspace connection), back-end (data plane // to control plane connection), or both connection types. // // Before configuring PrivateLink, read the [Databricks article about // PrivateLink]. 
// // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"` // ID of the workspace's storage configuration object. StorageConfigurationId string `json:"storage_configuration_id,omitempty"` // ID of the key configuration for encrypting workspace storage. StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"` // A unique integer ID for the workspace WorkspaceId int64 `json:"workspace_id,omitempty"` // The human-readable name of the workspace. WorkspaceName string `json:"workspace_name,omitempty"` // The status of the workspace. For workspace creation, usually it is set to // `PROVISIONING` initially. Continue to check the status until the status // is `RUNNING`. WorkspaceStatus WorkspaceStatus `json:"workspace_status,omitempty"` // Message describing the current workspace status. WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"` }
type WorkspaceStatus ¶
type WorkspaceStatus string
The status of the workspace. For workspace creation, usually it is set to `PROVISIONING` initially. Continue to check the status until the status is `RUNNING`.
const WorkspaceStatusBanned WorkspaceStatus = `BANNED`
const WorkspaceStatusCancelling WorkspaceStatus = `CANCELLING`
const WorkspaceStatusFailed WorkspaceStatus = `FAILED`
const WorkspaceStatusNotProvisioned WorkspaceStatus = `NOT_PROVISIONED`
const WorkspaceStatusProvisioning WorkspaceStatus = `PROVISIONING`
const WorkspaceStatusRunning WorkspaceStatus = `RUNNING`
func (*WorkspaceStatus) Set ¶
func (f *WorkspaceStatus) Set(v string) error
Set raw string value and validate it against allowed values
func (*WorkspaceStatus) String ¶
func (f *WorkspaceStatus) String() string
String representation for fmt.Print
func (*WorkspaceStatus) Type ¶
func (f *WorkspaceStatus) Type() string
Type always returns WorkspaceStatus to satisfy [pflag.Value] interface
type WorkspacesAPI ¶
type WorkspacesAPI struct {
// contains filtered or unexported fields
}
These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.
These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
func NewWorkspaces ¶
func NewWorkspaces(client *client.DatabricksClient) *WorkspacesAPI
func (*WorkspacesAPI) Create ¶
func (a *WorkspacesAPI) Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)
Create a new workspace.
Creates a new workspace.
**Important**: This operation is asynchronous. A response with HTTP status code 200 means the request has been accepted and is in progress, but does not mean that the workspace deployed successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.
Example (Workspaces) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: os.Getenv("TEST_ROOT_BUCKET"), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage) role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{ WorkspaceName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsRegion: os.Getenv("AWS_REGION"), CredentialsId: role.CredentialsId, StorageConfigurationId: storage.StorageConfigurationId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) // cleanup err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId) if err != nil { panic(err) } err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) } err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId) if err != nil { panic(err) }
Output:
func (*WorkspacesAPI) CreateAndWait
deprecated
func (a *WorkspacesAPI) CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)
Calls WorkspacesAPI.Create and waits to reach RUNNING state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Workspace](60*time.Minute) functional option.
Deprecated: use WorkspacesAPI.Create.Get() or WorkspacesAPI.WaitGetWorkspaceRunning
func (*WorkspacesAPI) Delete ¶
func (a *WorkspacesAPI) Delete(ctx context.Context, request DeleteWorkspaceRequest) error
Delete a workspace.
Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspaces resources to be deleted, depending on the size and number of workspace resources.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
func (*WorkspacesAPI) DeleteByWorkspaceId ¶
func (a *WorkspacesAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error
Delete a workspace.
Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspaces resources to be deleted, depending on the size and number of workspace resources.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
func (*WorkspacesAPI) Get ¶
func (a *WorkspacesAPI) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)
Get a workspace.
Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.
For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
Example (Workspaces) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: os.Getenv("TEST_ROOT_BUCKET"), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage) role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{ WorkspaceName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsRegion: os.Getenv("AWS_REGION"), CredentialsId: role.CredentialsId, StorageConfigurationId: storage.StorageConfigurationId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) byId, err := a.Workspaces.GetByWorkspaceId(ctx, created.WorkspaceId) if err != nil { panic(err) } logger.Infof(ctx, "found %v", byId) // cleanup err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId) if err != nil { panic(err) } err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) } err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId) if err != nil { panic(err) }
Output:
func (*WorkspacesAPI) GetByWorkspaceId ¶
func (a *WorkspacesAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)
Get a workspace.
Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`.
For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
func (*WorkspacesAPI) GetByWorkspaceName ¶
GetByWorkspaceName calls WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap and returns a single Workspace.
Returns an error if there's more than one Workspace with the same .WorkspaceName.
Note: All Workspace instances are loaded into memory before returning matching by name.
This method is generated by Databricks SDK Code Generator.
func (*WorkspacesAPI) Impl ¶
func (a *WorkspacesAPI) Impl() WorkspacesService
Impl returns low-level Workspaces API implementation
func (*WorkspacesAPI) List ¶
func (a *WorkspacesAPI) List(ctx context.Context) ([]Workspace, error)
Get all workspaces.
Gets a list of all workspaces associated with an account, specified by ID.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
func (*WorkspacesAPI) Update ¶
func (a *WorkspacesAPI) Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[any], error)
Update workspace configuration.
Updates a workspace configuration for either a running workspace or a failed workspace. The elements that can be updated varies between these two use cases.
### Update a failed workspace You can update a Databricks workspace configuration for failed workspace deployment for some fields, but not all fields. For a failed workspace, this request supports updates to the following fields only: - Credential configuration ID - Storage configuration ID - Network configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a failed workspace only, you can convert a workspace with Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the network configuration for a failed or running workspace to add PrivateLink support, though you must also add a private access settings object. - Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Used only if you use customer-managed keys for managed services. - Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID for workspace storage. - Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace.
After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status. The workspace is successful if the status changes to `RUNNING`.
For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.
### Update a running workspace You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: - Credential configuration ID
- Network configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in this API for a failed or running workspace to add support for PrivateLink, although you also need to add a private access settings object.
- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK for managed services, adding this ID enables managed services encryption for new or updated data. Existing managed services data that existed before adding the key remains not encrypted with the DEK until it is modified. If the workspace already has customer-managed keys for managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key configuration for workspace storage. - Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace.
**Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not terminate all cluster instances in the workspace before calling this API, the request will fail.
### Wait until changes take effect. After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC, the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the workspace update is successful, the workspace status changes to `RUNNING`. Note that you can also check the workspace status in the Account Console. However, you cannot use or create clusters for another 20 minutes after that status change. This results in a total of up to 40 minutes in which you cannot create clusters. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
* For workspaces with a customer-managed VPC, the workspace status stays at status `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key configuration ID might take a few minutes to update, so continue to check the workspace until you observe that it has been updated. If the update fails, the workspace might revert silently to its original configuration. After the workspace has been updated, you cannot use or create clusters for another 20 minutes. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20 minute wait.
**Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment types and subscription types. If you have questions about availability, contact your Databricks representative.
This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
Example (Workspaces) ¶
ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { panic(err) } storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{ StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), RootBucketInfo: provisioning.RootBucketInfo{ BucketName: os.Getenv("TEST_ROOT_BUCKET"), }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", storage) role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", role) updateRole, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{ CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsCredentials: provisioning.CreateCredentialAwsCredentials{ StsRole: &provisioning.CreateCredentialStsRole{ RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"), }, }, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", updateRole) created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{ WorkspaceName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), AwsRegion: os.Getenv("AWS_REGION"), CredentialsId: role.CredentialsId, StorageConfigurationId: storage.StorageConfigurationId, }) if err != nil { panic(err) } logger.Infof(ctx, "found %v", created) _, err = a.Workspaces.UpdateAndWait(ctx, provisioning.UpdateWorkspaceRequest{ WorkspaceId: created.WorkspaceId, CredentialsId: updateRole.CredentialsId, }) if err != nil { panic(err) } // cleanup err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId) if err != nil { panic(err) } err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId) if err != nil { panic(err) } err = a.Credentials.DeleteByCredentialsId(ctx, updateRole.CredentialsId) if err != 
nil { panic(err) } err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId) if err != nil { panic(err) }
Output:
func (*WorkspacesAPI) UpdateAndWait
deprecated
func (a *WorkspacesAPI) UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)
Calls WorkspacesAPI.Update and waits to reach RUNNING state
You can override the default timeout of 20 minutes by adding the retries.Timeout[Workspace](60*time.Minute) functional option.
Deprecated: use WorkspacesAPI.Update.Get() or WorkspacesAPI.WaitGetWorkspaceRunning
func (*WorkspacesAPI) WaitGetWorkspaceRunning ¶ added in v0.10.0
func (a *WorkspacesAPI) WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64, timeout time.Duration, callback func(*Workspace)) (*Workspace, error)
WaitGetWorkspaceRunning repeatedly calls WorkspacesAPI.Get and waits to reach RUNNING state
func (*WorkspacesAPI) WithImpl ¶
func (a *WorkspacesAPI) WithImpl(impl WorkspacesService) *WorkspacesAPI
WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.
func (*WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap ¶
func (a *WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)
WorkspaceWorkspaceNameToWorkspaceIdMap calls WorkspacesAPI.List and creates a map of results with Workspace.WorkspaceName as key and Workspace.WorkspaceId as value.
Returns an error if there's more than one Workspace with the same .WorkspaceName.
Note: All Workspace instances are loaded into memory before creating a map.
This method is generated by Databricks SDK Code Generator.
type WorkspacesService ¶
// WorkspacesService manages workspaces for this account. A Databricks
// workspace is an environment for accessing all of your Databricks assets.
type WorkspacesService interface {
	// Create a new workspace.
	//
	// Creates a new workspace.
	//
	// **Important**: This operation is asynchronous. A response with HTTP
	// status code 200 means the request has been accepted and is in
	// progress, but does not mean that the workspace deployed successfully
	// and is running. The initial workspace status is typically
	// `PROVISIONING`. Use the workspace ID (`workspace_id`) field in the
	// response to identify the new workspace and make repeated `GET`
	// requests with the workspace ID and check its status. The workspace
	// becomes available when the status changes to `RUNNING`.
	Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error)

	// Delete a workspace.
	//
	// Terminates and deletes a Databricks workspace. From an API
	// perspective, deletion is immediate. However, it might take a few
	// minutes for all workspace resources to be deleted, depending on the
	// size and number of workspace resources.
	//
	// This operation is available only if your account is on the E2 version
	// of the platform or on a select custom plan that allows multiple
	// workspaces per account.
	Delete(ctx context.Context, request DeleteWorkspaceRequest) error

	// Get a workspace.
	//
	// Gets information including status for a Databricks workspace,
	// specified by ID. In the response, the `workspace_status` field
	// indicates the current status. After initial workspace creation (which
	// is asynchronous), make repeated `GET` requests with the workspace ID
	// and check its status. The workspace becomes available when the status
	// changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API
	// **including error handling**, see [Create a new workspace using the
	// Account API].
	//
	// This operation is available only if your account is on the E2 version
	// of the platform or on a select custom plan that allows multiple
	// workspaces per account.
	//
	// [Create a new workspace using the Account API]: https://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

	// Get all workspaces.
	//
	// Gets a list of all workspaces associated with an account, specified
	// by ID.
	//
	// This operation is available only if your account is on the E2 version
	// of the platform or on a select custom plan that allows multiple
	// workspaces per account.
	List(ctx context.Context) ([]Workspace, error)

	// Update workspace configuration.
	//
	// Updates a workspace configuration for either a running workspace or a
	// failed workspace. The elements that can be updated vary between these
	// two use cases.
	//
	// ### Update a failed workspace
	//
	// You can update a Databricks workspace configuration for a failed
	// workspace deployment for some fields, but not all fields. For a
	// failed workspace, this request supports updates to the following
	// fields only:
	//
	//   - Credential configuration ID
	//   - Storage configuration ID
	//   - Network configuration ID. Used only to add or change a network
	//     configuration for a customer-managed VPC. For a failed workspace
	//     only, you can convert a workspace with a Databricks-managed VPC
	//     to use a customer-managed VPC by adding this ID. You cannot
	//     downgrade a workspace with a customer-managed VPC to be a
	//     Databricks-managed VPC. You can update the network configuration
	//     for a failed or running workspace to add PrivateLink support,
	//     though you must also add a private access settings object.
	//   - Key configuration ID for managed services (control plane storage,
	//     such as notebook source and Databricks SQL queries). Used only if
	//     you use customer-managed keys for managed services.
	//   - Key configuration ID for workspace storage (root S3 bucket and,
	//     optionally, EBS volumes). Used only if you use customer-managed
	//     keys for workspace storage. **Important**: If the workspace was
	//     ever in the running state, even if briefly before becoming a
	//     failed workspace, you cannot add a new key configuration ID for
	//     workspace storage.
	//   - Private access settings ID to add PrivateLink support. You can
	//     add or update the private access settings ID to upgrade a
	//     workspace to add support for front-end, back-end, or both types
	//     of connectivity. You cannot remove (downgrade) any existing
	//     front-end or back-end PrivateLink support on a workspace.
	//
	// After calling the `PATCH` operation to update the workspace
	// configuration, make repeated `GET` requests with the workspace ID and
	// check the workspace status. The update is successful if the status
	// changes to `RUNNING`.
	//
	// For information about how to create a new workspace with this API
	// **including error handling**, see [Create a new workspace using the
	// Account API].
	//
	// ### Update a running workspace
	//
	// You can update a Databricks workspace configuration for running
	// workspaces for some fields, but not all fields. For a running
	// workspace, this request supports updating the following fields only:
	//
	//   - Credential configuration ID
	//   - Network configuration ID. Used only if you already use a
	//     customer-managed VPC. You cannot convert a running workspace from
	//     a Databricks-managed VPC to a customer-managed VPC. You can use a
	//     network configuration update in this API for a failed or running
	//     workspace to add support for PrivateLink, although you also need
	//     to add a private access settings object.
	//   - Key configuration ID for managed services (control plane storage,
	//     such as notebook source and Databricks SQL queries). Databricks
	//     does not directly encrypt the data with the customer-managed key
	//     (CMK). Databricks uses both the CMK and the Databricks managed
	//     key (DMK) that is unique to your workspace to encrypt the Data
	//     Encryption Key (DEK). Databricks uses the DEK to encrypt your
	//     workspace's managed services persisted data. If the workspace
	//     does not already have a CMK for managed services, adding this ID
	//     enables managed services encryption for new or updated data.
	//     Existing managed services data that existed before adding the key
	//     remains not encrypted with the DEK until it is modified. If the
	//     workspace already has customer-managed keys for managed services,
	//     this request rotates (changes) the CMK keys and the DEK is
	//     re-encrypted with the DMK and the new CMK.
	//   - Key configuration ID for workspace storage (root S3 bucket and,
	//     optionally, EBS volumes). You can set this only if the workspace
	//     does not already have a customer-managed key configuration for
	//     workspace storage.
	//   - Private access settings ID to add PrivateLink support. You can
	//     add or update the private access settings ID to upgrade a
	//     workspace to add support for front-end, back-end, or both types
	//     of connectivity. You cannot remove (downgrade) any existing
	//     front-end or back-end PrivateLink support on a workspace.
	//
	// **Important**: To update a running workspace, your workspace must
	// have no running compute resources that run in your workspace's VPC in
	// the Classic data plane. For example, stop all all-purpose clusters,
	// job clusters, pools with running clusters, and Classic SQL
	// warehouses. If you do not terminate all cluster instances in the
	// workspace before calling this API, the request will fail.
	//
	// ### Wait until changes take effect
	//
	// After calling the `PATCH` operation to update the workspace
	// configuration, make repeated `GET` requests with the workspace ID and
	// check the workspace status and the status of the fields.
	//
	//   - For workspaces with a Databricks-managed VPC, the workspace
	//     status becomes `PROVISIONING` temporarily (typically under 20
	//     minutes). If the workspace update is successful, the workspace
	//     status changes to `RUNNING`. Note that you can also check the
	//     workspace status in the [Account Console]. However, you cannot
	//     use or create clusters for another 20 minutes after that status
	//     change. This results in a total of up to 40 minutes in which you
	//     cannot create clusters. If you create or use clusters before this
	//     time interval elapses, clusters might fail to launch or cause
	//     other unexpected behavior.
	//   - For workspaces with a customer-managed VPC, the workspace status
	//     stays at status `RUNNING` and the VPC change happens immediately.
	//     A change to the storage customer-managed key configuration ID
	//     might take a few minutes to update, so continue to check the
	//     workspace until you observe that it has been updated. If the
	//     update fails, the workspace might revert silently to its original
	//     configuration. After the workspace has been updated, you cannot
	//     use or create clusters for another 20 minutes. If you create or
	//     use clusters before this time interval elapses, clusters might
	//     fail to launch or cause other unexpected behavior.
	//
	// If you update the _storage_ customer-managed key configurations, it
	// takes 20 minutes for the changes to fully take effect. During the 20
	// minute wait, it is important that you stop all REST API calls to the
	// DBFS API. If you are modifying _only the managed services key
	// configuration_, you can omit the 20 minute wait.
	//
	// **Important**: Customer-managed keys and customer-managed VPCs are
	// supported by only some deployment types and subscription types. If
	// you have questions about availability, contact your Databricks
	// representative.
	//
	// This operation is available only if your account is on the E2 version
	// of the platform or on a select custom plan that allows multiple
	// workspaces per account.
	//
	// [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
	// [Create a new workspace using the Account API]: https://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Update(ctx context.Context, request UpdateWorkspaceRequest) error
}
These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.
These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.