catalog

package
v0.13.3 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jul 20, 2023 License: Apache-2.0 Imports: 5 Imported by: 0

Documentation

Overview

These APIs allow you to manage Account Metastore Assignments, Account Metastores, Account Storage Credentials, Catalogs, Connections, External Locations, Functions, Grants, Metastores, Schemas, Storage Credentials, System Schemas, Table Constraints, Tables, Volumes, Workspace Bindings, etc.

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AccountMetastoreAssignmentsAPI

type AccountMetastoreAssignmentsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage metastore assignments to a workspace.

func NewAccountMetastoreAssignments

func NewAccountMetastoreAssignments(client *client.DatabricksClient) *AccountMetastoreAssignmentsAPI

func (*AccountMetastoreAssignmentsAPI) Create

Assigns a workspace to a metastore.

Creates an assignment to a metastore for a workspace. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) Delete

Delete a metastore assignment.

Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) DeleteByWorkspaceIdAndMetastoreId

func (a *AccountMetastoreAssignmentsAPI) DeleteByWorkspaceIdAndMetastoreId(ctx context.Context, workspaceId int64, metastoreId string) error

Delete a metastore assignment.

Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) Get

Gets the metastore assignment for a workspace.

Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the assignment will not be found and a 404 returned. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) GetByWorkspaceId

func (a *AccountMetastoreAssignmentsAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*AccountsMetastoreAssignment, error)

Gets the metastore assignment for a workspace.

Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the assignment will not be found and a 404 returned. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) Impl

Impl returns low-level AccountMetastoreAssignments API implementation

func (*AccountMetastoreAssignmentsAPI) List

Get all workspaces assigned to a metastore.

Gets a list of all Databricks workspace IDs that have been assigned to given metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) ListByMetastoreId

func (a *AccountMetastoreAssignmentsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) ([]MetastoreAssignment, error)

Get all workspaces assigned to a metastore.

Gets a list of all Databricks workspace IDs that have been assigned to given metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) Update

Updates a metastore assignment to a workspace.

Updates an assignment to a metastore for a workspace. Currently, only the default catalog may be updated. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoreAssignmentsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type AccountMetastoreAssignmentsService

type AccountMetastoreAssignmentsService interface {

	// Assigns a workspace to a metastore.
	//
	// Creates an assignment to a metastore for a workspace. Please add a
	// header X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Create(ctx context.Context, request AccountsCreateMetastoreAssignment) ([]CreateMetastoreAssignmentsResponseItem, error)

	// Delete a metastore assignment.
	//
	// Deletes a metastore assignment to a workspace, leaving the workspace with
	// no metastore. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error

	// Gets the metastore assignment for a workspace.
	//
	// Gets the metastore assignment, if any, for the workspace specified by ID.
	// If the workspace is assigned a metastore, the mapping will be returned. If
	// no metastore is assigned to the workspace, the assignment will not be
	// found and a 404 returned. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error)

	// Get all workspaces assigned to a metastore.
	//
	// Gets a list of all Databricks workspace IDs that have been assigned to
	// given metastore. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]MetastoreAssignment, error)

	// Updates a metastore assignment to a workspace.
	//
	// Updates an assignment to a metastore for a workspace. Currently, only the
	// default catalog may be updated. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error
}

These APIs manage metastore assignments to a workspace.

type AccountMetastoresAPI

type AccountMetastoresAPI struct {
	// contains filtered or unexported fields
}

These APIs manage Unity Catalog metastores for an account. A metastore contains catalogs that can be associated with workspaces

func NewAccountMetastores

func NewAccountMetastores(client *client.DatabricksClient) *AccountMetastoresAPI

func (*AccountMetastoresAPI) Create

Create metastore.

Creates a Unity Catalog metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) Delete

Delete a metastore.

Deletes a Unity Catalog metastore for an account, both specified by ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) DeleteByMetastoreId

func (a *AccountMetastoresAPI) DeleteByMetastoreId(ctx context.Context, metastoreId string) error

Delete a metastore.

Deletes a Unity Catalog metastore for an account, both specified by ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) Get

Get a metastore.

Gets a Unity Catalog metastore from an account, both specified by ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) GetByMetastoreId

func (a *AccountMetastoresAPI) GetByMetastoreId(ctx context.Context, metastoreId string) (*AccountsMetastoreInfo, error)

Get a metastore.

Gets a Unity Catalog metastore from an account, both specified by ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) Impl

Impl returns low-level AccountMetastores API implementation

func (*AccountMetastoresAPI) List

Get all metastores associated with an account.

Gets all Unity Catalog metastores associated with an account specified by ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) Update

Update a metastore.

Updates an existing Unity Catalog metastore. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to access this API.

func (*AccountMetastoresAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type AccountMetastoresService

type AccountMetastoresService interface {

	// Create metastore.
	//
	// Creates a Unity Catalog metastore. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error)

	// Delete a metastore.
	//
	// Deletes a Unity Catalog metastore for an account, both specified by ID.
	// Please add a header X-Databricks-Account-Console-API-Version: 2.0 to
	// access this API.
	Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error

	// Get a metastore.
	//
	// Gets a Unity Catalog metastore from an account, both specified by ID.
	// Please add a header X-Databricks-Account-Console-API-Version: 2.0 to
	// access this API.
	Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error)

	// Get all metastores associated with an account.
	//
	// Gets all Unity Catalog metastores associated with an account specified by
	// ID. Please add a header X-Databricks-Account-Console-API-Version: 2.0 to
	// access this API.
	List(ctx context.Context) (*ListMetastoresResponse, error)

	// Update a metastore.
	//
	// Updates an existing Unity Catalog metastore. Please add a header
	// X-Databricks-Account-Console-API-Version: 2.0 to access this API.
	Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error)
}

These APIs manage Unity Catalog metastores for an account. A metastore contains catalogs that can be associated with workspaces

type AccountStorageCredentialsAPI

type AccountStorageCredentialsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage storage credentials for a particular metastore.

func NewAccountStorageCredentials

func NewAccountStorageCredentials(client *client.DatabricksClient) *AccountStorageCredentialsAPI

func (*AccountStorageCredentialsAPI) Create

Create a storage credential.

Creates a new storage credential. The request object is specific to the cloud:

* **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials * **GcpServiceAccountKey** for GCP credentials.

The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.

func (*AccountStorageCredentialsAPI) Delete

Delete a storage credential.

Deletes a storage credential from the metastore. The caller must be an owner of the storage credential.

func (*AccountStorageCredentialsAPI) DeleteByMetastoreId

func (a *AccountStorageCredentialsAPI) DeleteByMetastoreId(ctx context.Context, metastoreId string) error

Delete a storage credential.

Deletes a storage credential from the metastore. The caller must be an owner of the storage credential.

func (*AccountStorageCredentialsAPI) Get

Gets the named storage credential.

Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on the storage credential.

func (*AccountStorageCredentialsAPI) GetByMetastoreId

func (a *AccountStorageCredentialsAPI) GetByMetastoreId(ctx context.Context, metastoreId string) (*StorageCredentialInfo, error)

Gets the named storage credential.

Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on the storage credential.

func (*AccountStorageCredentialsAPI) Impl

Impl returns low-level AccountStorageCredentials API implementation

func (*AccountStorageCredentialsAPI) List

Get all storage credentials assigned to a metastore.

Gets a list of all storage credentials that have been assigned to given metastore.

func (*AccountStorageCredentialsAPI) ListByMetastoreId

func (a *AccountStorageCredentialsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListStorageCredentialsResponse, error)

Get all storage credentials assigned to a metastore.

Gets a list of all storage credentials that have been assigned to given metastore.

func (*AccountStorageCredentialsAPI) Update

Updates a storage credential.

Updates a storage credential on the metastore. The caller must be the owner of the storage credential. If the caller is a metastore admin, only the __owner__ credential can be changed.

func (*AccountStorageCredentialsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type AccountStorageCredentialsService

type AccountStorageCredentialsService interface {

	// Create a storage credential.
	//
	// Creates a new storage credential. The request object is specific to the
	// cloud:
	//
	// * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for
	// Azure credentials * **GcpServiceAccountKey** for GCP credentials.
	//
	// The caller must be a metastore admin and have the
	// **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.
	Create(ctx context.Context, request AccountsCreateStorageCredential) (*StorageCredentialInfo, error)

	// Delete a storage credential.
	//
	// Deletes a storage credential from the metastore. The caller must be an
	// owner of the storage credential.
	Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error

	// Gets the named storage credential.
	//
	// Gets a storage credential from the metastore. The caller must be a
	// metastore admin, the owner of the storage credential, or have a level of
	// privilege on the storage credential.
	Get(ctx context.Context, request GetAccountStorageCredentialRequest) (*StorageCredentialInfo, error)

	// Get all storage credentials assigned to a metastore.
	//
	// Gets a list of all storage credentials that have been assigned to given
	// metastore.
	List(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListStorageCredentialsResponse, error)

	// Updates a storage credential.
	//
	// Updates a storage credential on the metastore. The caller must be the
	// owner of the storage credential. If the caller is a metastore admin, only
	// the __owner__ credential can be changed.
	Update(ctx context.Context, request AccountsUpdateStorageCredential) (*StorageCredentialInfo, error)
}

These APIs manage storage credentials for a particular metastore.

type AccountsCreateMetastore

type AccountsCreateMetastore struct {
	MetastoreInfo *CreateMetastore `json:"metastore_info,omitempty"`
}

type AccountsCreateMetastoreAssignment

type AccountsCreateMetastoreAssignment struct {
	MetastoreAssignment *CreateMetastoreAssignment `json:"metastore_assignment,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type AccountsCreateStorageCredential

type AccountsCreateStorageCredential struct {
	CredentialInfo *CreateStorageCredential `json:"credential_info,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
}

type AccountsMetastoreAssignment

type AccountsMetastoreAssignment struct {
	MetastoreAssignment *MetastoreAssignment `json:"metastore_assignment,omitempty"`
}

type AccountsMetastoreInfo

type AccountsMetastoreInfo struct {
	MetastoreInfo *MetastoreInfo `json:"metastore_info,omitempty"`
}

type AccountsUpdateMetastore

type AccountsUpdateMetastore struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`

	MetastoreInfo *UpdateMetastore `json:"metastore_info,omitempty"`
}

type AccountsUpdateMetastoreAssignment

type AccountsUpdateMetastoreAssignment struct {
	MetastoreAssignment *UpdateMetastoreAssignment `json:"metastore_assignment,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type AccountsUpdateStorageCredential

type AccountsUpdateStorageCredential struct {
	CredentialInfo *UpdateStorageCredential `json:"credential_info,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Name of the storage credential.
	Name string `json:"-" url:"-"`
}

type AwsIamRole

type AwsIamRole struct {
	// The external ID used in role assumption to prevent the confused deputy
	// problem.
	ExternalId string `json:"external_id,omitempty"`
	// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access.
	RoleArn string `json:"role_arn"`
	// The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks.
	// This is the identity that is going to assume the AWS IAM role.
	UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"`
}

type AzureManagedIdentity

type AzureManagedIdentity struct {
	// The Azure resource ID of the Azure Databricks Access Connector. Use the
	// format
	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}.
	AccessConnectorId string `json:"access_connector_id"`
	// The Databricks internal ID that represents this managed identity.
	CredentialId string `json:"credential_id,omitempty"`
	// The Azure resource ID of the managed identity. Use the format
	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}.
	// This is only available for user-assigned identities. For system-assigned
	// identities, the access_connector_id is used to identify the identity. If
	// this field is not provided, then we assume the AzureManagedIdentity is
	// for a system-assigned identity.
	ManagedIdentityId string `json:"managed_identity_id,omitempty"`
}

type AzureServicePrincipal

type AzureServicePrincipal struct {
	// The application ID of the application registration within the referenced
	// AAD tenant.
	ApplicationId string `json:"application_id"`
	// The client secret generated for the above app ID in AAD.
	ClientSecret string `json:"client_secret"`
	// The directory ID corresponding to the Azure Active Directory (AAD) tenant
	// of the application.
	DirectoryId string `json:"directory_id"`
}

type CatalogInfo

type CatalogInfo struct {
	// The type of the catalog.
	CatalogType CatalogType `json:"catalog_type,omitempty"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// The name of the connection to an external data source.
	ConnectionName string `json:"connection_name,omitempty"`
	// Time at which this catalog was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of catalog creator.
	CreatedBy string `json:"created_by,omitempty"`

	EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"`
	// Whether auto maintenance should be enabled for this object and objects
	// under it.
	EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"`
	// Whether the current securable is accessible from all workspaces or a
	// specific set of workspaces.
	IsolationMode IsolationMode `json:"isolation_mode,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of catalog.
	Name string `json:"name,omitempty"`
	// A map of key-value properties attached to the securable.
	Options map[string]string `json:"options,omitempty"`
	// Username of current owner of catalog.
	Owner string `json:"owner,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// The name of delta sharing provider.
	//
	// A Delta Sharing catalog is a catalog that is based on a Delta share on a
	// remote sharing server.
	ProviderName string `json:"provider_name,omitempty"`
	// The name of the share under the share provider.
	ShareName string `json:"share_name,omitempty"`
	// Storage Location URL (full path) for managed tables within catalog.
	StorageLocation string `json:"storage_location,omitempty"`
	// Storage root URL for managed tables within catalog.
	StorageRoot string `json:"storage_root,omitempty"`
	// Time at which this catalog was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified catalog.
	UpdatedBy string `json:"updated_by,omitempty"`
}

type CatalogType

type CatalogType string

The type of the catalog.

const CatalogTypeDeltasharingCatalog CatalogType = `DELTASHARING_CATALOG`
const CatalogTypeManagedCatalog CatalogType = `MANAGED_CATALOG`
const CatalogTypeSystemCatalog CatalogType = `SYSTEM_CATALOG`

func (*CatalogType) Set

func (f *CatalogType) Set(v string) error

Set raw string value and validate it against allowed values

func (*CatalogType) String

func (f *CatalogType) String() string

String representation for fmt.Print

func (*CatalogType) Type

func (f *CatalogType) Type() string

Type always returns CatalogType to satisfy [pflag.Value] interface

type CatalogsAPI

type CatalogsAPI struct {
	// contains filtered or unexported fields
}

A catalog is the first layer of Unity Catalog’s three-level namespace. It’s used to organize your data assets. Users can see all catalogs on which they have been assigned the USE_CATALOG data permission.

In Unity Catalog, admins and data stewards manage users and their access to data centrally across all of the workspaces in a Databricks account. Users in different workspaces can share access to the same data, depending on privileges granted centrally in Unity Catalog.

func NewCatalogs

func NewCatalogs(client *client.DatabricksClient) *CatalogsAPI

func (*CatalogsAPI) Create

func (a *CatalogsAPI) Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error)

Create a catalog.

Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege.

Example (CatalogWorkspaceBindings)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Catalogs)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Schemas)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

newCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", newCatalog)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  newCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Shares)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Tables)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*CatalogsAPI) Delete

func (a *CatalogsAPI) Delete(ctx context.Context, request DeleteCatalogRequest) error

Delete a catalog.

Deletes the catalog that matches the supplied name. The caller must be a metastore admin or the owner of the catalog.

func (*CatalogsAPI) DeleteByName

func (a *CatalogsAPI) DeleteByName(ctx context.Context, name string) error

Delete a catalog.

Deletes the catalog that matches the supplied name. The caller must be a metastore admin or the owner of the catalog.

func (*CatalogsAPI) Get

func (a *CatalogsAPI) Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error)

Get a catalog.

Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** privilege set for their account.

Example (Catalogs)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Catalogs.GetByName(ctx, created.Name)
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*CatalogsAPI) GetByName

func (a *CatalogsAPI) GetByName(ctx context.Context, name string) (*CatalogInfo, error)

Get a catalog.

Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** privilege set for their account.

func (*CatalogsAPI) Impl

func (a *CatalogsAPI) Impl() CatalogsService

Impl returns low-level Catalogs API implementation

func (*CatalogsAPI) ListAll

func (a *CatalogsAPI) ListAll(ctx context.Context) ([]CatalogInfo, error)

List catalogs.

Gets an array of catalogs in the metastore. If the caller is the metastore admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the caller (or for which the caller has the **USE_CATALOG** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (Catalogs)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Catalogs.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*CatalogsAPI) Update

func (a *CatalogsAPI) Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error)

Update a catalog.

Updates the catalog that matches the supplied name. The caller must be either the owner of the catalog, or a metastore admin (when changing the owner field of the catalog).

Example (CatalogWorkspaceBindings)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Catalogs.Update(ctx, catalog.UpdateCatalog{
	Name:          created.Name,
	IsolationMode: catalog.IsolationModeIsolated,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

Example (Catalogs)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Catalogs.Update(ctx, catalog.UpdateCatalog{
	Name:    created.Name,
	Comment: "updated",
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*CatalogsAPI) WithImpl

func (a *CatalogsAPI) WithImpl(impl CatalogsService) *CatalogsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type CatalogsService

type CatalogsService interface {

	// Create a catalog.
	//
	// Creates a new catalog instance in the parent metastore if the caller is a
	// metastore admin or has the **CREATE_CATALOG** privilege.
	Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error)

	// Delete a catalog.
	//
	// Deletes the catalog that matches the supplied name. The caller must be a
	// metastore admin or the owner of the catalog.
	Delete(ctx context.Context, request DeleteCatalogRequest) error

	// Get a catalog.
	//
	// Gets the specified catalog in a metastore. The caller must be a metastore
	// admin, the owner of the catalog, or a user that has the **USE_CATALOG**
	// privilege set for their account.
	Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error)

	// List catalogs.
	//
	// Gets an array of catalogs in the metastore. If the caller is the
	// metastore admin, all catalogs will be retrieved. Otherwise, only catalogs
	// owned by the caller (or for which the caller has the **USE_CATALOG**
	// privilege) will be retrieved. There is no guarantee of a specific
	// ordering of the elements in the array.
	//
	// Use ListAll() to get all CatalogInfo instances
	List(ctx context.Context) (*ListCatalogsResponse, error)

	// Update a catalog.
	//
	// Updates the catalog that matches the supplied name. The caller must be
	// either the owner of the catalog, or a metastore admin (when changing the
	// owner field of the catalog).
	Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error)
}

A catalog is the first layer of Unity Catalog’s three-level namespace. It’s used to organize your data assets. Users can see all catalogs on which they have been assigned the USE_CATALOG data permission.

In Unity Catalog, admins and data stewards manage users and their access to data centrally across all of the workspaces in a Databricks account. Users in different workspaces can share access to the same data, depending on privileges granted centrally in Unity Catalog.

type ColumnInfo

type ColumnInfo struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`

	Mask *ColumnMask `json:"mask,omitempty"`
	// Name of Column.
	Name string `json:"name,omitempty"`
	// Whether field may be Null (default: true).
	Nullable bool `json:"nullable,omitempty"`
	// Partition index for column.
	PartitionIndex int `json:"partition_index,omitempty"`
	// Ordinal position of column (starting at position 0).
	Position int `json:"position,omitempty"`
	// Format of IntervalType.
	TypeIntervalType string `json:"type_interval_type,omitempty"`
	// Full data type specification, JSON-serialized.
	TypeJson string `json:"type_json,omitempty"`
	// Name of type (INT, STRUCT, MAP, etc.).
	TypeName ColumnTypeName `json:"type_name,omitempty"`
	// Digits of precision; required for DecimalTypes.
	TypePrecision int `json:"type_precision,omitempty"`
	// Digits to right of decimal; Required for DecimalTypes.
	TypeScale int `json:"type_scale,omitempty"`
	// Full data type specification as SQL/catalogString text.
	TypeText string `json:"type_text,omitempty"`
}

type ColumnMask

type ColumnMask struct {
	// The full name of the column mask SQL UDF.
	FunctionName string `json:"function_name,omitempty"`
	// The list of additional table columns to be passed as input to the column
	// mask function. The first arg of the mask function should be of the type
	// of the column being masked and the types of the rest of the args should
	// match the types of columns in 'using_column_names'.
	UsingColumnNames []string `json:"using_column_names,omitempty"`
}

type ColumnTypeName

type ColumnTypeName string

Name of type (INT, STRUCT, MAP, etc.).

const ColumnTypeNameArray ColumnTypeName = `ARRAY`
const ColumnTypeNameBinary ColumnTypeName = `BINARY`
const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN`
const ColumnTypeNameByte ColumnTypeName = `BYTE`
const ColumnTypeNameChar ColumnTypeName = `CHAR`
const ColumnTypeNameDate ColumnTypeName = `DATE`
const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL`
const ColumnTypeNameDouble ColumnTypeName = `DOUBLE`
const ColumnTypeNameFloat ColumnTypeName = `FLOAT`
const ColumnTypeNameInt ColumnTypeName = `INT`
const ColumnTypeNameInterval ColumnTypeName = `INTERVAL`
const ColumnTypeNameLong ColumnTypeName = `LONG`
const ColumnTypeNameMap ColumnTypeName = `MAP`
const ColumnTypeNameNull ColumnTypeName = `NULL`
const ColumnTypeNameShort ColumnTypeName = `SHORT`
const ColumnTypeNameString ColumnTypeName = `STRING`
const ColumnTypeNameStruct ColumnTypeName = `STRUCT`
const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE`
const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP`
const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ`
const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE`

func (*ColumnTypeName) Set

func (f *ColumnTypeName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ColumnTypeName) String

func (f *ColumnTypeName) String() string

String representation for fmt.Print

func (*ColumnTypeName) Type

func (f *ColumnTypeName) Type() string

Type always returns ColumnTypeName to satisfy [pflag.Value] interface

type ConnectionInfo

type ConnectionInfo struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Unique identifier of the Connection.
	ConnectionId string `json:"connection_id,omitempty"`
	// The type of connection.
	ConnectionType ConnectionType `json:"connection_type,omitempty"`
	// Time at which this connection was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of connection creator.
	CreatedBy string `json:"created_by,omitempty"`
	// The type of credential.
	CredentialType CredentialType `json:"credential_type,omitempty"`
	// Full name of connection.
	FullName string `json:"full_name,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of the connection.
	Name string `json:"name,omitempty"`
	// A map of key-value properties attached to the securable.
	OptionsKvpairs map[string]string `json:"options_kvpairs,omitempty"`
	// Username of current owner of the connection.
	Owner string `json:"owner,omitempty"`
	// An object containing map of key-value properties attached to the
	// connection.
	PropertiesKvpairs map[string]string `json:"properties_kvpairs,omitempty"`
	// If the connection is read only.
	ReadOnly bool `json:"read_only,omitempty"`
	// Time at which this connection was updated, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified connection.
	UpdatedBy string `json:"updated_by,omitempty"`
	// URL of the remote data source, extracted from options_kvpairs.
	Url string `json:"url,omitempty"`
}

type ConnectionType

type ConnectionType string

The type of connection.

const ConnectionTypeDatabricks ConnectionType = `DATABRICKS`
const ConnectionTypeMysql ConnectionType = `MYSQL`
const ConnectionTypePostgresql ConnectionType = `POSTGRESQL`
const ConnectionTypeRedshift ConnectionType = `REDSHIFT`
const ConnectionTypeSnowflake ConnectionType = `SNOWFLAKE`
const ConnectionTypeSqldw ConnectionType = `SQLDW`
const ConnectionTypeSqlserver ConnectionType = `SQLSERVER`

func (*ConnectionType) Set

func (f *ConnectionType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ConnectionType) String

func (f *ConnectionType) String() string

String representation for fmt.Print

func (*ConnectionType) Type

func (f *ConnectionType) Type() string

Type always returns ConnectionType to satisfy [pflag.Value] interface

type ConnectionsAPI

type ConnectionsAPI struct {
	// contains filtered or unexported fields
}

Connections allow for creating a connection to an external data source.

A connection is an abstraction of an external data source that can be connected from Databricks Compute. Creating a connection object is the first step to managing external data sources within Unity Catalog, with the second step being creating a data object (catalog, schema, or table) using the connection. Data objects derived from a connection can be written to or read from similar to other Unity Catalog data objects based on cloud storage. Users may create different types of connections with each connection having a unique set of configuration options to support credential management and other settings.

func NewConnections

func NewConnections(client *client.DatabricksClient) *ConnectionsAPI

func (*ConnectionsAPI) ConnectionInfoNameToFullNameMap

func (a *ConnectionsAPI) ConnectionInfoNameToFullNameMap(ctx context.Context) (map[string]string, error)

ConnectionInfoNameToFullNameMap calls ConnectionsAPI.ListAll and creates a map of results with ConnectionInfo.Name as key and ConnectionInfo.FullName as value.

Returns an error if there's more than one ConnectionInfo with the same .Name.

Note: All ConnectionInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*ConnectionsAPI) Create

func (a *ConnectionsAPI) Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error)

Create a connection.

Creates a new connection

Creates a new connection to an external data source. It allows users to specify connection details and configurations for interaction with the external server.

func (*ConnectionsAPI) Delete

Delete a connection.

Deletes the connection that matches the supplied name.

func (*ConnectionsAPI) DeleteByNameArg

func (a *ConnectionsAPI) DeleteByNameArg(ctx context.Context, nameArg string) error

Delete a connection.

Deletes the connection that matches the supplied name.

func (*ConnectionsAPI) Get

Get a connection.

Gets a connection from its name.

func (*ConnectionsAPI) GetByName

func (a *ConnectionsAPI) GetByName(ctx context.Context, name string) (*ConnectionInfo, error)

GetByName calls ConnectionsAPI.ConnectionInfoNameToFullNameMap and returns a single ConnectionInfo.

Returns an error if there's more than one ConnectionInfo with the same .Name.

Note: All ConnectionInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*ConnectionsAPI) GetByNameArg

func (a *ConnectionsAPI) GetByNameArg(ctx context.Context, nameArg string) (*ConnectionInfo, error)

Get a connection.

Gets a connection from its name.

func (*ConnectionsAPI) Impl

Impl returns low-level Connections API implementation

func (*ConnectionsAPI) ListAll

func (a *ConnectionsAPI) ListAll(ctx context.Context) ([]ConnectionInfo, error)

List connections.

List all connections.

This method is generated by Databricks SDK Code Generator.

func (*ConnectionsAPI) Update

func (a *ConnectionsAPI) Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error)

Update a connection.

Updates the connection that matches the supplied name.

func (*ConnectionsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type ConnectionsService

type ConnectionsService interface {

	// Create a connection.
	//
	// Creates a new connection
	//
	// Creates a new connection to an external data source. It allows users to
	// specify connection details and configurations for interaction with the
	// external server.
	Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error)

	// Delete a connection.
	//
	// Deletes the connection that matches the supplied name.
	Delete(ctx context.Context, request DeleteConnectionRequest) error

	// Get a connection.
	//
	// Gets a connection from its name.
	Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error)

	// List connections.
	//
	// List all connections.
	//
	// Use ListAll() to get all ConnectionInfo instances
	List(ctx context.Context) (*ListConnectionsResponse, error)

	// Update a connection.
	//
	// Updates the connection that matches the supplied name.
	Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error)
}

Connections allow for creating a connection to an external data source.

A connection is an abstraction of an external data source that can be connected from Databricks Compute. Creating a connection object is the first step to managing external data sources within Unity Catalog, with the second step being creating a data object (catalog, schema, or table) using the connection. Data objects derived from a connection can be written to or read from similar to other Unity Catalog data objects based on cloud storage. Users may create different types of connections with each connection having a unique set of configuration options to support credential management and other settings.

type CreateCatalog

type CreateCatalog struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Name of catalog.
	Name string `json:"name"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// The name of delta sharing provider.
	//
	// A Delta Sharing catalog is a catalog that is based on a Delta share on a
	// remote sharing server.
	ProviderName string `json:"provider_name,omitempty"`
	// The name of the share under the share provider.
	ShareName string `json:"share_name,omitempty"`
	// Storage root URL for managed tables within catalog.
	StorageRoot string `json:"storage_root,omitempty"`
}

type CreateConnection

type CreateConnection struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// The type of connection.
	ConnectionType ConnectionType `json:"connection_type"`
	// Name of the connection.
	Name string `json:"name"`
	// A map of key-value properties attached to the securable.
	OptionsKvpairs map[string]string `json:"options_kvpairs"`
	// Username of current owner of the connection.
	Owner string `json:"owner,omitempty"`
	// An object containing map of key-value properties attached to the
	// connection.
	PropertiesKvpairs map[string]string `json:"properties_kvpairs,omitempty"`
	// If the connection is read only.
	ReadOnly bool `json:"read_only,omitempty"`
}

type CreateExternalLocation

type CreateExternalLocation struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Name of the storage credential used with this location.
	CredentialName string `json:"credential_name"`
	// Name of the external location.
	Name string `json:"name"`
	// Indicates whether the external location is read-only.
	ReadOnly bool `json:"read_only,omitempty"`
	// Skips validation of the storage credential associated with the external
	// location.
	SkipValidation bool `json:"skip_validation,omitempty"`
	// Path URL of the external location.
	Url string `json:"url"`
}

type CreateFunction

type CreateFunction struct {
	// Name of parent catalog.
	CatalogName string `json:"catalog_name"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Scalar function return data type.
	DataType ColumnTypeName `json:"data_type"`
	// External function language.
	ExternalLanguage string `json:"external_language,omitempty"`
	// External function name.
	ExternalName string `json:"external_name,omitempty"`
	// Pretty printed function data type.
	FullDataType string `json:"full_data_type"`
	// The array of __FunctionParameterInfo__ definitions of the function's
	// parameters.
	InputParams []FunctionParameterInfo `json:"input_params"`
	// Whether the function is deterministic.
	IsDeterministic bool `json:"is_deterministic"`
	// Function null call.
	IsNullCall bool `json:"is_null_call"`
	// Name of function, relative to parent schema.
	Name string `json:"name"`
	// Function parameter style. **S** is the value for SQL.
	ParameterStyle CreateFunctionParameterStyle `json:"parameter_style"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// Table function return parameters.
	ReturnParams []FunctionParameterInfo `json:"return_params"`
	// Function language. When **EXTERNAL** is used, the language of the routine
	// function should be specified in the __external_language__ field, and the
	// __return_params__ of the function cannot be used (as **TABLE** return
	// type is not supported), and the __sql_data_access__ field must be
	// **NO_SQL**.
	RoutineBody CreateFunctionRoutineBody `json:"routine_body"`
	// Function body.
	RoutineDefinition string `json:"routine_definition"`
	// Function dependencies.
	RoutineDependencies []Dependency `json:"routine_dependencies"`
	// Name of parent schema relative to its parent catalog.
	SchemaName string `json:"schema_name"`
	// Function security type.
	SecurityType CreateFunctionSecurityType `json:"security_type"`
	// Specific name of the function; Reserved for future use.
	SpecificName string `json:"specific_name"`
	// Function SQL data access.
	SqlDataAccess CreateFunctionSqlDataAccess `json:"sql_data_access"`
	// List of schemas whose objects can be referenced without qualification.
	SqlPath string `json:"sql_path,omitempty"`
}

type CreateFunctionParameterStyle

type CreateFunctionParameterStyle string

Function parameter style. **S** is the value for SQL.

const CreateFunctionParameterStyleS CreateFunctionParameterStyle = `S`

func (*CreateFunctionParameterStyle) Set

Set raw string value and validate it against allowed values

func (*CreateFunctionParameterStyle) String

String representation for fmt.Print

func (*CreateFunctionParameterStyle) Type

Type always returns CreateFunctionParameterStyle to satisfy [pflag.Value] interface

type CreateFunctionRoutineBody

type CreateFunctionRoutineBody string

Function language. When **EXTERNAL** is used, the language of the routine function should be specified in the __external_language__ field, and the __return_params__ of the function cannot be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be **NO_SQL**.

const CreateFunctionRoutineBodyExternal CreateFunctionRoutineBody = `EXTERNAL`
const CreateFunctionRoutineBodySql CreateFunctionRoutineBody = `SQL`

func (*CreateFunctionRoutineBody) Set

Set raw string value and validate it against allowed values

func (*CreateFunctionRoutineBody) String

func (f *CreateFunctionRoutineBody) String() string

String representation for fmt.Print

func (*CreateFunctionRoutineBody) Type

Type always returns CreateFunctionRoutineBody to satisfy [pflag.Value] interface

type CreateFunctionSecurityType

type CreateFunctionSecurityType string

Function security type.

const CreateFunctionSecurityTypeDefiner CreateFunctionSecurityType = `DEFINER`

func (*CreateFunctionSecurityType) Set

Set raw string value and validate it against allowed values

func (*CreateFunctionSecurityType) String

func (f *CreateFunctionSecurityType) String() string

String representation for fmt.Print

func (*CreateFunctionSecurityType) Type

Type always returns CreateFunctionSecurityType to satisfy [pflag.Value] interface

type CreateFunctionSqlDataAccess

type CreateFunctionSqlDataAccess string

Function SQL data access.

const CreateFunctionSqlDataAccessContainsSql CreateFunctionSqlDataAccess = `CONTAINS_SQL`
const CreateFunctionSqlDataAccessNoSql CreateFunctionSqlDataAccess = `NO_SQL`
const CreateFunctionSqlDataAccessReadsSqlData CreateFunctionSqlDataAccess = `READS_SQL_DATA`

func (*CreateFunctionSqlDataAccess) Set

Set raw string value and validate it against allowed values

func (*CreateFunctionSqlDataAccess) String

func (f *CreateFunctionSqlDataAccess) String() string

String representation for fmt.Print

func (*CreateFunctionSqlDataAccess) Type

Type always returns CreateFunctionSqlDataAccess to satisfy [pflag.Value] interface

type CreateMetastore

type CreateMetastore struct {
	// The user-specified name of the metastore.
	Name string `json:"name"`
	// Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). If
	// this field is omitted, the region of the workspace receiving the request
	// will be used.
	Region string `json:"region,omitempty"`
	// The storage root URL for metastore
	StorageRoot string `json:"storage_root"`
}

type CreateMetastoreAssignment

type CreateMetastoreAssignment struct {
	// The name of the default catalog in the metastore.
	DefaultCatalogName string `json:"default_catalog_name"`
	// The unique ID of the metastore.
	MetastoreId string `json:"metastore_id"`
	// A workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type CreateMetastoreAssignmentsResponseItem

type CreateMetastoreAssignmentsResponseItem struct {
	// A human-readable message describing the outcome of the creation
	Message string `json:"message,omitempty"`

	// The metastore assignment produced by this creation, if any (omitted
	// when the individual creation did not succeed).
	MetastoreAssignment *MetastoreAssignment `json:"metastore_assignment,omitempty"`
	// The returned HTTP status code for an individual creation in the batch
	StatusCode int `json:"status_code,omitempty"`
}

type CreateSchema

type CreateSchema struct {
	// Name of parent catalog.
	CatalogName string `json:"catalog_name"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Name of schema, relative to parent catalog.
	Name string `json:"name"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// Storage root URL for managed tables within schema.
	StorageRoot string `json:"storage_root,omitempty"`
}

type CreateStorageCredential

type CreateStorageCredential struct {
	// The AWS IAM role configuration.
	AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
	// The Azure managed identity configuration.
	AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
	// The Azure service principal configuration.
	AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
	// Comment associated with the credential.
	Comment string `json:"comment,omitempty"`
	// The <Databricks> managed GCP service account configuration.
	DatabricksGcpServiceAccount any `json:"databricks_gcp_service_account,omitempty"`
	// The credential name. The name must be unique within the metastore.
	Name string `json:"name"`
	// Whether the storage credential is only usable for read operations.
	ReadOnly bool `json:"read_only,omitempty"`
	// Supplying true to this argument skips validation of the created
	// credential.
	SkipValidation bool `json:"skip_validation,omitempty"`
}

type CreateTableConstraint

type CreateTableConstraint struct {
	// A table constraint, as defined by *one* of the following fields being
	// set: __primary_key_constraint__, __foreign_key_constraint__,
	// __named_table_constraint__.
	Constraint TableConstraint `json:"constraint"`
	// The full name of the table referenced by the constraint.
	FullNameArg string `json:"full_name_arg"`
}

type CreateVolumeRequestContent

type CreateVolumeRequestContent struct {
	// The name of the catalog where the schema and the volume are
	CatalogName string `json:"catalog_name"`
	// The comment attached to the volume
	Comment string `json:"comment,omitempty"`
	// The name of the volume
	Name string `json:"name"`
	// The name of the schema where the volume is
	SchemaName string `json:"schema_name"`
	// The storage location on the cloud
	StorageLocation string `json:"storage_location,omitempty"`

	// The type of the volume to create.
	VolumeType VolumeType `json:"volume_type"`
}

type CredentialType

type CredentialType string

The type of credential.

const CredentialTypeUsernamePassword CredentialType = `USERNAME_PASSWORD`

func (*CredentialType) Set

func (f *CredentialType) Set(v string) error

Set raw string value and validate it against allowed values

func (*CredentialType) String

func (f *CredentialType) String() string

String representation for fmt.Print

func (*CredentialType) Type

func (f *CredentialType) Type() string

Type always returns CredentialType to satisfy [pflag.Value] interface

type CurrentWorkspaceBindings

type CurrentWorkspaceBindings struct {
	// A list of workspace IDs.
	Workspaces []int64 `json:"workspaces,omitempty"`
}

Currently assigned workspaces

type DataSourceFormat

type DataSourceFormat string

Data source format

const DataSourceFormatAvro DataSourceFormat = `AVRO`
const DataSourceFormatCsv DataSourceFormat = `CSV`
const DataSourceFormatDelta DataSourceFormat = `DELTA`
const DataSourceFormatDeltasharing DataSourceFormat = `DELTASHARING`
const DataSourceFormatJson DataSourceFormat = `JSON`
const DataSourceFormatOrc DataSourceFormat = `ORC`
const DataSourceFormatParquet DataSourceFormat = `PARQUET`
const DataSourceFormatText DataSourceFormat = `TEXT`
const DataSourceFormatUnityCatalog DataSourceFormat = `UNITY_CATALOG`

func (*DataSourceFormat) Set

func (f *DataSourceFormat) Set(v string) error

Set raw string value and validate it against allowed values

func (*DataSourceFormat) String

func (f *DataSourceFormat) String() string

String representation for fmt.Print

func (*DataSourceFormat) Type

func (f *DataSourceFormat) Type() string

Type always returns DataSourceFormat to satisfy [pflag.Value] interface

type DatabricksGcpServiceAccountResponse

type DatabricksGcpServiceAccountResponse struct {
	// The Databricks internal ID that represents this service account. This is
	// an output-only field.
	CredentialId string `json:"credential_id,omitempty"`
	// The email of the service account. This is an output-only field.
	Email string `json:"email,omitempty"`
}

type DeleteAccountMetastoreAssignmentRequest

type DeleteAccountMetastoreAssignmentRequest struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

Delete a metastore assignment

type DeleteAccountMetastoreRequest

type DeleteAccountMetastoreRequest struct {
	// Force deletion even if the metastore is not empty. Default is false.
	Force bool `json:"-" url:"force,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
}

Delete a metastore

type DeleteAccountStorageCredentialRequest

type DeleteAccountStorageCredentialRequest struct {
	// Force deletion even if the Storage Credential is not empty. Default is
	// false.
	Force bool `json:"-" url:"force,omitempty"`
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Name of the storage credential.
	Name string `json:"-" url:"-"`
}

Delete a storage credential

type DeleteCatalogRequest

type DeleteCatalogRequest struct {
	// Force deletion even if the catalog is not empty.
	Force bool `json:"-" url:"force,omitempty"`
	// The name of the catalog.
	Name string `json:"-" url:"-"`
}

Delete a catalog

type DeleteConnectionRequest

type DeleteConnectionRequest struct {
	// The name of the connection to be deleted.
	NameArg string `json:"-" url:"-"`
}

Delete a connection

type DeleteExternalLocationRequest

type DeleteExternalLocationRequest struct {
	// Force deletion even if there are dependent external tables or mounts.
	Force bool `json:"-" url:"force,omitempty"`
	// Name of the external location.
	Name string `json:"-" url:"-"`
}

Delete an external location

type DeleteFunctionRequest

type DeleteFunctionRequest struct {
	// Force deletion even if the function is not empty.
	Force bool `json:"-" url:"force,omitempty"`
	// The fully-qualified name of the function (of the form
	// __catalog_name__.__schema_name__.__function_name__).
	Name string `json:"-" url:"-"`
}

Delete a function

type DeleteMetastoreRequest

type DeleteMetastoreRequest struct {
	// Force deletion even if the metastore is not empty. Default is false.
	Force bool `json:"-" url:"force,omitempty"`
	// Unique ID of the metastore.
	Id string `json:"-" url:"-"`
}

Delete a metastore

type DeleteSchemaRequest

type DeleteSchemaRequest struct {
	// Full name of the schema.
	FullName string `json:"-" url:"-"`
}

Delete a schema

type DeleteStorageCredentialRequest

type DeleteStorageCredentialRequest struct {
	// Force deletion even if there are dependent external locations or external
	// tables.
	Force bool `json:"-" url:"force,omitempty"`
	// Name of the storage credential.
	Name string `json:"-" url:"-"`
}

Delete a credential

type DeleteTableConstraintRequest

type DeleteTableConstraintRequest struct {
	// If true, try deleting all child constraints of the current constraint. If
	// false, reject this operation if the current constraint has any child
	// constraints.
	Cascade bool `json:"-" url:"cascade"`
	// The name of the constraint to delete.
	ConstraintName string `json:"-" url:"constraint_name"`
	// Full name of the table referenced by the constraint.
	FullName string `json:"-" url:"-"`
}

Delete a table constraint

type DeleteTableRequest

type DeleteTableRequest struct {
	// Full name of the table.
	FullName string `json:"-" url:"-"`
}

Delete a table

type DeleteVolumeRequest

type DeleteVolumeRequest struct {
	// The three-level (fully qualified) name of the volume
	FullNameArg string `json:"-" url:"-"`
}

Delete a Volume

type DeltaRuntimePropertiesKvPairs

type DeltaRuntimePropertiesKvPairs struct {
	// A map of key-value properties attached to the securable.
	DeltaRuntimeProperties map[string]string `json:"delta_runtime_properties"`
}

Properties pertaining to the current state of the delta table as given by the commit server. This does not contain **delta.*** (input) properties in __TableInfo.properties__.

type Dependency

type Dependency struct {
	// A function that is dependent on a SQL object.
	Function *FunctionDependency `json:"function,omitempty"`
	// A table that is dependent on a SQL object.
	Table *TableDependency `json:"table,omitempty"`
}

A dependency of a SQL object. Either the __table__ field or the __function__ field must be defined.

type DisableRequest

type DisableRequest struct {
	// The metastore ID under which the system schema lives.
	MetastoreId string `json:"-" url:"-"`
	// Full name of the system schema.
	SchemaName DisableSchemaName `json:"-" url:"-"`
}

Disable a system schema

type DisableSchemaName

type DisableSchemaName string
const DisableSchemaNameAccess DisableSchemaName = `access`
const DisableSchemaNameBilling DisableSchemaName = `billing`
const DisableSchemaNameLineage DisableSchemaName = `lineage`
const DisableSchemaNameOperationalData DisableSchemaName = `operational_data`

func (*DisableSchemaName) Set

func (f *DisableSchemaName) Set(v string) error

Set raw string value and validate it against allowed values

func (*DisableSchemaName) String

func (f *DisableSchemaName) String() string

String representation for fmt.Print

func (*DisableSchemaName) Type

func (f *DisableSchemaName) Type() string

Type always returns DisableSchemaName to satisfy [pflag.Value] interface

type EffectiveAutoMaintenanceFlag

type EffectiveAutoMaintenanceFlag struct {
	// The name of the object from which the flag was inherited. If there was no
	// inheritance, this field is left blank.
	InheritedFromName string `json:"inherited_from_name,omitempty"`
	// The type of the object from which the flag was inherited. If there was no
	// inheritance, this field is left blank.
	InheritedFromType EffectiveAutoMaintenanceFlagInheritedFromType `json:"inherited_from_type,omitempty"`
	// Whether auto maintenance should be enabled for this object and objects
	// under it.
	Value EnableAutoMaintenance `json:"value"`
}

type EffectiveAutoMaintenanceFlagInheritedFromType

type EffectiveAutoMaintenanceFlagInheritedFromType string

The type of the object from which the flag was inherited. If there was no inheritance, this field is left blank.

const EffectiveAutoMaintenanceFlagInheritedFromTypeCatalog EffectiveAutoMaintenanceFlagInheritedFromType = `CATALOG`
const EffectiveAutoMaintenanceFlagInheritedFromTypeSchema EffectiveAutoMaintenanceFlagInheritedFromType = `SCHEMA`

func (*EffectiveAutoMaintenanceFlagInheritedFromType) Set

Set raw string value and validate it against allowed values

func (*EffectiveAutoMaintenanceFlagInheritedFromType) String

String representation for fmt.Print

func (*EffectiveAutoMaintenanceFlagInheritedFromType) Type

Type always returns EffectiveAutoMaintenanceFlagInheritedFromType to satisfy [pflag.Value] interface

type EffectivePermissionsList

type EffectivePermissionsList struct {
	// The privileges conveyed to each principal (either directly or via
	// inheritance)
	PrivilegeAssignments []EffectivePrivilegeAssignment `json:"privilege_assignments,omitempty"`
}

type EffectivePrivilege

type EffectivePrivilege struct {
	// The full name of the object that conveys this privilege via inheritance.
	// This field is omitted when privilege is not inherited (it's assigned to
	// the securable itself).
	InheritedFromName string `json:"inherited_from_name,omitempty"`
	// The type of the object that conveys this privilege via inheritance. This
	// field is omitted when privilege is not inherited (it's assigned to the
	// securable itself).
	InheritedFromType SecurableType `json:"inherited_from_type,omitempty"`
	// The privilege assigned to the principal.
	Privilege Privilege `json:"privilege,omitempty"`
}

type EffectivePrivilegeAssignment

type EffectivePrivilegeAssignment struct {
	// The principal (user email address or group name).
	Principal string `json:"principal,omitempty"`
	// The privileges conveyed to the principal (either directly or via
	// inheritance).
	Privileges []EffectivePrivilege `json:"privileges,omitempty"`
}

type EnableAutoMaintenance

type EnableAutoMaintenance string

Whether auto maintenance should be enabled for this object and objects under it.

const EnableAutoMaintenanceDisable EnableAutoMaintenance = `DISABLE`
const EnableAutoMaintenanceEnable EnableAutoMaintenance = `ENABLE`
const EnableAutoMaintenanceInherit EnableAutoMaintenance = `INHERIT`

func (*EnableAutoMaintenance) Set

Set raw string value and validate it against allowed values

func (*EnableAutoMaintenance) String

func (f *EnableAutoMaintenance) String() string

String representation for fmt.Print

func (*EnableAutoMaintenance) Type

func (f *EnableAutoMaintenance) Type() string

Type always returns EnableAutoMaintenance to satisfy [pflag.Value] interface

type EnableRequest

type EnableRequest struct {
	// The metastore ID under which the system schema lives.
	MetastoreId string `json:"-" url:"-"`
	// Full name of the system schema.
	SchemaName EnableSchemaName `json:"-" url:"-"`
}

Enable a system schema

type EnableSchemaName

type EnableSchemaName string
const EnableSchemaNameAccess EnableSchemaName = `access`
const EnableSchemaNameBilling EnableSchemaName = `billing`
const EnableSchemaNameLineage EnableSchemaName = `lineage`
const EnableSchemaNameOperationalData EnableSchemaName = `operational_data`

func (*EnableSchemaName) Set

func (f *EnableSchemaName) Set(v string) error

Set raw string value and validate it against allowed values

func (*EnableSchemaName) String

func (f *EnableSchemaName) String() string

String representation for fmt.Print

func (*EnableSchemaName) Type

func (f *EnableSchemaName) Type() string

Type always returns EnableSchemaName to satisfy [pflag.Value] interface

type ExternalLocationInfo

// ExternalLocationInfo describes an external location: a cloud storage path
// (Url) combined with the storage credential that authorizes access to it.
type ExternalLocationInfo struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Time at which this external location was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of external location creator.
	CreatedBy string `json:"created_by,omitempty"`
	// Unique ID of the location's storage credential.
	CredentialId string `json:"credential_id,omitempty"`
	// Name of the storage credential used with this location.
	CredentialName string `json:"credential_name,omitempty"`
	// Unique identifier of metastore hosting the external location.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of the external location.
	Name string `json:"name,omitempty"`
	// The owner of the external location.
	Owner string `json:"owner,omitempty"`
	// Indicates whether the external location is read-only.
	ReadOnly bool `json:"read_only,omitempty"`
	// Time at which this external location was last modified, in epoch
	// milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified the external location.
	UpdatedBy string `json:"updated_by,omitempty"`
	// Path URL of the external location.
	Url string `json:"url,omitempty"`
}

type ExternalLocationsAPI

// ExternalLocationsAPI is the high-level client for the external locations
// endpoints; its fields are unexported implementation details.
type ExternalLocationsAPI struct {
	// contains filtered or unexported fields
}

An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. Each external location is subject to Unity Catalog access-control policies that control which users and groups can access the credential. If a user does not have access to an external location in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf.

Databricks recommends using external locations rather than using storage credentials directly.

To create external locations, you must be a metastore admin or a user with the **CREATE_EXTERNAL_LOCATION** privilege.

func NewExternalLocations

func NewExternalLocations(client *client.DatabricksClient) *ExternalLocationsAPI

func (*ExternalLocationsAPI) Create

Create an external location.

Creates a new external location entry in the metastore. The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage credential.

Example (ExternalLocationsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

credential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", credential)

created, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: credential.Name,
	Url:            fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, credential.Name)
if err != nil {
	panic(err)
}
err = w.ExternalLocations.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

storageCredential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
	Comment: "created via SDK",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storageCredential)

externalLocation, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: storageCredential.Name,
	Comment:        "created via SDK",
	Url:            "s3://" + os.Getenv("TEST_BUCKET") + "/" + fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", externalLocation)
Output:

func (*ExternalLocationsAPI) Delete

Delete an external location.

Deletes the specified external location from the metastore. The caller must be the owner of the external location.

func (*ExternalLocationsAPI) DeleteByName

func (a *ExternalLocationsAPI) DeleteByName(ctx context.Context, name string) error

Delete an external location.

Deletes the specified external location from the metastore. The caller must be the owner of the external location.

func (*ExternalLocationsAPI) Get

Get an external location.

Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location.

Example (ExternalLocationsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

credential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", credential)

created, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: credential.Name,
	Url:            fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.ExternalLocations.GetByName(ctx, created.Name)
if err != nil {
	panic(err)
}

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, credential.Name)
if err != nil {
	panic(err)
}
err = w.ExternalLocations.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

func (*ExternalLocationsAPI) GetByName

Get an external location.

Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location.

func (*ExternalLocationsAPI) Impl

Impl returns low-level ExternalLocations API implementation

func (*ExternalLocationsAPI) ListAll

List external locations.

Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (ExternalLocationsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.ExternalLocations.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*ExternalLocationsAPI) Update

Update an external location.

Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external location.

Example (ExternalLocationsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

credential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", credential)

created, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: credential.Name,
	Url:            fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.ExternalLocations.Update(ctx, catalog.UpdateExternalLocation{
	Name:           created.Name,
	CredentialName: credential.Name,
	Url:            fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, credential.Name)
if err != nil {
	panic(err)
}
err = w.ExternalLocations.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

func (*ExternalLocationsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type ExternalLocationsService

// ExternalLocationsService is the low-level interface for the external
// locations endpoints. An external location combines a cloud storage path
// with a storage credential that authorizes access to that path.
type ExternalLocationsService interface {

	// Create an external location.
	//
	// Creates a new external location entry in the metastore. The caller must
	// be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege
	// on both the metastore and the associated storage credential.
	Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error)

	// Delete an external location.
	//
	// Deletes the specified external location from the metastore. The caller
	// must be the owner of the external location.
	Delete(ctx context.Context, request DeleteExternalLocationRequest) error

	// Get an external location.
	//
	// Gets an external location from the metastore. The caller must be either a
	// metastore admin, the owner of the external location, or a user that has
	// some privilege on the external location.
	Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error)

	// List external locations.
	//
	// Gets an array of external locations (__ExternalLocationInfo__ objects)
	// from the metastore. The caller must be a metastore admin, the owner of
	// the external location, or a user that has some privilege on the external
	// location. There is no guarantee of a specific ordering of the elements in
	// the array.
	//
	// Use ListAll() to get all ExternalLocationInfo instances
	List(ctx context.Context) (*ListExternalLocationsResponse, error)

	// Update an external location.
	//
	// Updates an external location in the metastore. The caller must be the
	// owner of the external location, or be a metastore admin. In the second
	// case, the admin can only update the name of the external location.
	Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error)
}

An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. Each external location is subject to Unity Catalog access-control policies that control which users and groups can access the credential. If a user does not have access to an external location in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf.

Databricks recommends using external locations rather than using storage credentials directly.

To create external locations, you must be a metastore admin or a user with the **CREATE_EXTERNAL_LOCATION** privilege.

type ForeignKeyConstraint

// ForeignKeyConstraint describes a foreign-key table constraint linking
// child columns to columns of a parent table.
type ForeignKeyConstraint struct {
	// Column names for this constraint.
	ChildColumns []string `json:"child_columns"`
	// The name of the constraint.
	Name string `json:"name"`
	// Column names for this constraint.
	// NOTE(review): presumably the referenced columns in the parent table —
	// confirm against the API spec.
	ParentColumns []string `json:"parent_columns"`
	// The full name of the parent table.
	ParentTable string `json:"parent_table"`
}

type FunctionDependency

// FunctionDependency identifies a function that a SQL object depends on.
type FunctionDependency struct {
	// Full name of the dependent function, in the form of
	// __catalog_name__.__schema_name__.__function_name__.
	FunctionFullName string `json:"function_full_name"`
}

A function that is dependent on a SQL object.

type FunctionInfo

// FunctionInfo describes a User-Defined Function (UDF) registered in Unity
// Catalog, including its signature, implementation and audit metadata.
type FunctionInfo struct {
	// Name of parent catalog.
	CatalogName string `json:"catalog_name,omitempty"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Time at which this function was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of function creator.
	CreatedBy string `json:"created_by,omitempty"`
	// Scalar function return data type.
	DataType ColumnTypeName `json:"data_type,omitempty"`
	// External function language.
	ExternalLanguage string `json:"external_language,omitempty"`
	// External function name.
	ExternalName string `json:"external_name,omitempty"`
	// Pretty printed function data type.
	FullDataType string `json:"full_data_type,omitempty"`
	// Full name of function, in form of
	// __catalog_name__.__schema_name__.__function_name__
	FullName string `json:"full_name,omitempty"`
	// Id of Function, relative to parent schema.
	FunctionId string `json:"function_id,omitempty"`
	// The array of __FunctionParameterInfo__ definitions of the function's
	// parameters.
	InputParams []FunctionParameterInfo `json:"input_params,omitempty"`
	// Whether the function is deterministic.
	IsDeterministic bool `json:"is_deterministic,omitempty"`
	// Function null call.
	IsNullCall bool `json:"is_null_call,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of function, relative to parent schema.
	Name string `json:"name,omitempty"`
	// Username of current owner of function.
	Owner string `json:"owner,omitempty"`
	// Function parameter style. **S** is the value for SQL.
	ParameterStyle FunctionInfoParameterStyle `json:"parameter_style,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// Table function return parameters.
	ReturnParams []FunctionParameterInfo `json:"return_params,omitempty"`
	// Function language. When **EXTERNAL** is used, the language of the routine
	// function should be specified in the __external_language__ field, and the
	// __return_params__ of the function cannot be used (as **TABLE** return
	// type is not supported), and the __sql_data_access__ field must be
	// **NO_SQL**.
	RoutineBody FunctionInfoRoutineBody `json:"routine_body,omitempty"`
	// Function body.
	RoutineDefinition string `json:"routine_definition,omitempty"`
	// Function dependencies.
	RoutineDependencies []Dependency `json:"routine_dependencies,omitempty"`
	// Name of parent schema relative to its parent catalog.
	SchemaName string `json:"schema_name,omitempty"`
	// Function security type.
	SecurityType FunctionInfoSecurityType `json:"security_type,omitempty"`
	// Specific name of the function; Reserved for future use.
	SpecificName string `json:"specific_name,omitempty"`
	// Function SQL data access.
	SqlDataAccess FunctionInfoSqlDataAccess `json:"sql_data_access,omitempty"`
	// List of schemas whose objects can be referenced without qualification.
	SqlPath string `json:"sql_path,omitempty"`
	// Time at which this function was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified function.
	UpdatedBy string `json:"updated_by,omitempty"`
}

type FunctionInfoParameterStyle

type FunctionInfoParameterStyle string

Function parameter style. **S** is the value for SQL.

const FunctionInfoParameterStyleS FunctionInfoParameterStyle = `S`

func (*FunctionInfoParameterStyle) Set

Set raw string value and validate it against allowed values

func (*FunctionInfoParameterStyle) String

func (f *FunctionInfoParameterStyle) String() string

String representation for fmt.Print

func (*FunctionInfoParameterStyle) Type

Type always returns FunctionInfoParameterStyle to satisfy [pflag.Value] interface

type FunctionInfoRoutineBody

type FunctionInfoRoutineBody string

Function language. When **EXTERNAL** is used, the language of the routine function should be specified in the __external_language__ field, and the __return_params__ of the function cannot be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be **NO_SQL**.

// Allowed values for FunctionInfoRoutineBody.
const (
	FunctionInfoRoutineBodyExternal FunctionInfoRoutineBody = `EXTERNAL`
	FunctionInfoRoutineBodySql      FunctionInfoRoutineBody = `SQL`
)

func (*FunctionInfoRoutineBody) Set

Set raw string value and validate it against allowed values

func (*FunctionInfoRoutineBody) String

func (f *FunctionInfoRoutineBody) String() string

String representation for fmt.Print

func (*FunctionInfoRoutineBody) Type

func (f *FunctionInfoRoutineBody) Type() string

Type always returns FunctionInfoRoutineBody to satisfy [pflag.Value] interface

type FunctionInfoSecurityType

type FunctionInfoSecurityType string

Function security type.

const FunctionInfoSecurityTypeDefiner FunctionInfoSecurityType = `DEFINER`

func (*FunctionInfoSecurityType) Set

Set raw string value and validate it against allowed values

func (*FunctionInfoSecurityType) String

func (f *FunctionInfoSecurityType) String() string

String representation for fmt.Print

func (*FunctionInfoSecurityType) Type

func (f *FunctionInfoSecurityType) Type() string

Type always returns FunctionInfoSecurityType to satisfy [pflag.Value] interface

type FunctionInfoSqlDataAccess

type FunctionInfoSqlDataAccess string

Function SQL data access.

// Allowed values for FunctionInfoSqlDataAccess.
const (
	FunctionInfoSqlDataAccessContainsSql  FunctionInfoSqlDataAccess = `CONTAINS_SQL`
	FunctionInfoSqlDataAccessNoSql        FunctionInfoSqlDataAccess = `NO_SQL`
	FunctionInfoSqlDataAccessReadsSqlData FunctionInfoSqlDataAccess = `READS_SQL_DATA`
)

func (*FunctionInfoSqlDataAccess) Set

Set raw string value and validate it against allowed values

func (*FunctionInfoSqlDataAccess) String

func (f *FunctionInfoSqlDataAccess) String() string

String representation for fmt.Print

func (*FunctionInfoSqlDataAccess) Type

Type always returns FunctionInfoSqlDataAccess to satisfy [pflag.Value] interface

type FunctionParameterInfo

// FunctionParameterInfo describes a single function parameter: its name,
// position, mode, and full type information.
type FunctionParameterInfo struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Name of parameter.
	Name string `json:"name"`
	// Default value of the parameter.
	ParameterDefault string `json:"parameter_default,omitempty"`
	// The mode of the function parameter.
	ParameterMode FunctionParameterMode `json:"parameter_mode,omitempty"`
	// The type of function parameter.
	ParameterType FunctionParameterType `json:"parameter_type,omitempty"`
	// Ordinal position of column (starting at position 0).
	Position int `json:"position"`
	// Format of IntervalType.
	TypeIntervalType string `json:"type_interval_type,omitempty"`
	// Full data type spec, JSON-serialized.
	TypeJson string `json:"type_json,omitempty"`
	// Name of type (INT, STRUCT, MAP, etc.).
	TypeName ColumnTypeName `json:"type_name"`
	// Digits of precision; required on Create for DecimalTypes.
	TypePrecision int `json:"type_precision,omitempty"`
	// Digits to right of decimal; Required on Create for DecimalTypes.
	TypeScale int `json:"type_scale,omitempty"`
	// Full data type spec, SQL/catalogString text.
	TypeText string `json:"type_text"`
}

type FunctionParameterMode

type FunctionParameterMode string

The mode of the function parameter.

const FunctionParameterModeIn FunctionParameterMode = `IN`

func (*FunctionParameterMode) Set

Set raw string value and validate it against allowed values

func (*FunctionParameterMode) String

func (f *FunctionParameterMode) String() string

String representation for fmt.Print

func (*FunctionParameterMode) Type

func (f *FunctionParameterMode) Type() string

Type always returns FunctionParameterMode to satisfy [pflag.Value] interface

type FunctionParameterType

type FunctionParameterType string

The type of function parameter.

// Allowed values for FunctionParameterType.
const (
	FunctionParameterTypeColumn FunctionParameterType = `COLUMN`
	FunctionParameterTypeParam  FunctionParameterType = `PARAM`
)

func (*FunctionParameterType) Set

Set raw string value and validate it against allowed values

func (*FunctionParameterType) String

func (f *FunctionParameterType) String() string

String representation for fmt.Print

func (*FunctionParameterType) Type

func (f *FunctionParameterType) Type() string

Type always returns FunctionParameterType to satisfy [pflag.Value] interface

type FunctionsAPI

// FunctionsAPI is the high-level client for the Unity Catalog functions
// endpoints; its fields are unexported implementation details.
type FunctionsAPI struct {
	// contains filtered or unexported fields
}

Functions implement User-Defined Functions (UDFs) in Unity Catalog.

The function implementation can be any SQL expression or Query, and it can be invoked wherever a table reference is allowed in a query. In Unity Catalog, a function resides at the same level as a table, so it can be referenced with the form __catalog_name__.__schema_name__.__function_name__.

func NewFunctions

func NewFunctions(client *client.DatabricksClient) *FunctionsAPI

func (*FunctionsAPI) Create

func (a *FunctionsAPI) Create(ctx context.Context, request CreateFunction) (*FunctionInfo, error)

Create a function.

Creates a new function.

The user must have the following permissions in order for the function to be created: - **USE_CATALOG** on the function's parent catalog - **USE_SCHEMA** and **CREATE_FUNCTION** on the function's parent schema

func (*FunctionsAPI) Delete

func (a *FunctionsAPI) Delete(ctx context.Context, request DeleteFunctionRequest) error

Delete a function.

Deletes the function that matches the supplied name. For the deletion to succeed, the user must satisfy one of the following conditions: - Is the owner of the function's parent catalog - Is the owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and have both the **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA** privilege on its parent schema

func (*FunctionsAPI) DeleteByName

func (a *FunctionsAPI) DeleteByName(ctx context.Context, name string) error

Delete a function.

Deletes the function that matches the supplied name. For the deletion to succeed, the user must satisfy one of the following conditions: - Is the owner of the function's parent catalog - Is the owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and have both the **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA** privilege on its parent schema

func (*FunctionsAPI) FunctionInfoNameToFullNameMap

func (a *FunctionsAPI) FunctionInfoNameToFullNameMap(ctx context.Context, request ListFunctionsRequest) (map[string]string, error)

FunctionInfoNameToFullNameMap calls FunctionsAPI.ListAll and creates a map of results with FunctionInfo.Name as key and FunctionInfo.FullName as value.

Returns an error if there's more than one FunctionInfo with the same .Name.

Note: All FunctionInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*FunctionsAPI) Get

Get a function.

Gets a function from within a parent catalog and schema. For the fetch to succeed, the user must satisfy one of the following requirements: - Is a metastore admin - Is an owner of the function's parent catalog - Have the **USE_CATALOG** privilege on the function's parent catalog and be the owner of the function - Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the **EXECUTE** privilege on the function itself

func (*FunctionsAPI) GetByName

func (a *FunctionsAPI) GetByName(ctx context.Context, name string) (*FunctionInfo, error)

Get a function.

Gets a function from within a parent catalog and schema. For the fetch to succeed, the user must satisfy one of the following requirements: - Is a metastore admin - Is an owner of the function's parent catalog - Have the **USE_CATALOG** privilege on the function's parent catalog and be the owner of the function - Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the **EXECUTE** privilege on the function itself

func (*FunctionsAPI) Impl

func (a *FunctionsAPI) Impl() FunctionsService

Impl returns low-level Functions API implementation

func (*FunctionsAPI) ListAll

func (a *FunctionsAPI) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error)

List functions.

List functions within the specified parent catalog and schema. If the user is a metastore admin, all functions are returned in the output list. Otherwise, the user must have the **USE_CATALOG** privilege on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

func (*FunctionsAPI) Update

func (a *FunctionsAPI) Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error)

Update a function.

Updates the function that matches the supplied name. Only the owner of the function can be updated. If the user is not a metastore admin, the user must be a member of the group that is the new function owner. - Is a metastore admin - Is the owner of the function's parent catalog - Is the owner of the function's parent schema and has the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and has the **USE_CATALOG** privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the function's parent schema.

func (*FunctionsAPI) WithImpl

func (a *FunctionsAPI) WithImpl(impl FunctionsService) *FunctionsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type FunctionsService

// FunctionsService is the low-level interface for the Unity Catalog
// functions endpoints. Functions implement User-Defined Functions (UDFs) in
// Unity Catalog.
type FunctionsService interface {

	// Create a function.
	//
	// Creates a new function.
	//
	// The user must have the following permissions in order for the function to
	// be created: - **USE_CATALOG** on the function's parent catalog -
	// **USE_SCHEMA** and **CREATE_FUNCTION** on the function's parent schema
	Create(ctx context.Context, request CreateFunction) (*FunctionInfo, error)

	// Delete a function.
	//
	// Deletes the function that matches the supplied name. For the deletion to
	// succeed, the user must satisfy one of the following conditions: - Is the
	// owner of the function's parent catalog - Is the owner of the function's
	// parent schema and have the **USE_CATALOG** privilege on its parent
	// catalog - Is the owner of the function itself and have both the
	// **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA**
	// privilege on its parent schema
	Delete(ctx context.Context, request DeleteFunctionRequest) error

	// Get a function.
	//
	// Gets a function from within a parent catalog and schema. For the fetch to
	// succeed, the user must satisfy one of the following requirements: - Is a
	// metastore admin - Is an owner of the function's parent catalog - Have the
	// **USE_CATALOG** privilege on the function's parent catalog and be the
	// owner of the function - Have the **USE_CATALOG** privilege on the
	// function's parent catalog, the **USE_SCHEMA** privilege on the function's
	// parent schema, and the **EXECUTE** privilege on the function itself
	Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error)

	// List functions.
	//
	// List functions within the specified parent catalog and schema. If the
	// user is a metastore admin, all functions are returned in the output list.
	// Otherwise, the user must have the **USE_CATALOG** privilege on the
	// catalog and the **USE_SCHEMA** privilege on the schema, and the output
	// list contains only functions for which either the user has the
	// **EXECUTE** privilege or the user is the owner. There is no guarantee of
	// a specific ordering of the elements in the array.
	//
	// Use ListAll() to get all FunctionInfo instances
	List(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error)

	// Update a function.
	//
	// Updates the function that matches the supplied name. Only the owner of
	// the function can be updated. If the user is not a metastore admin, the
	// user must be a member of the group that is the new function owner. - Is a
	// metastore admin - Is the owner of the function's parent catalog - Is the
	// owner of the function's parent schema and has the **USE_CATALOG**
	// privilege on its parent catalog - Is the owner of the function itself and
	// has the **USE_CATALOG** privilege on its parent catalog as well as the
	// **USE_SCHEMA** privilege on the function's parent schema.
	Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error)
}

Functions implement User-Defined Functions (UDFs) in Unity Catalog.

The function implementation can be any SQL expression or Query, and it can be invoked wherever a table reference is allowed in a query. In Unity Catalog, a function resides at the same level as a table, so it can be referenced with the form __catalog_name__.__schema_name__.__function_name__.

type GetAccountMetastoreAssignmentRequest

// GetAccountMetastoreAssignmentRequest gets the metastore assignment for a
// workspace.
type GetAccountMetastoreAssignmentRequest struct {
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

Gets the metastore assignment for a workspace

type GetAccountMetastoreRequest

// GetAccountMetastoreRequest gets a metastore by its Unity Catalog ID.
type GetAccountMetastoreRequest struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
}

Get a metastore

type GetAccountStorageCredentialRequest

// GetAccountStorageCredentialRequest gets a named storage credential within a
// metastore.
type GetAccountStorageCredentialRequest struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
	// Name of the storage credential.
	Name string `json:"-" url:"-"`
}

Gets the named storage credential

type GetCatalogRequest

// GetCatalogRequest gets a catalog by name.
type GetCatalogRequest struct {
	// The name of the catalog.
	Name string `json:"-" url:"-"`
}

Get a catalog

type GetConnectionRequest

// GetConnectionRequest gets a connection by name.
type GetConnectionRequest struct {
	// Name of the connection.
	NameArg string `json:"-" url:"-"`
}

Get a connection

type GetEffectiveRequest

// GetEffectiveRequest identifies a securable whose effective permissions are
// requested; only Principal is sent as a query parameter.
type GetEffectiveRequest struct {
	// Full name of securable.
	FullName string `json:"-" url:"-"`
	// If provided, only the effective permissions for the specified principal
	// (user or group) are returned.
	Principal string `json:"-" url:"principal,omitempty"`
	// Type of securable.
	SecurableType SecurableType `json:"-" url:"-"`
}

Get effective permissions

type GetExternalLocationRequest

// GetExternalLocationRequest gets an external location by name.
type GetExternalLocationRequest struct {
	// Name of the external location.
	Name string `json:"-" url:"-"`
}

Get an external location

type GetFunctionRequest

// GetFunctionRequest gets a function by its fully-qualified name.
type GetFunctionRequest struct {
	// The fully-qualified name of the function (of the form
	// __catalog_name__.__schema_name__.__function_name__).
	Name string `json:"-" url:"-"`
}

Get a function

type GetGrantRequest

// GetGrantRequest identifies a securable whose permissions are requested;
// only Principal is sent as a query parameter.
type GetGrantRequest struct {
	// Full name of securable.
	FullName string `json:"-" url:"-"`
	// If provided, only the permissions for the specified principal (user or
	// group) are returned.
	Principal string `json:"-" url:"principal,omitempty"`
	// Type of securable.
	SecurableType SecurableType `json:"-" url:"-"`
}

Get permissions

type GetMetastoreRequest

// GetMetastoreRequest gets a metastore by its unique ID.
type GetMetastoreRequest struct {
	// Unique ID of the metastore.
	Id string `json:"-" url:"-"`
}

Get a metastore

type GetMetastoreSummaryResponse

// GetMetastoreSummaryResponse describes a metastore: its identity, cloud
// placement, storage root, Delta Sharing configuration, and audit fields.
type GetMetastoreSummaryResponse struct {
	// Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`).
	Cloud string `json:"cloud,omitempty"`
	// Time at which this metastore was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of metastore creator.
	CreatedBy string `json:"created_by,omitempty"`
	// Unique identifier of the metastore's (Default) Data Access Configuration.
	DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"`
	// The organization name of a Delta Sharing entity, to be used in
	// Databricks-to-Databricks Delta Sharing as the official name.
	DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
	// The lifetime of delta sharing recipient token in seconds.
	DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
	// The scope of Delta Sharing enabled for the metastore.
	DeltaSharingScope GetMetastoreSummaryResponseDeltaSharingScope `json:"delta_sharing_scope,omitempty"`
	// Globally unique metastore ID across clouds and regions, of the form
	// `cloud:region:metastore_id`.
	GlobalMetastoreId string `json:"global_metastore_id,omitempty"`
	// Unique identifier of metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// The user-specified name of the metastore.
	Name string `json:"name,omitempty"`
	// The owner of the metastore.
	Owner string `json:"owner,omitempty"`
	// Privilege model version of the metastore, of the form `major.minor`
	// (e.g., `1.0`).
	PrivilegeModelVersion string `json:"privilege_model_version,omitempty"`
	// Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).
	Region string `json:"region,omitempty"`
	// The storage root URL for metastore
	StorageRoot string `json:"storage_root,omitempty"`
	// UUID of storage credential to access the metastore storage_root.
	StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"`
	// Name of the storage credential to access the metastore storage_root.
	StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"`
	// Time at which the metastore was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified the metastore.
	UpdatedBy string `json:"updated_by,omitempty"`
}

type GetMetastoreSummaryResponseDeltaSharingScope

// GetMetastoreSummaryResponseDeltaSharingScope is the scope of Delta Sharing
// enabled for the metastore.
type GetMetastoreSummaryResponseDeltaSharingScope string

The scope of Delta Sharing enabled for the metastore.

// Allowed value `INTERNAL`.
const GetMetastoreSummaryResponseDeltaSharingScopeInternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL`

// Allowed value `INTERNAL_AND_EXTERNAL`.
const GetMetastoreSummaryResponseDeltaSharingScopeInternalAndExternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL_AND_EXTERNAL`

func (*GetMetastoreSummaryResponseDeltaSharingScope) Set

Set raw string value and validate it against allowed values

func (*GetMetastoreSummaryResponseDeltaSharingScope) String

String representation for fmt.Print

func (*GetMetastoreSummaryResponseDeltaSharingScope) Type

Type always returns GetMetastoreSummaryResponseDeltaSharingScope to satisfy [pflag.Value] interface

type GetSchemaRequest

// GetSchemaRequest is the request to fetch a schema by its full name.
type GetSchemaRequest struct {
	// Full name of the schema.
	FullName string `json:"-" url:"-"`
}

Get a schema

type GetStorageCredentialRequest

// GetStorageCredentialRequest is the request to fetch a storage credential by
// name.
type GetStorageCredentialRequest struct {
	// Name of the storage credential.
	Name string `json:"-" url:"-"`
}

Get a credential

type GetTableRequest

// GetTableRequest is the request to fetch a table by its full name.
type GetTableRequest struct {
	// Full name of the table.
	FullName string `json:"-" url:"-"`
	// Whether delta metadata should be included in the response.
	IncludeDeltaMetadata bool `json:"-" url:"include_delta_metadata,omitempty"`
}

Get a table

type GetWorkspaceBindingRequest

// GetWorkspaceBindingRequest is the request to fetch the workspace bindings of
// a catalog.
type GetWorkspaceBindingRequest struct {
	// The name of the catalog.
	Name string `json:"-" url:"-"`
}

Get catalog workspace bindings

type GrantsAPI

type GrantsAPI struct {
	// contains filtered or unexported fields
}

In Unity Catalog, data is secure by default. Initially, users have no access to data in a metastore. Access can be granted by either a metastore admin, the owner of an object, or the owner of the catalog or schema that contains the object. Securable objects in Unity Catalog are hierarchical and privileges are inherited downward.

Securable objects in Unity Catalog are hierarchical and privileges are inherited downward. This means that granting a privilege on the catalog automatically grants the privilege to all current and future objects within the catalog. Similarly, privileges granted on a schema are inherited by all current and future objects within that schema.

func NewGrants

func NewGrants(client *client.DatabricksClient) *GrantsAPI

func (*GrantsAPI) Get

func (a *GrantsAPI) Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error)

Get permissions.

Gets the permissions for a securable.

func (*GrantsAPI) GetBySecurableTypeAndFullName

func (a *GrantsAPI) GetBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*PermissionsList, error)

Get permissions.

Gets the permissions for a securable.

func (*GrantsAPI) GetEffective

func (a *GrantsAPI) GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error)

Get effective permissions.

Gets the effective permissions for a securable.

Example (Tables)
// Example: create a catalog, schema, and table, read the effective grants on
// the table, then tear everything down.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

tableName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

// Requires a running SQL warehouse identified by TEST_DEFAULT_WAREHOUSE_ID.
_, err = w.StatementExecution.ExecuteAndWait(ctx, sql.ExecuteStatementRequest{
	WarehouseId: os.Getenv("TEST_DEFAULT_WAREHOUSE_ID"),
	Catalog:     createdCatalog.Name,
	Schema:      createdSchema.Name,
	Statement:   fmt.Sprintf("CREATE TABLE %s AS SELECT 2+2 as four", tableName),
})
if err != nil {
	panic(err)
}

tableFullName := fmt.Sprintf("%s.%s.%s", createdCatalog.Name, createdSchema.Name, tableName)

createdTable, err := w.Tables.GetByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdTable)

grants, err := w.Grants.GetEffectiveBySecurableTypeAndFullName(ctx, catalog.SecurableTypeTable, createdTable.FullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", grants)

// cleanup: delete in reverse order of creation — the table must go before
// its parent schema, and the schema before its parent catalog; deleting the
// catalog first would make the later table delete fail.
err = w.Tables.DeleteByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*GrantsAPI) GetEffectiveBySecurableTypeAndFullName

func (a *GrantsAPI) GetEffectiveBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*EffectivePermissionsList, error)

Get effective permissions.

Gets the effective permissions for a securable.

func (*GrantsAPI) Impl

func (a *GrantsAPI) Impl() GrantsService

Impl returns low-level Grants API implementation

func (*GrantsAPI) Update

func (a *GrantsAPI) Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error)

Update permissions.

Updates the permissions for a securable.

Example (Tables)
// Example: create a catalog, schema, and table, grant MODIFY and SELECT on
// the table to an account-level group, then tear everything down.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

tableName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

// Requires a running SQL warehouse identified by TEST_DEFAULT_WAREHOUSE_ID.
_, err = w.StatementExecution.ExecuteAndWait(ctx, sql.ExecuteStatementRequest{
	WarehouseId: os.Getenv("TEST_DEFAULT_WAREHOUSE_ID"),
	Catalog:     createdCatalog.Name,
	Schema:      createdSchema.Name,
	Statement:   fmt.Sprintf("CREATE TABLE %s AS SELECT 2+2 as four", tableName),
})
if err != nil {
	panic(err)
}

tableFullName := fmt.Sprintf("%s.%s.%s", createdCatalog.Name, createdSchema.Name, tableName)

accountLevelGroupName := os.Getenv("TEST_DATA_ENG_GROUP")

createdTable, err := w.Tables.GetByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdTable)

x, err := w.Grants.Update(ctx, catalog.UpdatePermissions{
	FullName:      createdTable.FullName,
	SecurableType: catalog.SecurableTypeTable,
	Changes: []catalog.PermissionsChange{catalog.PermissionsChange{
		Add:       []catalog.Privilege{catalog.PrivilegeModify, catalog.PrivilegeSelect},
		Principal: accountLevelGroupName,
	}},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", x)

// cleanup: delete in reverse order of creation — the table must go before
// its parent schema, and the schema before its parent catalog; deleting the
// catalog first would make the later table delete fail.
err = w.Tables.DeleteByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*GrantsAPI) WithImpl

func (a *GrantsAPI) WithImpl(impl GrantsService) *GrantsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type GrantsService

// GrantsService is the low-level interface for reading and updating
// permissions on Unity Catalog securables.
type GrantsService interface {

	// Get permissions.
	//
	// Gets the permissions for a securable.
	Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error)

	// Get effective permissions.
	//
	// Gets the effective permissions for a securable.
	GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error)

	// Update permissions.
	//
	// Updates the permissions for a securable.
	Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error)
}

In Unity Catalog, data is secure by default. Initially, users have no access to data in a metastore. Access can be granted by either a metastore admin, the owner of an object, or the owner of the catalog or schema that contains the object. Securable objects in Unity Catalog are hierarchical and privileges are inherited downward.

Securable objects in Unity Catalog are hierarchical and privileges are inherited downward. This means that granting a privilege on the catalog automatically grants the privilege to all current and future objects within the catalog. Similarly, privileges granted on a schema are inherited by all current and future objects within that schema.

type IsolationMode

// IsolationMode indicates whether the current securable is accessible from all
// workspaces or a specific set of workspaces.
type IsolationMode string

Whether the current securable is accessible from all workspaces or a specific set of workspaces.

// Allowed value `ISOLATED`.
const IsolationModeIsolated IsolationMode = `ISOLATED`

// Allowed value `OPEN`.
const IsolationModeOpen IsolationMode = `OPEN`

func (*IsolationMode) Set

func (f *IsolationMode) Set(v string) error

Set raw string value and validate it against allowed values

func (*IsolationMode) String

func (f *IsolationMode) String() string

String representation for fmt.Print

func (*IsolationMode) Type

func (f *IsolationMode) Type() string

Type always returns IsolationMode to satisfy [pflag.Value] interface

type ListAccountMetastoreAssignmentsRequest

// ListAccountMetastoreAssignmentsRequest is the request to list all workspaces
// assigned to a metastore.
type ListAccountMetastoreAssignmentsRequest struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
}

Get all workspaces assigned to a metastore

type ListAccountStorageCredentialsRequest

// ListAccountStorageCredentialsRequest is the request to list all storage
// credentials assigned to a metastore.
type ListAccountStorageCredentialsRequest struct {
	// Unity Catalog metastore ID
	MetastoreId string `json:"-" url:"-"`
}

Get all storage credentials assigned to a metastore

type ListCatalogsResponse

// ListCatalogsResponse is the response to a list-catalogs call.
type ListCatalogsResponse struct {
	// An array of catalog information objects.
	Catalogs []CatalogInfo `json:"catalogs,omitempty"`
}

type ListConnectionsResponse

// ListConnectionsResponse is the response to a list-connections call.
type ListConnectionsResponse struct {
	// An array of connection information objects.
	Connections []ConnectionInfo `json:"connections,omitempty"`
}

type ListExternalLocationsResponse

// ListExternalLocationsResponse is the response to a list-external-locations
// call.
type ListExternalLocationsResponse struct {
	// An array of external locations.
	ExternalLocations []ExternalLocationInfo `json:"external_locations,omitempty"`
}

type ListFunctionsRequest

// ListFunctionsRequest is the request to list the functions in a schema.
type ListFunctionsRequest struct {
	// Name of parent catalog for functions of interest.
	CatalogName string `json:"-" url:"catalog_name"`
	// Parent schema of functions.
	SchemaName string `json:"-" url:"schema_name"`
}

List functions

type ListFunctionsResponse

// ListFunctionsResponse is the response to a list-functions call.
type ListFunctionsResponse struct {
	// An array of function information objects.
	Functions []FunctionInfo `json:"functions,omitempty"`
}

type ListMetastoresResponse

// ListMetastoresResponse is the response to a list-metastores call.
type ListMetastoresResponse struct {
	// An array of metastore information objects.
	Metastores []MetastoreInfo `json:"metastores,omitempty"`
}

type ListSchemasRequest

// ListSchemasRequest is the request to list the schemas in a catalog.
type ListSchemasRequest struct {
	// Parent catalog for schemas of interest.
	CatalogName string `json:"-" url:"catalog_name"`
}

List schemas

type ListSchemasResponse

// ListSchemasResponse is the response to a list-schemas call.
type ListSchemasResponse struct {
	// An array of schema information objects.
	Schemas []SchemaInfo `json:"schemas,omitempty"`
}

type ListStorageCredentialsResponse

// ListStorageCredentialsResponse is the response to a list-storage-credentials
// call.
type ListStorageCredentialsResponse struct {
	// An array of storage credential information objects.
	StorageCredentials []StorageCredentialInfo `json:"storage_credentials,omitempty"`
}

type ListSummariesRequest

// ListSummariesRequest is the paginated request to list table summaries in a
// catalog, optionally filtered by schema- and table-name patterns.
type ListSummariesRequest struct {
	// Name of parent catalog for tables of interest.
	CatalogName string `json:"-" url:"catalog_name"`
	// Maximum number of tables to return (page length). Defaults to 10000.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// Opaque token to send for the next page of results (pagination).
	PageToken string `json:"-" url:"page_token,omitempty"`
	// A sql LIKE pattern (% and _) for schema names. All schemas will be
	// returned if not set or empty.
	SchemaNamePattern string `json:"-" url:"schema_name_pattern,omitempty"`
	// A sql LIKE pattern (% and _) for table names. All tables will be returned
	// if not set or empty.
	TableNamePattern string `json:"-" url:"table_name_pattern,omitempty"`
}

List table summaries

type ListSystemSchemasRequest

// ListSystemSchemasRequest is the request to list the system schemas of a
// metastore.
type ListSystemSchemasRequest struct {
	// The ID for the metastore in which the system schema resides.
	MetastoreId string `json:"-" url:"-"`
}

List system schemas

type ListSystemSchemasResponse

// ListSystemSchemasResponse is the response to a list-system-schemas call.
type ListSystemSchemasResponse struct {
	// An array of system schema information objects.
	Schemas []SystemSchemaInfo `json:"schemas,omitempty"`
}

type ListTableSummariesResponse

// ListTableSummariesResponse is one page of a list-table-summaries result.
type ListTableSummariesResponse struct {
	// Opaque token for pagination. Omitted if there are no more results.
	NextPageToken string `json:"next_page_token,omitempty"`
	// List of table summaries.
	Tables []TableSummary `json:"tables,omitempty"`
}

type ListTablesRequest

// ListTablesRequest is the paginated request to list the tables in a schema.
type ListTablesRequest struct {
	// Name of parent catalog for tables of interest.
	CatalogName string `json:"-" url:"catalog_name"`
	// Whether delta metadata should be included in the response.
	IncludeDeltaMetadata bool `json:"-" url:"include_delta_metadata,omitempty"`
	// Maximum number of tables to return (page length). If not set, all
	// accessible tables in the schema are returned. If set to:
	//
	// * greater than 0, page length is the minimum of this value and a server
	// configured value. * equal to 0, page length is set to a server configured
	// value. * lesser than 0, invalid parameter error.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// Opaque token to send for the next page of results (pagination).
	PageToken string `json:"-" url:"page_token,omitempty"`
	// Parent schema of tables.
	SchemaName string `json:"-" url:"schema_name"`
}

List tables

type ListTablesResponse

// ListTablesResponse is one page of a list-tables result.
type ListTablesResponse struct {
	// Opaque token for pagination. Omitted if there are no more results.
	// page_token should be set to this value for fetching the next page.
	NextPageToken string `json:"next_page_token,omitempty"`
	// An array of table information objects.
	Tables []TableInfo `json:"tables,omitempty"`
}

type ListVolumesRequest

// ListVolumesRequest is the request to list the volumes in a schema.
type ListVolumesRequest struct {
	// The identifier of the catalog
	CatalogName string `json:"-" url:"catalog_name"`
	// The identifier of the schema
	SchemaName string `json:"-" url:"schema_name"`
}

List Volumes

type ListVolumesResponseContent

// ListVolumesResponseContent is the response to a list-volumes call.
type ListVolumesResponseContent struct {
	// An array of volume information objects.
	Volumes []VolumeInfo `json:"volumes,omitempty"`
}

type MetastoreAssignment

// MetastoreAssignment links a Databricks workspace to its assigned metastore
// and records the workspace's default catalog.
type MetastoreAssignment struct {
	// The name of the default catalog in the metastore.
	DefaultCatalogName string `json:"default_catalog_name,omitempty"`
	// The unique ID of the metastore.
	MetastoreId string `json:"metastore_id"`
	// The unique ID of the Databricks workspace.
	WorkspaceId int64 `json:"workspace_id"`
}

type MetastoreInfo

// MetastoreInfo describes a metastore: its identity, cloud placement, storage
// root, Delta Sharing configuration, and audit fields.
type MetastoreInfo struct {
	// Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`).
	Cloud string `json:"cloud,omitempty"`
	// Time at which this metastore was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of metastore creator.
	CreatedBy string `json:"created_by,omitempty"`
	// Unique identifier of the metastore's (Default) Data Access Configuration.
	DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"`
	// The organization name of a Delta Sharing entity, to be used in
	// Databricks-to-Databricks Delta Sharing as the official name.
	DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
	// The lifetime of delta sharing recipient token in seconds.
	DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
	// The scope of Delta Sharing enabled for the metastore.
	DeltaSharingScope MetastoreInfoDeltaSharingScope `json:"delta_sharing_scope,omitempty"`
	// Globally unique metastore ID across clouds and regions, of the form
	// `cloud:region:metastore_id`.
	GlobalMetastoreId string `json:"global_metastore_id,omitempty"`
	// Unique identifier of metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// The user-specified name of the metastore.
	Name string `json:"name,omitempty"`
	// The owner of the metastore.
	Owner string `json:"owner,omitempty"`
	// Privilege model version of the metastore, of the form `major.minor`
	// (e.g., `1.0`).
	PrivilegeModelVersion string `json:"privilege_model_version,omitempty"`
	// Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).
	Region string `json:"region,omitempty"`
	// The storage root URL for metastore
	StorageRoot string `json:"storage_root,omitempty"`
	// UUID of storage credential to access the metastore storage_root.
	StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"`
	// Name of the storage credential to access the metastore storage_root.
	StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"`
	// Time at which the metastore was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified the metastore.
	UpdatedBy string `json:"updated_by,omitempty"`
}

type MetastoreInfoDeltaSharingScope

// MetastoreInfoDeltaSharingScope is the scope of Delta Sharing enabled for the
// metastore.
type MetastoreInfoDeltaSharingScope string

The scope of Delta Sharing enabled for the metastore.

// Allowed value `INTERNAL`.
const MetastoreInfoDeltaSharingScopeInternal MetastoreInfoDeltaSharingScope = `INTERNAL`

// Allowed value `INTERNAL_AND_EXTERNAL`.
const MetastoreInfoDeltaSharingScopeInternalAndExternal MetastoreInfoDeltaSharingScope = `INTERNAL_AND_EXTERNAL`

func (*MetastoreInfoDeltaSharingScope) Set

Set raw string value and validate it against allowed values

func (*MetastoreInfoDeltaSharingScope) String

String representation for fmt.Print

func (*MetastoreInfoDeltaSharingScope) Type

Type always returns MetastoreInfoDeltaSharingScope to satisfy [pflag.Value] interface

type MetastoresAPI

type MetastoresAPI struct {
	// contains filtered or unexported fields
}

A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces to control which workloads use each metastore. For a workspace to use Unity Catalog, it must have a Unity Catalog metastore attached.

Each metastore is configured with a root storage location in a cloud storage account. This storage location is used for metadata and managed tables data.

NOTE: This metastore is distinct from the metastore included in Databricks workspaces created before Unity Catalog was released. If your workspace includes a legacy Hive metastore, the data in that metastore is available in a catalog named hive_metastore.

func NewMetastores

func NewMetastores(client *client.DatabricksClient) *MetastoresAPI

func (*MetastoresAPI) Assign

Create an assignment.

Creates a new metastore assignment. If an assignment for the same __workspace_id__ exists, it will be overwritten by the new __metastore_id__ and __default_catalog_name__. The caller must be an account admin.

Example (Metastores)
// Example: create a metastore, assign it to the workspace given by
// TEST_WORKSPACE_ID, then delete the metastore.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Parse the target workspace ID from the environment; panics on a bad value.
workspaceId := func(v string) int64 {
	i, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("`%s` is not int64: %s", v, err))
	}
	return i
}(os.Getenv("TEST_WORKSPACE_ID"))

// Storage root lives under the bucket named by TEST_BUCKET.
created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Metastores.Assign(ctx, catalog.CreateMetastoreAssignment{
	MetastoreId: created.MetastoreId,
	WorkspaceId: workspaceId,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) Create

func (a *MetastoresAPI) Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error)

Create a metastore.

Creates a new metastore based on a provided name and storage root path.

Example (Metastores)
// Example: create a metastore with a random name under the TEST_BUCKET
// storage root, then delete it.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) Current

Get metastore assignment for workspace.

Gets the metastore assignment for the workspace being accessed.

Example (Metastores)
// Example: fetch the metastore assignment of the current workspace.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

currentMetastore, err := w.Metastores.Current(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", currentMetastore)
Output:

func (*MetastoresAPI) Delete

func (a *MetastoresAPI) Delete(ctx context.Context, request DeleteMetastoreRequest) error

Delete a metastore.

Deletes a metastore. The caller must be a metastore admin.

func (*MetastoresAPI) DeleteById

func (a *MetastoresAPI) DeleteById(ctx context.Context, id string) error

Delete a metastore.

Deletes a metastore. The caller must be a metastore admin.

func (*MetastoresAPI) EnableOptimization

Toggle predictive optimization on the metastore.

Enables or disables predictive optimization on the metastore.

Example (Metastores)
// Example: create a metastore, enable predictive optimization on it, then
// delete the metastore.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

autoMaintenance, err := w.Metastores.EnableOptimization(ctx, catalog.UpdatePredictiveOptimization{
	Enable:      true,
	MetastoreId: created.MetastoreId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", autoMaintenance)

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) Get

Get a metastore.

Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this info.

Example (Metastores)
// Example: create a metastore, fetch it back by ID, then delete it.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Metastores.GetById(ctx, created.MetastoreId)
if err != nil {
	panic(err)
}

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) GetById

func (a *MetastoresAPI) GetById(ctx context.Context, id string) (*MetastoreInfo, error)

Get a metastore.

Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this info.

func (*MetastoresAPI) GetByName

func (a *MetastoresAPI) GetByName(ctx context.Context, name string) (*MetastoreInfo, error)

GetByName calls MetastoresAPI.MetastoreInfoNameToMetastoreIdMap and returns a single MetastoreInfo.

Returns an error if there's more than one MetastoreInfo with the same .Name.

Note: All MetastoreInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*MetastoresAPI) Impl

func (a *MetastoresAPI) Impl() MetastoresService

Impl returns low-level Metastores API implementation

func (*MetastoresAPI) ListAll

func (a *MetastoresAPI) ListAll(ctx context.Context) ([]MetastoreInfo, error)

List metastores.

Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (Metastores)
// Example: list every metastore visible to the caller.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Metastores.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*MetastoresAPI) MetastoreInfoNameToMetastoreIdMap

func (a *MetastoresAPI) MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error)

MetastoreInfoNameToMetastoreIdMap calls MetastoresAPI.ListAll and creates a map of results with MetastoreInfo.Name as key and MetastoreInfo.MetastoreId as value.

Returns an error if there's more than one MetastoreInfo with the same .Name.

Note: All MetastoreInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*MetastoresAPI) Summary

Get a metastore summary.

Gets information about a metastore. This summary includes the storage credential, the cloud vendor, the cloud region, and the global metastore ID.

Example (Metastores)
// Example: fetch the summary of the current workspace's metastore.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

summary, err := w.Metastores.Summary(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", summary)
Output:

func (*MetastoresAPI) Unassign

func (a *MetastoresAPI) Unassign(ctx context.Context, request UnassignRequest) error

Delete an assignment.

Deletes a metastore assignment. The caller must be an account administrator.

Example (Metastores)
// Example: create a metastore, unassign it from the workspace given by
// TEST_WORKSPACE_ID, then delete the metastore.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

// Parse the target workspace ID from the environment; panics on a bad value.
workspaceId := func(v string) int64 {
	i, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("`%s` is not int64: %s", v, err))
	}
	return i
}(os.Getenv("TEST_WORKSPACE_ID"))

created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Metastores.Unassign(ctx, catalog.UnassignRequest{
	MetastoreId: created.MetastoreId,
	WorkspaceId: workspaceId,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) UnassignByWorkspaceId

func (a *MetastoresAPI) UnassignByWorkspaceId(ctx context.Context, workspaceId int64) error

Delete an assignment.

Deletes a metastore assignment. The caller must be an account administrator.

func (*MetastoresAPI) Update

func (a *MetastoresAPI) Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error)

Update a metastore.

Updates information for a specific metastore. The caller must be a metastore admin.

Example (Metastores)
// Example: create a metastore, rename it via Update, then delete it.
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Metastores.Create(ctx, catalog.CreateMetastore{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageRoot: fmt.Sprintf("s3://%s/%s", os.Getenv("TEST_BUCKET"), fmt.Sprintf("sdk-%x", time.Now().UnixNano())),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Metastores.Update(ctx, catalog.UpdateMetastore{
	Id:   created.MetastoreId,
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Metastores.Delete(ctx, catalog.DeleteMetastoreRequest{
	Id:    created.MetastoreId,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*MetastoresAPI) UpdateAssignment

func (a *MetastoresAPI) UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error

Update an assignment.

Updates a metastore assignment. This operation can be used to update __metastore_id__ or __default_catalog_name__ for a specified Workspace, if the Workspace is already assigned a metastore. The caller must be an account admin to update __metastore_id__; otherwise, the caller can be a Workspace admin.

func (*MetastoresAPI) WithImpl

func (a *MetastoresAPI) WithImpl(impl MetastoresService) *MetastoresAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type MetastoresService

type MetastoresService interface {

	// Create an assignment.
	//
	// Creates a new metastore assignment. If an assignment for the same
	// __workspace_id__ exists, it will be overwritten by the new
	// __metastore_id__ and __default_catalog_name__. The caller must be an
	// account admin.
	Assign(ctx context.Context, request CreateMetastoreAssignment) error

	// Create a metastore.
	//
	// Creates a new metastore based on a provided name and storage root path.
	Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error)

	// Get metastore assignment for workspace.
	//
	// Gets the metastore assignment for the workspace being accessed.
	Current(ctx context.Context) (*MetastoreAssignment, error)

	// Delete a metastore.
	//
	// Deletes a metastore. The caller must be a metastore admin.
	Delete(ctx context.Context, request DeleteMetastoreRequest) error

	// Toggle predictive optimization on the metastore.
	//
	// Enables or disables predictive optimization on the metastore.
	EnableOptimization(ctx context.Context, request UpdatePredictiveOptimization) (*UpdatePredictiveOptimizationResponse, error)

	// Get a metastore.
	//
	// Gets a metastore that matches the supplied ID. The caller must be a
	// metastore admin to retrieve this info.
	Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error)

	// List metastores.
	//
	// Gets an array of the available metastores (as __MetastoreInfo__ objects).
	// The caller must be an admin to retrieve this info. There is no guarantee
	// of a specific ordering of the elements in the array.
	//
	// Use ListAll() to get all MetastoreInfo instances
	List(ctx context.Context) (*ListMetastoresResponse, error)

	// Get a metastore summary.
	//
	// Gets information about a metastore. This summary includes the storage
	// credential, the cloud vendor, the cloud region, and the global metastore
	// ID.
	Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error)

	// Delete an assignment.
	//
	// Deletes a metastore assignment. The caller must be an account
	// administrator.
	Unassign(ctx context.Context, request UnassignRequest) error

	// Update a metastore.
	//
	// Updates information for a specific metastore. The caller must be a
	// metastore admin.
	Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error)

	// Update an assignment.
	//
	// Updates a metastore assignment. This operation can be used to update
	// __metastore_id__ or __default_catalog_name__ for a specified Workspace,
	// if the Workspace is already assigned a metastore. The caller must be an
	// account admin to update __metastore_id__; otherwise, the caller can be a
	// Workspace admin.
	UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error
}

A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces to control which workloads use each metastore. For a workspace to use Unity Catalog, it must have a Unity Catalog metastore attached.

Each metastore is configured with a root storage location in a cloud storage account. This storage location is used for metadata and managed tables data.

NOTE: This metastore is distinct from the metastore included in Databricks workspaces created before Unity Catalog was released. If your workspace includes a legacy Hive metastore, the data in that metastore is available in a catalog named hive_metastore.

type NamedTableConstraint

type NamedTableConstraint struct {
	// The name of the constraint.
	Name string `json:"name"`
}

type PermissionsChange

type PermissionsChange struct {
	// The set of privileges to add.
	Add []Privilege `json:"add,omitempty"`
	// The principal whose privileges we are changing.
	Principal string `json:"principal,omitempty"`
	// The set of privileges to remove.
	Remove []Privilege `json:"remove,omitempty"`
}

type PermissionsList

type PermissionsList struct {
	// The privileges assigned to each principal
	PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"`
}

type PrimaryKeyConstraint

type PrimaryKeyConstraint struct {
	// Column names for this constraint.
	ChildColumns []string `json:"child_columns"`
	// The name of the constraint.
	Name string `json:"name"`
}

type Privilege

type Privilege string
const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES`
const PrivilegeCreate Privilege = `CREATE`
const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG`
const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION`
const PrivilegeCreateExternalTable Privilege = `CREATE_EXTERNAL_TABLE`
const PrivilegeCreateFunction Privilege = `CREATE_FUNCTION`
const PrivilegeCreateManagedStorage Privilege = `CREATE_MANAGED_STORAGE`
const PrivilegeCreateMaterializedView Privilege = `CREATE_MATERIALIZED_VIEW`
const PrivilegeCreateProvider Privilege = `CREATE_PROVIDER`
const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT`
const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA`
const PrivilegeCreateShare Privilege = `CREATE_SHARE`
const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL`
const PrivilegeCreateTable Privilege = `CREATE_TABLE`
const PrivilegeCreateView Privilege = `CREATE_VIEW`
const PrivilegeExecute Privilege = `EXECUTE`
const PrivilegeModify Privilege = `MODIFY`
const PrivilegeReadFiles Privilege = `READ_FILES`
const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES`
const PrivilegeRefresh Privilege = `REFRESH`
const PrivilegeSelect Privilege = `SELECT`
const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION`
const PrivilegeUsage Privilege = `USAGE`
const PrivilegeUseCatalog Privilege = `USE_CATALOG`
const PrivilegeUseMarketplaceAssets Privilege = `USE_MARKETPLACE_ASSETS`
const PrivilegeUseProvider Privilege = `USE_PROVIDER`
const PrivilegeUseRecipient Privilege = `USE_RECIPIENT`
const PrivilegeUseSchema Privilege = `USE_SCHEMA`
const PrivilegeUseShare Privilege = `USE_SHARE`
const PrivilegeWriteFiles Privilege = `WRITE_FILES`
const PrivilegeWritePrivateFiles Privilege = `WRITE_PRIVATE_FILES`

func (*Privilege) Set

func (f *Privilege) Set(v string) error

Set raw string value and validate it against allowed values

func (*Privilege) String

func (f *Privilege) String() string

String representation for fmt.Print

func (*Privilege) Type

func (f *Privilege) Type() string

Type always returns Privilege to satisfy [pflag.Value] interface

type PrivilegeAssignment

type PrivilegeAssignment struct {
	// The principal (user email address or group name).
	Principal string `json:"principal,omitempty"`
	// The privileges assigned to the principal.
	Privileges []Privilege `json:"privileges,omitempty"`
}

type PropertiesKvPairs

type PropertiesKvPairs map[string]string

An object containing map of key-value properties attached to the connection.

type ReadVolumeRequest

type ReadVolumeRequest struct {
	// The three-level (fully qualified) name of the volume
	FullNameArg string `json:"-" url:"-"`
}

Get a Volume

type SchemaInfo

type SchemaInfo struct {
	// Name of parent catalog.
	CatalogName string `json:"catalog_name,omitempty"`
	// The type of the parent catalog.
	CatalogType string `json:"catalog_type,omitempty"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Time at which this schema was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of schema creator.
	CreatedBy string `json:"created_by,omitempty"`

	EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"`
	// Whether auto maintenance should be enabled for this object and objects
	// under it.
	EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"`
	// Full name of schema, in form of __catalog_name__.__schema_name__.
	FullName string `json:"full_name,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of schema, relative to parent catalog.
	Name string `json:"name,omitempty"`
	// Username of current owner of schema.
	Owner string `json:"owner,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
	// Storage location for managed tables within schema.
	StorageLocation string `json:"storage_location,omitempty"`
	// Storage root URL for managed tables within schema.
	StorageRoot string `json:"storage_root,omitempty"`
	// Time at which this schema was last updated, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified schema.
	UpdatedBy string `json:"updated_by,omitempty"`
}

type SchemasAPI

type SchemasAPI struct {
	// contains filtered or unexported fields
}

A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace. A schema organizes tables, views and functions. To access (or list) a table or view in a schema, users must have the USE_SCHEMA data permission on the schema and its parent catalog, and they must have the SELECT permission on the table or view.

func NewSchemas

func NewSchemas(client *client.DatabricksClient) *SchemasAPI

func (*SchemasAPI) Create

func (a *SchemasAPI) Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error)

Create a schema.

Creates a new schema for a catalog in the metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog.

Example (Schemas)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

newCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", newCatalog)

created, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: newCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  newCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, created.FullName)
if err != nil {
	panic(err)
}
Output:

Example (Shares)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
Output:

Example (Tables)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
Output:

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
Output:

func (*SchemasAPI) Delete

func (a *SchemasAPI) Delete(ctx context.Context, request DeleteSchemaRequest) error

Delete a schema.

Deletes the specified schema from the parent catalog. The caller must be the owner of the schema or an owner of the parent catalog.

func (*SchemasAPI) DeleteByFullName

func (a *SchemasAPI) DeleteByFullName(ctx context.Context, fullName string) error

Delete a schema.

Deletes the specified schema from the parent catalog. The caller must be the owner of the schema or an owner of the parent catalog.

func (*SchemasAPI) Get

func (a *SchemasAPI) Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error)

Get a schema.

Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** privilege on the schema.

Example (Schemas)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

newCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", newCatalog)

created, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: newCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Schemas.GetByFullName(ctx, created.FullName)
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  newCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, created.FullName)
if err != nil {
	panic(err)
}
Output:

func (*SchemasAPI) GetByFullName

func (a *SchemasAPI) GetByFullName(ctx context.Context, fullName string) (*SchemaInfo, error)

Get a schema.

Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** privilege on the schema.

func (*SchemasAPI) GetByName

func (a *SchemasAPI) GetByName(ctx context.Context, name string) (*SchemaInfo, error)

GetByName calls SchemasAPI.SchemaInfoNameToFullNameMap and returns a single SchemaInfo.

Returns an error if there's more than one SchemaInfo with the same .Name.

Note: All SchemaInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*SchemasAPI) Impl

func (a *SchemasAPI) Impl() SchemasService

Impl returns low-level Schemas API implementation

func (*SchemasAPI) ListAll

func (a *SchemasAPI) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error)

List schemas.

Gets an array of schemas for a catalog in the metastore. If the caller is the metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved. Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (Schemas)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

newCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", newCatalog)

all, err := w.Schemas.ListAll(ctx, catalog.ListSchemasRequest{
	CatalogName: newCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  newCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*SchemasAPI) SchemaInfoNameToFullNameMap

func (a *SchemasAPI) SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error)

SchemaInfoNameToFullNameMap calls SchemasAPI.ListAll and creates a map of results with SchemaInfo.Name as key and SchemaInfo.FullName as value.

Returns an error if there's more than one SchemaInfo with the same .Name.

Note: All SchemaInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*SchemasAPI) Update

func (a *SchemasAPI) Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error)

Update a schema.

Updates a schema for a catalog. The caller must be the owner of the schema or a metastore admin. If the caller is a metastore admin, only the __owner__ field can be changed in the update. If the __name__ field must be updated, the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on the parent catalog.

Example (Schemas)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

newCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", newCatalog)

created, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: newCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Schemas.Update(ctx, catalog.UpdateSchema{
	FullName: created.FullName,
	Comment:  fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  newCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Schemas.DeleteByFullName(ctx, created.FullName)
if err != nil {
	panic(err)
}
Output:

func (*SchemasAPI) WithImpl

func (a *SchemasAPI) WithImpl(impl SchemasService) *SchemasAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type SchemasService

type SchemasService interface {

	// Create a schema.
	//
	// Creates a new schema for a catalog in the metastore. The caller must be
	// a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent
	// catalog.
	Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error)

	// Delete a schema.
	//
	// Deletes the specified schema from the parent catalog. The caller must be
	// the owner of the schema or an owner of the parent catalog.
	Delete(ctx context.Context, request DeleteSchemaRequest) error

	// Get a schema.
	//
	// Gets the specified schema within the metastore. The caller must be a
	// metastore admin, the owner of the schema, or a user that has the
	// **USE_SCHEMA** privilege on the schema.
	Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error)

	// List schemas.
	//
	// Gets an array of schemas for a catalog in the metastore. If the caller is
	// the metastore admin or the owner of the parent catalog, all schemas for
	// the catalog will be retrieved. Otherwise, only schemas owned by the
	// caller (or for which the caller has the **USE_SCHEMA** privilege) will be
	// retrieved. There is no guarantee of a specific ordering of the elements
	// in the array.
	//
	// Use ListAll() to get all SchemaInfo instances
	List(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error)

	// Update a schema.
	//
	// Updates a schema for a catalog. The caller must be the owner of the
	// schema or a metastore admin. If the caller is a metastore admin, only the
	// __owner__ field can be changed in the update. If the __name__ field must
	// be updated, the caller must be a metastore admin or have the
	// **CREATE_SCHEMA** privilege on the parent catalog.
	Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error)
}

A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace. A schema organizes tables, views and functions. To access (or list) a table or view in a schema, users must have the USE_SCHEMA data permission on the schema and its parent catalog, and they must have the SELECT permission on the table or view.

type SecurableOptionsMap

type SecurableOptionsMap map[string]string

A map of key-value properties attached to the securable.

type SecurablePropertiesMap

type SecurablePropertiesMap map[string]string

A map of key-value properties attached to the securable.

type SecurableType

type SecurableType string

The type of Unity Catalog securable

const SecurableTypeCatalog SecurableType = `catalog`
const SecurableTypeExternalLocation SecurableType = `external_location`
const SecurableTypeFunction SecurableType = `function`
const SecurableTypeMetastore SecurableType = `metastore`
const SecurableTypePipeline SecurableType = `pipeline`
const SecurableTypeProvider SecurableType = `provider`
const SecurableTypeRecipient SecurableType = `recipient`
const SecurableTypeSchema SecurableType = `schema`
const SecurableTypeShare SecurableType = `share`
const SecurableTypeStorageCredential SecurableType = `storage_credential`
const SecurableTypeTable SecurableType = `table`

func (*SecurableType) Set

func (f *SecurableType) Set(v string) error

Set raw string value and validate it against allowed values

func (*SecurableType) String

func (f *SecurableType) String() string

String representation for fmt.Print

func (*SecurableType) Type

func (f *SecurableType) Type() string

Type always returns SecurableType to satisfy [pflag.Value] interface

type StorageCredentialInfo

type StorageCredentialInfo struct {
	// The AWS IAM role configuration.
	AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
	// The Azure managed identity configuration.
	AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
	// The Azure service principal configuration.
	AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
	// Comment associated with the credential.
	Comment string `json:"comment,omitempty"`
	// Time at which this Credential was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of credential creator.
	CreatedBy string `json:"created_by,omitempty"`
	// The Databricks-managed GCP service account configuration.
	DatabricksGcpServiceAccount *DatabricksGcpServiceAccountResponse `json:"databricks_gcp_service_account,omitempty"`
	// The unique identifier of the credential.
	Id string `json:"id,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// The credential name. The name must be unique within the metastore.
	Name string `json:"name,omitempty"`
	// Username of current owner of credential.
	Owner string `json:"owner,omitempty"`
	// Whether the storage credential is only usable for read operations.
	ReadOnly bool `json:"read_only,omitempty"`
	// Time at which this credential was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified the credential.
	UpdatedBy string `json:"updated_by,omitempty"`
	// Whether this credential is the current metastore's root storage
	// credential.
	UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"`
}

type StorageCredentialsAPI

type StorageCredentialsAPI struct {
	// contains filtered or unexported fields
}

A storage credential represents an authentication and authorization mechanism for accessing data stored on your cloud tenant. Each storage credential is subject to Unity Catalog access-control policies that control which users and groups can access the credential. If a user does not have access to a storage credential in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf.

Databricks recommends using external locations rather than using storage credentials directly.

To create storage credentials, you must be a Databricks account admin. The account admin who creates the storage credential can delegate ownership to another user or group to manage permissions on it.

func NewStorageCredentials

func NewStorageCredentials(client *client.DatabricksClient) *StorageCredentialsAPI

func (*StorageCredentialsAPI) Create

Create a storage credential.

Creates a new storage credential. The request object is specific to the cloud:

* **AwsIamRole** for AWS credentials. * **AzureServicePrincipal** for Azure credentials. * **AzureManagedIdentity** for Azure managed credentials. * **DatabricksGcpServiceAccount** for GCP managed credentials.

The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.

Example (ExternalLocationsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

credential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", credential)

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, credential.Name)
if err != nil {
	panic(err)
}
Output:

Example (StorageCredentialsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

storageCredential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
	Comment: "created via SDK",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storageCredential)
Output:

func (*StorageCredentialsAPI) Delete

Delete a credential.

Deletes a storage credential from the metastore. The caller must be an owner of the storage credential.

func (*StorageCredentialsAPI) DeleteByName

func (a *StorageCredentialsAPI) DeleteByName(ctx context.Context, name string) error

Delete a credential.

Deletes a storage credential from the metastore. The caller must be an owner of the storage credential.

func (*StorageCredentialsAPI) Get

Get a credential.

Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential.

Example (StorageCredentialsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byName, err := w.StorageCredentials.GetByName(ctx, created.Name)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byName)

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

func (*StorageCredentialsAPI) GetByName

Get a credential.

Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential.

func (*StorageCredentialsAPI) Impl

Impl returns low-level StorageCredentials API implementation

func (*StorageCredentialsAPI) ListAll

List credentials.

Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, all storage credentials will be retrieved. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (StorageCredentialsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.StorageCredentials.ListAll(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*StorageCredentialsAPI) StorageCredentialInfoNameToIdMap

func (a *StorageCredentialsAPI) StorageCredentialInfoNameToIdMap(ctx context.Context) (map[string]string, error)

StorageCredentialInfoNameToIdMap calls StorageCredentialsAPI.ListAll and creates a map of results with StorageCredentialInfo.Name as key and StorageCredentialInfo.Id as value.

Returns an error if there's more than one StorageCredentialInfo with the same .Name.

Note: All StorageCredentialInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*StorageCredentialsAPI) Update

Update a credential.

Updates a storage credential on the metastore. The caller must be the owner of the storage credential or a metastore admin. If the caller is a metastore admin, only the __owner__ credential can be changed.

Example (StorageCredentialsOnAws)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.StorageCredentials.Update(ctx, catalog.UpdateStorageCredential{
	Name:    created.Name,
	Comment: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
})
if err != nil {
	panic(err)
}

// cleanup

err = w.StorageCredentials.DeleteByName(ctx, created.Name)
if err != nil {
	panic(err)
}
Output:

func (*StorageCredentialsAPI) Validate

Validate a storage credential.

Validates a storage credential. At least one of __external_location_name__ and __url__ needs to be provided. If only one of them is provided, it will be used for validation. And if both are provided, the __url__ will be used for validation, and __external_location_name__ will be ignored when checking overlapping urls.

Either the __storage_credential_name__ or the cloud-specific credential must be provided.

The caller must be a metastore admin or the storage credential owner or have the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage credential.

func (*StorageCredentialsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type StorageCredentialsService

type StorageCredentialsService interface {

	// Create a storage credential.
	//
	// Creates a new storage credential. The request object is specific to the
	// cloud:
	//
	// * **AwsIamRole** for AWS credentials. * **AzureServicePrincipal** for
	// Azure credentials. * **AzureManagedIdentity** for Azure managed
	// credentials. * **DatabricksGcpServiceAccount** for GCP managed
	// credentials.
	//
	// The caller must be a metastore admin and have the
	// **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.
	Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error)

	// Delete a credential.
	//
	// Deletes a storage credential from the metastore. The caller must be an
	// owner of the storage credential.
	Delete(ctx context.Context, request DeleteStorageCredentialRequest) error

	// Get a credential.
	//
	// Gets a storage credential from the metastore. The caller must be a
	// metastore admin, the owner of the storage credential, or have some
	// permission on the storage credential.
	Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error)

	// List credentials.
	//
	// Gets an array of storage credentials (as __StorageCredentialInfo__
	// objects). The array is limited to only those storage credentials the
	// caller has permission to access. If the caller is a metastore admin, all
	// storage credentials will be retrieved. There is no guarantee of a
	// specific ordering of the elements in the array.
	//
	// Use ListAll() to get all StorageCredentialInfo instances
	List(ctx context.Context) (*ListStorageCredentialsResponse, error)

	// Update a credential.
	//
	// Updates a storage credential on the metastore. The caller must be the
	// owner of the storage credential or a metastore admin. If the caller is a
	// metastore admin, only the __owner__ credential can be changed.
	Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error)

	// Validate a storage credential.
	//
	// Validates a storage credential. At least one of
	// __external_location_name__ and __url__ need to be provided. If only one
	// of them is provided, it will be used for validation. And if both are
	// provided, the __url__ will be used for validation, and
	// __external_location_name__ will be ignored when checking overlapping
	// urls.
	//
	// Either the __storage_credential_name__ or the cloud-specific credential
	// must be provided.
	//
	// The caller must be a metastore admin or the storage credential owner or
	// have the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the
	// storage credential.
	Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error)
}

A storage credential represents an authentication and authorization mechanism for accessing data stored on your cloud tenant. Each storage credential is subject to Unity Catalog access-control policies that control which users and groups can access the credential. If a user does not have access to a storage credential in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf.

Databricks recommends using external locations rather than using storage credentials directly.

To create storage credentials, you must be a Databricks account admin. The account admin who creates the storage credential can delegate ownership to another user or group to manage permissions on it.

type SystemSchemaInfo

type SystemSchemaInfo struct {
	// Name of the system schema.
	Schema string `json:"schema,omitempty"`
	// The current state of enablement for the system schema. An empty string
	// means the system schema is available and ready for opt-in.
	State SystemSchemaInfoState `json:"state,omitempty"`
}

type SystemSchemaInfoState

type SystemSchemaInfoState string

The current state of enablement for the system schema. An empty string means the system schema is available and ready for opt-in.

const SystemSchemaInfoStateAvailable SystemSchemaInfoState = `AVAILABLE`
const SystemSchemaInfoStateDisableInitialized SystemSchemaInfoState = `DISABLE_INITIALIZED`
const SystemSchemaInfoStateEnableCompleted SystemSchemaInfoState = `ENABLE_COMPLETED`
const SystemSchemaInfoStateEnableInitialized SystemSchemaInfoState = `ENABLE_INITIALIZED`
const SystemSchemaInfoStateUnavailable SystemSchemaInfoState = `UNAVAILABLE`

func (*SystemSchemaInfoState) Set

Set raw string value and validate it against allowed values

func (*SystemSchemaInfoState) String

func (f *SystemSchemaInfoState) String() string

String representation for fmt.Print

func (*SystemSchemaInfoState) Type

func (f *SystemSchemaInfoState) Type() string

Type always returns SystemSchemaInfoState to satisfy [pflag.Value] interface

type SystemSchemasAPI

type SystemSchemasAPI struct {
	// contains filtered or unexported fields
}

A system schema is a schema that lives within the system catalog. A system schema may contain information about customer usage of Unity Catalog such as audit-logs, billing-logs, lineage information, etc.

func NewSystemSchemas

func NewSystemSchemas(client *client.DatabricksClient) *SystemSchemasAPI

func (*SystemSchemasAPI) Disable

func (a *SystemSchemasAPI) Disable(ctx context.Context, request DisableRequest) error

Disable a system schema.

Disables the system schema and removes it from the system catalog. The caller must be an account admin or a metastore admin.

func (*SystemSchemasAPI) DisableByMetastoreIdAndSchemaName

func (a *SystemSchemasAPI) DisableByMetastoreIdAndSchemaName(ctx context.Context, metastoreId string, schemaName DisableSchemaName) error

Disable a system schema.

Disables the system schema and removes it from the system catalog. The caller must be an account admin or a metastore admin.

func (*SystemSchemasAPI) Enable

func (a *SystemSchemasAPI) Enable(ctx context.Context, request EnableRequest) error

Enable a system schema.

Enables the system schema and adds it to the system catalog. The caller must be an account admin or a metastore admin.

func (*SystemSchemasAPI) Impl

Impl returns low-level SystemSchemas API implementation

func (*SystemSchemasAPI) ListAll

List system schemas.

Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin.

This method is generated by Databricks SDK Code Generator.

func (*SystemSchemasAPI) ListByMetastoreId

func (a *SystemSchemasAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error)

List system schemas.

Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin.

func (*SystemSchemasAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type SystemSchemasService

type SystemSchemasService interface {

	// Disable a system schema.
	//
	// Disables the system schema and removes it from the system catalog. The
	// caller must be an account admin or a metastore admin.
	Disable(ctx context.Context, request DisableRequest) error

	// Enable a system schema.
	//
	// Enables the system schema and adds it to the system catalog. The caller
	// must be an account admin or a metastore admin.
	Enable(ctx context.Context, request EnableRequest) error

	// List system schemas.
	//
	// Gets an array of system schemas for a metastore. The caller must be an
	// account admin or a metastore admin.
	//
	// Use ListAll() to get all SystemSchemaInfo instances
	List(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error)
}

A system schema is a schema that lives within the system catalog. A system schema may contain information about customer usage of Unity Catalog such as audit-logs, billing-logs, lineage information, etc.

type TableConstraint

type TableConstraint struct {
	ForeignKeyConstraint *ForeignKeyConstraint `json:"foreign_key_constraint,omitempty"`

	NamedTableConstraint *NamedTableConstraint `json:"named_table_constraint,omitempty"`

	PrimaryKeyConstraint *PrimaryKeyConstraint `json:"primary_key_constraint,omitempty"`
}

A table constraint, as defined by *one* of the following fields being set: __primary_key_constraint__, __foreign_key_constraint__, __named_table_constraint__.

type TableConstraintList

type TableConstraintList struct {
	// List of table constraints.
	TableConstraints []TableConstraint `json:"table_constraints,omitempty"`
}

type TableConstraintsAPI

type TableConstraintsAPI struct {
	// contains filtered or unexported fields
}

Primary key and foreign key constraints encode relationships between fields in tables.

Primary and foreign keys are informational only and are not enforced. Foreign keys must reference a primary key in another table. This primary key is the parent constraint of the foreign key and the table this primary key is on is the parent table of the foreign key. Similarly, the foreign key is the child constraint of its referenced primary key; the table of the foreign key is the child table of the primary key.

You can declare primary keys and foreign keys as part of the table specification during table creation. You can also add or drop constraints on existing tables.

func NewTableConstraints

func NewTableConstraints(client *client.DatabricksClient) *TableConstraintsAPI

func (*TableConstraintsAPI) Create

Create a table constraint.

Creates a new table constraint.

For the table constraint creation to succeed, the user must satisfy both of these conditions: - the user must have the **USE_CATALOG** privilege on the table's parent catalog, the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. - if the new constraint is a __ForeignKeyConstraint__, the user must have the **USE_CATALOG** privilege on the referenced parent table's catalog, the **USE_SCHEMA** privilege on the referenced parent table's schema, and be the owner of the referenced parent table.

func (*TableConstraintsAPI) Delete

Delete a table constraint.

Deletes a table constraint.

For the table constraint deletion to succeed, the user must satisfy both of these conditions: - the user must have the **USE_CATALOG** privilege on the table's parent catalog, the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. - if __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: the **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** privilege on the table's schema, and be the owner of the table.

func (*TableConstraintsAPI) DeleteByFullName

func (a *TableConstraintsAPI) DeleteByFullName(ctx context.Context, fullName string) error

Delete a table constraint.

Deletes a table constraint.

For the table constraint deletion to succeed, the user must satisfy both of these conditions: - the user must have the **USE_CATALOG** privilege on the table's parent catalog, the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. - if __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: the **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** privilege on the table's schema, and be the owner of the table.

func (*TableConstraintsAPI) Impl

Impl returns low-level TableConstraints API implementation

func (*TableConstraintsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type TableConstraintsService

type TableConstraintsService interface {

	// Create a table constraint.
	//
	// Creates a new table constraint.
	//
	// For the table constraint creation to succeed, the user must satisfy both
	// of these conditions: - the user must have the **USE_CATALOG** privilege
	// on the table's parent catalog, the **USE_SCHEMA** privilege on the
	// table's parent schema, and be the owner of the table. - if the new
	// constraint is a __ForeignKeyConstraint__, the user must have the
	// **USE_CATALOG** privilege on the referenced parent table's catalog, the
	// **USE_SCHEMA** privilege on the referenced parent table's schema, and be
	// the owner of the referenced parent table.
	Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error)

	// Delete a table constraint.
	//
	// Deletes a table constraint.
	//
	// For the table constraint deletion to succeed, the user must satisfy both
	// of these conditions: - the user must have the **USE_CATALOG** privilege
	// on the table's parent catalog, the **USE_SCHEMA** privilege on the
	// table's parent schema, and be the owner of the table. - if __cascade__
	// argument is **true**, the user must have the following permissions on all
	// of the child tables: the **USE_CATALOG** privilege on the table's
	// catalog, the **USE_SCHEMA** privilege on the table's schema, and be the
	// owner of the table.
	Delete(ctx context.Context, request DeleteTableConstraintRequest) error
}

Primary key and foreign key constraints encode relationships between fields in tables.

Primary and foreign keys are informational only and are not enforced. Foreign keys must reference a primary key in another table. This primary key is the parent constraint of the foreign key and the table this primary key is on is the parent table of the foreign key. Similarly, the foreign key is the child constraint of its referenced primary key; the table of the foreign key is the child table of the primary key.

You can declare primary keys and foreign keys as part of the table specification during table creation. You can also add or drop constraints on existing tables.

type TableDependency

type TableDependency struct {
	// Full name of the dependent table, in the form of
	// __catalog_name__.__schema_name__.__table_name__.
	TableFullName string `json:"table_full_name"`
}

A table that is dependent on a SQL object.

type TableInfo

type TableInfo struct {
	// Name of parent catalog.
	CatalogName string `json:"catalog_name,omitempty"`
	// The array of __ColumnInfo__ definitions of the table's columns.
	Columns []ColumnInfo `json:"columns,omitempty"`
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Time at which this table was created, in epoch milliseconds.
	CreatedAt int64 `json:"created_at,omitempty"`
	// Username of table creator.
	CreatedBy string `json:"created_by,omitempty"`
	// Unique ID of the Data Access Configuration to use with the table data.
	DataAccessConfigurationId string `json:"data_access_configuration_id,omitempty"`
	// Data source format
	DataSourceFormat DataSourceFormat `json:"data_source_format,omitempty"`
	// Time at which this table was deleted, in epoch milliseconds. Field is
	// omitted if table is not deleted.
	DeletedAt int64 `json:"deleted_at,omitempty"`
	// Information pertaining to current state of the delta table.
	DeltaRuntimePropertiesKvpairs *DeltaRuntimePropertiesKvPairs `json:"delta_runtime_properties_kvpairs,omitempty"`

	EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"`
	// Whether auto maintenance should be enabled for this object and objects
	// under it.
	EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"`
	// Full name of table, in form of
	// __catalog_name__.__schema_name__.__table_name__
	FullName string `json:"full_name,omitempty"`
	// Unique identifier of parent metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// Name of table, relative to parent schema.
	Name string `json:"name,omitempty"`
	// Username of current owner of table.
	Owner string `json:"owner,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`

	RowFilter *TableRowFilter `json:"row_filter,omitempty"`
	// Name of parent schema relative to its parent catalog.
	SchemaName string `json:"schema_name,omitempty"`
	// List of schemas whose objects can be referenced without qualification.
	SqlPath string `json:"sql_path,omitempty"`
	// Name of the storage credential, when a storage credential is configured
	// for use with this table.
	StorageCredentialName string `json:"storage_credential_name,omitempty"`
	// Storage root URL for table (for **MANAGED**, **EXTERNAL** tables)
	StorageLocation string `json:"storage_location,omitempty"`

	TableConstraints *TableConstraintList `json:"table_constraints,omitempty"`
	// The unique identifier of the table.
	TableId string `json:"table_id,omitempty"`

	TableType TableType `json:"table_type,omitempty"`
	// Time at which this table was last modified, in epoch milliseconds.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// Username of user who last modified the table.
	UpdatedBy string `json:"updated_by,omitempty"`
	// View definition SQL (when __table_type__ is **VIEW**,
	// **MATERIALIZED_VIEW**, or **STREAMING_TABLE**)
	ViewDefinition string `json:"view_definition,omitempty"`
	// View dependencies (when __table_type__ is **VIEW**, **MATERIALIZED_VIEW**,
	// or **STREAMING_TABLE**) - when DependencyList is None, the dependency is not
	// provided; - when DependencyList is an empty list, the dependency is
	// provided but is empty; - when DependencyList is not an empty list,
	// dependencies are provided and recorded.
	ViewDependencies []Dependency `json:"view_dependencies,omitempty"`
}

type TableRowFilter

type TableRowFilter struct {
	// The list of table columns to be passed as input to the row filter
	// function. The column types should match the types of the filter function
	// arguments.
	InputColumnNames []string `json:"input_column_names"`
	// The full name of the row filter SQL UDF.
	Name string `json:"name"`
}

type TableSummary

type TableSummary struct {
	// The full name of the table.
	FullName string `json:"full_name,omitempty"`

	TableType TableType `json:"table_type,omitempty"`
}

type TableType

type TableType string
const TableTypeExternal TableType = `EXTERNAL`
const TableTypeManaged TableType = `MANAGED`
const TableTypeMaterializedView TableType = `MATERIALIZED_VIEW`
const TableTypeStreamingTable TableType = `STREAMING_TABLE`
const TableTypeView TableType = `VIEW`

func (*TableType) Set

func (f *TableType) Set(v string) error

Set raw string value and validate it against allowed values

func (*TableType) String

func (f *TableType) String() string

String representation for fmt.Print

func (*TableType) Type

func (f *TableType) Type() string

Type always returns TableType to satisfy [pflag.Value] interface

type TablesAPI

type TablesAPI struct {
	// contains filtered or unexported fields
}

A table resides in the third layer of Unity Catalog’s three-level namespace. It contains rows of data. To create a table, users must have CREATE_TABLE and USE_SCHEMA permissions on the schema, and they must have the USE_CATALOG permission on its parent catalog. To query a table, users must have the SELECT permission on the table, and they must have the USE_CATALOG permission on its parent catalog and the USE_SCHEMA permission on its parent schema.

A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table).

func NewTables

func NewTables(client *client.DatabricksClient) *TablesAPI

func (*TablesAPI) Delete

func (a *TablesAPI) Delete(ctx context.Context, request DeleteTableRequest) error

Delete a table.

Deletes a table from the specified parent catalog and schema. The caller must be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*TablesAPI) DeleteByFullName

func (a *TablesAPI) DeleteByFullName(ctx context.Context, fullName string) error

Delete a table.

Deletes a table from the specified parent catalog and schema. The caller must be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*TablesAPI) Get

func (a *TablesAPI) Get(ctx context.Context, request GetTableRequest) (*TableInfo, error)

Get a table.

Gets a table from the metastore for a specific catalog and schema. The caller must be a metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, or be the owner of the table and have the **SELECT** privilege on it as well.

Example (Tables)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

tableName := fmt.Sprintf("sdk-%x", time.Now().UnixNano())

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

_, err = w.StatementExecution.ExecuteAndWait(ctx, sql.ExecuteStatementRequest{
	WarehouseId: os.Getenv("TEST_DEFAULT_WAREHOUSE_ID"),
	Catalog:     createdCatalog.Name,
	Schema:      createdSchema.Name,
	Statement:   fmt.Sprintf("CREATE TABLE %s AS SELECT 2+2 as four", tableName),
})
if err != nil {
	panic(err)
}

tableFullName := fmt.Sprintf("%s.%s.%s", createdCatalog.Name, createdSchema.Name, tableName)

createdTable, err := w.Tables.GetByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdTable)

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Tables.DeleteByFullName(ctx, tableFullName)
if err != nil {
	panic(err)
}
Output:

func (*TablesAPI) GetByFullName

func (a *TablesAPI) GetByFullName(ctx context.Context, fullName string) (*TableInfo, error)

Get a table.

Gets a table from the metastore for a specific catalog and schema. The caller must be a metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, or be the owner of the table and have the **SELECT** privilege on it as well.

func (*TablesAPI) GetByName

func (a *TablesAPI) GetByName(ctx context.Context, name string) (*TableInfo, error)

GetByName calls TablesAPI.TableInfoNameToTableIdMap and returns a single TableInfo.

Returns an error if there's more than one TableInfo with the same .Name.

Note: All TableInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*TablesAPI) Impl

func (a *TablesAPI) Impl() TablesService

Impl returns low-level Tables API implementation

func (*TablesAPI) ListAll

func (a *TablesAPI) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error)

List tables.

Gets an array of all tables for the current metastore under the parent catalog and schema. The caller must be a metastore admin or an owner of (or have the **SELECT** privilege on) the table. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (Tables)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

allTables, err := w.Tables.ListAll(ctx, catalog.ListTablesRequest{
	CatalogName: createdCatalog.Name,
	SchemaName:  createdSchema.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", allTables)

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*TablesAPI) ListSummariesAll

func (a *TablesAPI) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error)

List table summaries.

Gets an array of summaries for tables for a schema and catalog within the metastore. The table summaries returned are either:

* summaries for all tables (within the current metastore and parent catalog and schema), when the user is a metastore admin, or: * summaries for all tables and schemas (within the current metastore and parent catalog) for which the user has ownership or the **SELECT** privilege on the table and ownership or **USE_SCHEMA** privilege on the schema, provided that the user also has ownership or the **USE_CATALOG** privilege on the parent catalog.

There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

func (*TablesAPI) TableInfoNameToTableIdMap

func (a *TablesAPI) TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error)

TableInfoNameToTableIdMap calls TablesAPI.ListAll and creates a map of results with TableInfo.Name as key and TableInfo.TableId as value.

Returns an error if there's more than one TableInfo with the same .Name.

Note: All TableInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*TablesAPI) Update

func (a *TablesAPI) Update(ctx context.Context, request UpdateTableRequest) error

Update a table owner.

Change the owner of the table. The caller must be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*TablesAPI) WithImpl

func (a *TablesAPI) WithImpl(impl TablesService) *TablesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type TablesService

type TablesService interface {

	// Delete a table.
	//
	// Deletes a table from the specified parent catalog and schema. The caller
	// must be the owner of the parent catalog, have the **USE_CATALOG**
	// privilege on the parent catalog and be the owner of the parent schema, or
	// be the owner of the table and have the **USE_CATALOG** privilege on the
	// parent catalog and the **USE_SCHEMA** privilege on the parent schema.
	Delete(ctx context.Context, request DeleteTableRequest) error

	// Get a table.
	//
	// Gets a table from the metastore for a specific catalog and schema. The
	// caller must be a metastore admin, be the owner of the table and have the
	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
	// privilege on the parent schema, or be the owner of the table and have the
	// **SELECT** privilege on it as well.
	Get(ctx context.Context, request GetTableRequest) (*TableInfo, error)

	// List tables.
	//
	// Gets an array of all tables for the current metastore under the parent
	// catalog and schema. The caller must be a metastore admin or an owner of
	// (or have the **SELECT** privilege on) the table. For the latter case, the
	// caller must also be the owner or have the **USE_CATALOG** privilege on
	// the parent catalog and the **USE_SCHEMA** privilege on the parent schema.
	// There is no guarantee of a specific ordering of the elements in the
	// array.
	//
	// Use ListAll() to get all TableInfo instances, which will iterate over every result page.
	List(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error)

	// List table summaries.
	//
	// Gets an array of summaries for tables for a schema and catalog within the
	// metastore. The table summaries returned are either:
	//
	// * summaries for all tables (within the current metastore and parent
	// catalog and schema), when the user is a metastore admin, or: * summaries
	// for all tables and schemas (within the current metastore and parent
	// catalog) for which the user has ownership or the **SELECT** privilege on
	// the table and ownership or **USE_SCHEMA** privilege on the schema,
	// provided that the user also has ownership or the **USE_CATALOG**
	// privilege on the parent catalog.
	//
	// There is no guarantee of a specific ordering of the elements in the
	// array.
	//
	// Use ListSummariesAll() to get all TableSummary instances, which will iterate over every result page.
	ListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error)

	// Update a table owner.
	//
	// Change the owner of the table. The caller must be the owner of the parent
	// catalog, have the **USE_CATALOG** privilege on the parent catalog and be
	// the owner of the parent schema, or be the owner of the table and have the
	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
	// privilege on the parent schema.
	Update(ctx context.Context, request UpdateTableRequest) error
}

A table resides in the third layer of Unity Catalog’s three-level namespace. It contains rows of data. To create a table, users must have CREATE_TABLE and USE_SCHEMA permissions on the schema, and they must have the USE_CATALOG permission on its parent catalog. To query a table, users must have the SELECT permission on the table, and they must have the USE_CATALOG permission on its parent catalog and the USE_SCHEMA permission on its parent schema.

A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table).

type UnassignRequest

type UnassignRequest struct {
	// The ID of the metastore to unassign from the workspace.
	MetastoreId string `json:"-" url:"metastore_id"`
	// A workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

Delete an assignment

type UpdateCatalog

type UpdateCatalog struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Whether the current securable is accessible from all workspaces or a
	// specific set of workspaces.
	IsolationMode IsolationMode `json:"isolation_mode,omitempty"`
	// Name of catalog.
	Name string `json:"name,omitempty" url:"-"`
	// Username of current owner of catalog.
	Owner string `json:"owner,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
}

type UpdateConnection

type UpdateConnection struct {
	// Name of the connection.
	Name string `json:"name"`
	// Name of the connection.
	NameArg string `json:"-" url:"-"`
	// A map of key-value properties attached to the securable.
	OptionsKvpairs map[string]string `json:"options_kvpairs"`
}

type UpdateExternalLocation

// UpdateExternalLocation is the request payload for updating an external
// location.
type UpdateExternalLocation struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Name of the storage credential used with this location.
	CredentialName string `json:"credential_name,omitempty"`
	// Force update even if changing url invalidates dependent external tables
	// or mounts.
	Force bool `json:"force,omitempty"`
	// Name of the external location.
	Name string `json:"name,omitempty" url:"-"`
	// The owner of the external location.
	Owner string `json:"owner,omitempty"`
	// Indicates whether the external location is read-only.
	ReadOnly bool `json:"read_only,omitempty"`
	// Path URL of the external location.
	Url string `json:"url,omitempty"`
}

type UpdateFunction

// UpdateFunction is the request payload for updating a function.
type UpdateFunction struct {
	// The fully-qualified name of the function (of the form
	// __catalog_name__.__schema_name__.__function_name__).
	Name string `json:"-" url:"-"`
	// Username of current owner of function.
	Owner string `json:"owner,omitempty"`
}

type UpdateMetastore

// UpdateMetastore is the request payload for updating a metastore.
type UpdateMetastore struct {
	// The organization name of a Delta Sharing entity, to be used in
	// Databricks-to-Databricks Delta Sharing as the official name.
	DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
	// The lifetime of delta sharing recipient token in seconds.
	DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
	// The scope of Delta Sharing enabled for the metastore.
	DeltaSharingScope UpdateMetastoreDeltaSharingScope `json:"delta_sharing_scope,omitempty"`
	// Unique ID of the metastore.
	Id string `json:"-" url:"-"`
	// The user-specified name of the metastore.
	Name string `json:"name,omitempty"`
	// The owner of the metastore.
	Owner string `json:"owner,omitempty"`
	// Privilege model version of the metastore, of the form `major.minor`
	// (e.g., `1.0`).
	PrivilegeModelVersion string `json:"privilege_model_version,omitempty"`
	// UUID of storage credential to access the metastore storage_root.
	StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"`
}

type UpdateMetastoreAssignment

// UpdateMetastoreAssignment is the request payload for updating a metastore
// assignment for a workspace.
type UpdateMetastoreAssignment struct {
	// The name of the default catalog for the metastore.
	DefaultCatalogName string `json:"default_catalog_name,omitempty"`
	// The unique ID of the metastore.
	MetastoreId string `json:"metastore_id,omitempty"`
	// A workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type UpdateMetastoreDeltaSharingScope

type UpdateMetastoreDeltaSharingScope string

The scope of Delta Sharing enabled for the metastore.

const UpdateMetastoreDeltaSharingScopeInternal UpdateMetastoreDeltaSharingScope = `INTERNAL`
const UpdateMetastoreDeltaSharingScopeInternalAndExternal UpdateMetastoreDeltaSharingScope = `INTERNAL_AND_EXTERNAL`

func (*UpdateMetastoreDeltaSharingScope) Set

Set raw string value and validate it against allowed values

func (*UpdateMetastoreDeltaSharingScope) String

String representation for fmt.Print

func (*UpdateMetastoreDeltaSharingScope) Type

Type always returns UpdateMetastoreDeltaSharingScope to satisfy [pflag.Value] interface

type UpdatePermissions

// UpdatePermissions is the request payload for updating the permissions on a
// securable.
type UpdatePermissions struct {
	// Array of permissions change objects.
	Changes []PermissionsChange `json:"changes,omitempty"`
	// Full name of securable.
	FullName string `json:"-" url:"-"`
	// Type of securable.
	SecurableType SecurableType `json:"-" url:"-"`
}

type UpdatePredictiveOptimization

// UpdatePredictiveOptimization is the request payload for enabling or
// disabling predictive optimization on a metastore.
type UpdatePredictiveOptimization struct {
	// Whether to enable predictive optimization on the metastore.
	Enable bool `json:"enable"`
	// Unique identifier of metastore.
	MetastoreId string `json:"metastore_id"`
}

type UpdatePredictiveOptimizationResponse

// UpdatePredictiveOptimizationResponse is the response returned after
// updating predictive optimization on a metastore.
type UpdatePredictiveOptimizationResponse struct {
	// Whether predictive optimization is enabled on the metastore.
	State bool `json:"state,omitempty"`
	// Id of the predictive optimization service principal. This will be the
	// user used to run optimization tasks.
	UserId int64 `json:"user_id,omitempty"`
	// Name of the predictive optimization service principal.
	Username string `json:"username,omitempty"`
}

type UpdateSchema

// UpdateSchema is the request payload for updating a schema.
type UpdateSchema struct {
	// User-provided free-form text description.
	Comment string `json:"comment,omitempty"`
	// Full name of the schema.
	FullName string `json:"-" url:"-"`
	// Name of schema, relative to parent catalog.
	Name string `json:"name,omitempty"`
	// Username of current owner of schema.
	Owner string `json:"owner,omitempty"`
	// A map of key-value properties attached to the securable.
	Properties map[string]string `json:"properties,omitempty"`
}

type UpdateStorageCredential

// UpdateStorageCredential is the request payload for updating a storage
// credential. Exactly one of the cloud-specific credential configurations is
// presumably expected — confirm against the service documentation.
type UpdateStorageCredential struct {
	// The AWS IAM role configuration.
	AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
	// The Azure managed identity configuration.
	AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
	// The Azure service principal configuration.
	AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
	// Comment associated with the credential.
	Comment string `json:"comment,omitempty"`
	// The Databricks-managed GCP service account configuration. Typed as
	// `any` — the concrete shape is not visible here; confirm before use.
	DatabricksGcpServiceAccount any `json:"databricks_gcp_service_account,omitempty"`
	// Force update even if there are dependent external locations or external
	// tables.
	Force bool `json:"force,omitempty"`
	// The credential name. The name must be unique within the metastore.
	Name string `json:"name,omitempty" url:"-"`
	// Username of current owner of credential.
	Owner string `json:"owner,omitempty"`
	// Whether the storage credential is only usable for read operations.
	ReadOnly bool `json:"read_only,omitempty"`
	// Supplying true to this argument skips validation of the updated
	// credential.
	SkipValidation bool `json:"skip_validation,omitempty"`
}

type UpdateTableRequest

// UpdateTableRequest is the request payload for updating the owner of a
// table.
type UpdateTableRequest struct {
	// Full name of the table.
	FullName string `json:"-" url:"-"`
	// Username of the new owner of the table.
	Owner string `json:"owner,omitempty"`
}

Update a table owner.

type UpdateVolumeRequestContent

// UpdateVolumeRequestContent is the request payload for updating a volume.
type UpdateVolumeRequestContent struct {
	// The comment attached to the volume
	Comment string `json:"comment,omitempty"`
	// The three-level (fully qualified) name of the volume
	FullNameArg string `json:"-" url:"-"`
	// The name of the volume
	Name string `json:"name,omitempty"`
	// The identifier of the user who owns the volume
	Owner string `json:"owner,omitempty"`
}

type UpdateWorkspaceBindings

// UpdateWorkspaceBindings is the request payload for updating the workspace
// bindings of a catalog.
type UpdateWorkspaceBindings struct {
	// A list of workspace IDs to bind to the catalog.
	AssignWorkspaces []int64 `json:"assign_workspaces,omitempty"`
	// The name of the catalog.
	Name string `json:"-" url:"-"`
	// A list of workspace IDs to unbind from the catalog.
	UnassignWorkspaces []int64 `json:"unassign_workspaces,omitempty"`
}

type ValidateStorageCredential

// ValidateStorageCredential is the request payload for validating a storage
// credential against a storage location.
type ValidateStorageCredential struct {
	// The AWS IAM role configuration.
	AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
	// The Azure managed identity configuration.
	AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
	// The Azure service principal configuration.
	AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
	// The Databricks created GCP service account configuration. Typed as
	// `any` — the concrete shape is not visible here; confirm before use.
	DatabricksGcpServiceAccount any `json:"databricks_gcp_service_account,omitempty"`
	// The name of an existing external location to validate.
	ExternalLocationName string `json:"external_location_name,omitempty"`
	// Whether the storage credential is only usable for read operations.
	ReadOnly bool `json:"read_only,omitempty"`
	// The name of the storage credential to validate. Typed as `any` rather
	// than string — presumably a generator artifact; confirm expected type.
	StorageCredentialName any `json:"storage_credential_name,omitempty"`
	// The external location url to validate.
	Url string `json:"url,omitempty"`
}

type ValidateStorageCredentialResponse

// ValidateStorageCredentialResponse is the result of a storage credential
// validation request.
type ValidateStorageCredentialResponse struct {
	// Whether the tested location is a directory in cloud storage.
	IsDir bool `json:"isDir,omitempty"`
	// The results of the validation check.
	Results []ValidationResult `json:"results,omitempty"`
}

type ValidationResult

// ValidationResult describes the outcome of a single validated operation.
type ValidationResult struct {
	// Error message; present when the result is not **PASS**.
	Message string `json:"message,omitempty"`
	// The operation tested.
	Operation ValidationResultOperation `json:"operation,omitempty"`
	// The results of the tested operation.
	Result ValidationResultResult `json:"result,omitempty"`
}

type ValidationResultOperation

type ValidationResultOperation string

The operation tested.

const ValidationResultOperationDelete ValidationResultOperation = `DELETE`
const ValidationResultOperationList ValidationResultOperation = `LIST`
const ValidationResultOperationRead ValidationResultOperation = `READ`
const ValidationResultOperationWrite ValidationResultOperation = `WRITE`

func (*ValidationResultOperation) Set

Set raw string value and validate it against allowed values

func (*ValidationResultOperation) String

func (f *ValidationResultOperation) String() string

String representation for fmt.Print

func (*ValidationResultOperation) Type

Type always returns ValidationResultOperation to satisfy [pflag.Value] interface

type ValidationResultResult

type ValidationResultResult string

The results of the tested operation.

const ValidationResultResultFail ValidationResultResult = `FAIL`
const ValidationResultResultPass ValidationResultResult = `PASS`
const ValidationResultResultSkip ValidationResultResult = `SKIP`

func (*ValidationResultResult) Set

Set raw string value and validate it against allowed values

func (*ValidationResultResult) String

func (f *ValidationResultResult) String() string

String representation for fmt.Print

func (*ValidationResultResult) Type

func (f *ValidationResultResult) Type() string

Type always returns ValidationResultResult to satisfy [pflag.Value] interface

type VolumeInfo

// VolumeInfo describes a Unity Catalog volume.
type VolumeInfo struct {
	// The name of the catalog where the schema and the volume are
	CatalogName string `json:"catalog_name,omitempty"`
	// The comment attached to the volume
	Comment string `json:"comment,omitempty"`
	// Time at which this volume was created — presumably an epoch
	// timestamp; confirm units (seconds vs. milliseconds) with the API.
	CreatedAt int64 `json:"created_at,omitempty"`
	// The identifier of the user who created the volume
	CreatedBy string `json:"created_by,omitempty"`
	// The three-level (fully qualified) name of the volume
	FullName string `json:"full_name,omitempty"`
	// The unique identifier of the metastore
	MetastoreId string `json:"metastore_id,omitempty"`
	// The name of the volume
	Name string `json:"name,omitempty"`
	// The identifier of the user who owns the volume
	Owner string `json:"owner,omitempty"`
	// The name of the schema where the volume is
	SchemaName string `json:"schema_name,omitempty"`
	// The storage location on the cloud
	StorageLocation string `json:"storage_location,omitempty"`
	// Time at which this volume was last updated — presumably an epoch
	// timestamp; confirm units with the API.
	UpdatedAt int64 `json:"updated_at,omitempty"`
	// The identifier of the user who updated the volume last time
	UpdatedBy string `json:"updated_by,omitempty"`
	// The unique identifier of the volume
	VolumeId string `json:"volume_id,omitempty"`
	// Whether the volume is MANAGED or EXTERNAL (see VolumeType constants).
	VolumeType VolumeType `json:"volume_type,omitempty"`
}

type VolumeType

// VolumeType indicates whether a volume is managed or external.
type VolumeType string

// Allowed values of VolumeType.
const VolumeTypeExternal VolumeType = `EXTERNAL`
const VolumeTypeManaged VolumeType = `MANAGED`

func (*VolumeType) Set

func (f *VolumeType) Set(v string) error

Set raw string value and validate it against allowed values

func (*VolumeType) String

func (f *VolumeType) String() string

String representation for fmt.Print

func (*VolumeType) Type

func (f *VolumeType) Type() string

Type always returns VolumeType to satisfy [pflag.Value] interface

type VolumesAPI

type VolumesAPI struct {
	// contains filtered or unexported fields
}

Volumes are a Unity Catalog (UC) capability for accessing, storing, governing, organizing and processing files. Use cases include running machine learning on unstructured data such as image, audio, video, or PDF files, organizing data sets during the data exploration stages in data science, working with libraries that require access to the local file system on cluster machines, storing library and config files of arbitrary formats such as .whl or .txt centrally and providing secure access across workspaces to it, or transforming and querying non-tabular data files in ETL.

func NewVolumes

func NewVolumes(client *client.DatabricksClient) *VolumesAPI

func (*VolumesAPI) Create

Create a Volume.

Creates a new volume.

The user could create either an external volume or a managed volume. An external volume will be created in the specified external location, while a managed volume will be located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore.

For the volume creation to succeed, the user must satisfy the following conditions: - The caller must be a metastore admin, or be the owner of the parent catalog and schema, or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have **CREATE VOLUME** privilege on the parent schema.

For an external volume, the following conditions also need to be satisfied: - The caller must have **CREATE EXTERNAL VOLUME** privilege on the external location. - There are no other tables, nor volumes existing in the specified storage location. - The specified storage location is not under the location of other tables, nor volumes, or catalogs or schemas.

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

storageCredential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
	Comment: "created via SDK",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storageCredential)

externalLocation, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: storageCredential.Name,
	Comment:        "created via SDK",
	Url:            "s3://" + os.Getenv("TEST_BUCKET") + "/" + fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", externalLocation)

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

createdVolume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{
	CatalogName:     createdCatalog.Name,
	SchemaName:      createdSchema.Name,
	Name:            fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageLocation: externalLocation.Url,
	VolumeType:      catalog.VolumeTypeExternal,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdVolume)

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Volumes.DeleteByFullNameArg(ctx, createdVolume.FullName)
if err != nil {
	panic(err)
}
Output:

func (*VolumesAPI) Delete

func (a *VolumesAPI) Delete(ctx context.Context, request DeleteVolumeRequest) error

Delete a Volume.

Deletes a volume from the specified parent catalog and schema.

The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*VolumesAPI) DeleteByFullNameArg

func (a *VolumesAPI) DeleteByFullNameArg(ctx context.Context, fullNameArg string) error

Delete a Volume.

Deletes a volume from the specified parent catalog and schema.

The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*VolumesAPI) GetByName

func (a *VolumesAPI) GetByName(ctx context.Context, name string) (*VolumeInfo, error)

GetByName calls VolumesAPI.VolumeInfoNameToVolumeIdMap and returns a single VolumeInfo.

Returns an error if there's more than one VolumeInfo with the same .Name.

Note: All VolumeInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*VolumesAPI) Impl

func (a *VolumesAPI) Impl() VolumesService

Impl returns low-level Volumes API implementation

func (*VolumesAPI) ListAll

func (a *VolumesAPI) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error)

List Volumes.

Gets an array of all volumes for the current metastore under the parent catalog and schema.

The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular user needs to be the owner or have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

There is no guarantee of a specific ordering of the elements in the array.

This method is generated by Databricks SDK Code Generator.

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

allVolumes, err := w.Volumes.ListAll(ctx, catalog.ListVolumesRequest{
	CatalogName: createdCatalog.Name,
	SchemaName:  createdSchema.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", allVolumes)

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*VolumesAPI) Read

func (a *VolumesAPI) Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error)

Get a Volume.

Gets a volume from the metastore for a specific catalog and schema.

The caller must be a metastore admin or an owner of (or have the **READ VOLUME** privilege on) the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

storageCredential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
	Comment: "created via SDK",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storageCredential)

externalLocation, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: storageCredential.Name,
	Comment:        "created via SDK",
	Url:            "s3://" + os.Getenv("TEST_BUCKET") + "/" + fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", externalLocation)

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

createdVolume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{
	CatalogName:     createdCatalog.Name,
	SchemaName:      createdSchema.Name,
	Name:            fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageLocation: externalLocation.Url,
	VolumeType:      catalog.VolumeTypeExternal,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdVolume)

loadedVolume, err := w.Volumes.ReadByFullNameArg(ctx, createdVolume.FullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", loadedVolume)

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Volumes.DeleteByFullNameArg(ctx, createdVolume.FullName)
if err != nil {
	panic(err)
}
Output:

func (*VolumesAPI) ReadByFullNameArg

func (a *VolumesAPI) ReadByFullNameArg(ctx context.Context, fullNameArg string) (*VolumeInfo, error)

Get a Volume.

Gets a volume from the metastore for a specific catalog and schema.

The caller must be a metastore admin or an owner of (or have the **READ VOLUME** privilege on) the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

func (*VolumesAPI) Update

Update a Volume.

Updates the specified volume under the specified parent catalog and schema.

The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema.

Currently only the name, the owner or the comment of the volume could be updated.

Example (Volumes)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

storageCredential, err := w.StorageCredentials.Create(ctx, catalog.CreateStorageCredential{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsIamRole: &catalog.AwsIamRole{
		RoleArn: os.Getenv("TEST_METASTORE_DATA_ACCESS_ARN"),
	},
	Comment: "created via SDK",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storageCredential)

externalLocation, err := w.ExternalLocations.Create(ctx, catalog.CreateExternalLocation{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CredentialName: storageCredential.Name,
	Comment:        "created via SDK",
	Url:            "s3://" + os.Getenv("TEST_BUCKET") + "/" + fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", externalLocation)

createdCatalog, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdCatalog)

createdSchema, err := w.Schemas.Create(ctx, catalog.CreateSchema{
	Name:        fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	CatalogName: createdCatalog.Name,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdSchema)

createdVolume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{
	CatalogName:     createdCatalog.Name,
	SchemaName:      createdSchema.Name,
	Name:            fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	StorageLocation: externalLocation.Url,
	VolumeType:      catalog.VolumeTypeExternal,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", createdVolume)

loadedVolume, err := w.Volumes.ReadByFullNameArg(ctx, createdVolume.FullName)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", loadedVolume)

_, err = w.Volumes.Update(ctx, catalog.UpdateVolumeRequestContent{
	FullNameArg: loadedVolume.FullName,
	Comment:     "Updated volume comment",
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Schemas.DeleteByFullName(ctx, createdSchema.FullName)
if err != nil {
	panic(err)
}
err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  createdCatalog.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
err = w.Volumes.DeleteByFullNameArg(ctx, createdVolume.FullName)
if err != nil {
	panic(err)
}
Output:

func (*VolumesAPI) VolumeInfoNameToVolumeIdMap

func (a *VolumesAPI) VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error)

VolumeInfoNameToVolumeIdMap calls VolumesAPI.ListAll and creates a map of results with VolumeInfo.Name as key and VolumeInfo.VolumeId as value.

Returns an error if there's more than one VolumeInfo with the same .Name.

Note: All VolumeInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*VolumesAPI) WithImpl

func (a *VolumesAPI) WithImpl(impl VolumesService) *VolumesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type VolumesService

// VolumesService is the low-level interface for the Volumes API.
type VolumesService interface {

	// Create a Volume.
	//
	// Creates a new volume.
	//
	// The user could create either an external volume or a managed volume. An
	// external volume will be created in the specified external location, while
	// a managed volume will be located in the default location which is
	// specified by the parent schema, or the parent catalog, or the Metastore.
	//
	// For the volume creation to succeed, the user must satisfy the following
	// conditions: - The caller must be a metastore admin, or be the owner of
	// the parent catalog and schema, or have the **USE_CATALOG** privilege on
	// the parent catalog and the **USE_SCHEMA** privilege on the parent schema.
	// - The caller must have **CREATE VOLUME** privilege on the parent schema.
	//
	// For an external volume, the following conditions also need to be
	// satisfied: - The caller must have **CREATE EXTERNAL VOLUME** privilege
	// on the external location. - There are no other tables, nor volumes
	// existing in the specified storage location. - The specified storage
	// location is not under the location of other tables, nor volumes, or
	// catalogs or schemas.
	Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error)

	// Delete a Volume.
	//
	// Deletes a volume from the specified parent catalog and schema.
	//
	// The caller must be a metastore admin or an owner of the volume. For the
	// latter case, the caller must also be the owner or have the
	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
	// privilege on the parent schema.
	Delete(ctx context.Context, request DeleteVolumeRequest) error

	// List Volumes.
	//
	// Gets an array of all volumes for the current metastore under the parent
	// catalog and schema.
	//
	// The returned volumes are filtered based on the privileges of the calling
	// user. For example, the metastore admin is able to list all the volumes. A
	// regular user needs to be the owner or have the **READ VOLUME** privilege
	// on the volume to receive the volumes in the response. For the latter
	// case, the caller must also be the owner or have the **USE_CATALOG**
	// privilege on the parent catalog and the **USE_SCHEMA** privilege on the
	// parent schema.
	//
	// There is no guarantee of a specific ordering of the elements in the
	// array.
	//
	// Use ListAll() to get all VolumeInfo instances
	List(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error)

	// Get a Volume.
	//
	// Gets a volume from the metastore for a specific catalog and schema.
	//
	// The caller must be a metastore admin or an owner of (or have the **READ
	// VOLUME** privilege on) the volume. For the latter case, the caller must
	// also be the owner or have the **USE_CATALOG** privilege on the parent
	// catalog and the **USE_SCHEMA** privilege on the parent schema.
	Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error)

	// Update a Volume.
	//
	// Updates the specified volume under the specified parent catalog and
	// schema.
	//
	// The caller must be a metastore admin or an owner of the volume. For the
	// latter case, the caller must also be the owner or have the
	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
	// privilege on the parent schema.
	//
	// Currently only the name, the owner or the comment of the volume could be
	// updated.
	Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error)
}

Volumes are a Unity Catalog (UC) capability for accessing, storing, governing, organizing and processing files. Use cases include running machine learning on unstructured data such as image, audio, video, or PDF files, organizing data sets during the data exploration stages in data science, working with libraries that require access to the local file system on cluster machines, storing library and config files of arbitrary formats such as .whl or .txt centrally and providing secure access across workspaces to it, or transforming and querying non-tabular data files in ETL.

type WorkspaceBindingsAPI

type WorkspaceBindingsAPI struct {
	// contains filtered or unexported fields
}

A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__. An __OPEN__ catalog can be accessed from any workspace, while an __ISOLATED__ catalog can only be accessed from a configured list of workspaces.

A catalog's workspace bindings can be configured by a metastore admin or the owner of the catalog.

func NewWorkspaceBindings

func NewWorkspaceBindings(client *client.DatabricksClient) *WorkspaceBindingsAPI

func (*WorkspaceBindingsAPI) Get

Get catalog workspace bindings.

Gets workspace bindings of the catalog. The caller must be a metastore admin or an owner of the catalog.

Example (CatalogWorkspaceBindings)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

bindings, err := w.WorkspaceBindings.GetByName(ctx, created.Name)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", bindings)

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*WorkspaceBindingsAPI) GetByName

Get catalog workspace bindings.

Gets workspace bindings of the catalog. The caller must be a metastore admin or an owner of the catalog.

func (*WorkspaceBindingsAPI) Impl

Impl returns low-level WorkspaceBindings API implementation

func (*WorkspaceBindingsAPI) Update

Update catalog workspace bindings.

Updates workspace bindings of the catalog. The caller must be a metastore admin or an owner of the catalog.

Example (CatalogWorkspaceBindings)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

thisWorkspaceId := func(v string) int64 {
	i, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("`%s` is not int64: %s", v, err))
	}
	return i
}(os.Getenv("THIS_WORKSPACE_ID"))

created, err := w.Catalogs.Create(ctx, catalog.CreateCatalog{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.WorkspaceBindings.Update(ctx, catalog.UpdateWorkspaceBindings{
	Name:             created.Name,
	AssignWorkspaces: []int64{thisWorkspaceId},
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Catalogs.Delete(ctx, catalog.DeleteCatalogRequest{
	Name:  created.Name,
	Force: true,
})
if err != nil {
	panic(err)
}
Output:

func (*WorkspaceBindingsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type WorkspaceBindingsService

// WorkspaceBindingsService is the low-level interface for the Workspace
// Bindings API.
type WorkspaceBindingsService interface {

	// Get catalog workspace bindings.
	//
	// Gets workspace bindings of the catalog. The caller must be a metastore
	// admin or an owner of the catalog.
	Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error)

	// Update catalog workspace bindings.
	//
	// Updates workspace bindings of the catalog. The caller must be a metastore
	// admin or an owner of the catalog.
	Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error)
}

A catalog in Databricks can be configured as __OPEN__ or __ISOLATED__. An __OPEN__ catalog can be accessed from any workspace, while an __ISOLATED__ catalog can only be accessed from a configured list of workspaces.

A catalog's workspace bindings can be configured by a metastore admin or the owner of the catalog.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL