sql

package
v0.26.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 29, 2023 License: Apache-2.0 Imports: 9 Imported by: 19

Documentation

Overview

These APIs allow you to manage Alerts, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Query History, Query Visualizations, Statement Execution, Warehouses, etc.

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AccessControl

type AccessControl struct {
	GroupName string `json:"group_name,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_MANAGE`: Can manage the query
	PermissionLevel PermissionLevel `json:"permission_level,omitempty"`

	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AccessControl) MarshalJSON added in v0.23.0

func (s AccessControl) MarshalJSON() ([]byte, error)

func (*AccessControl) UnmarshalJSON added in v0.23.0

func (s *AccessControl) UnmarshalJSON(b []byte) error

type Alert

type Alert struct {
	// Timestamp when the alert was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Alert ID.
	Id string `json:"id,omitempty"`
	// Timestamp when the alert was last triggered.
	LastTriggeredAt string `json:"last_triggered_at,omitempty"`
	// Name of the alert.
	Name string `json:"name,omitempty"`
	// Alert configuration options.
	Options *AlertOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`

	Query *AlertQuery `json:"query,omitempty"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`
	// State of the alert. Possible values are: `unknown` (yet to be evaluated),
	// `triggered` (evaluated and fulfilled trigger conditions), or `ok`
	// (evaluated and did not fulfill trigger conditions).
	State AlertState `json:"state,omitempty"`
	// Timestamp when the alert was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	User *User `json:"user,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Alert) MarshalJSON added in v0.23.0

func (s Alert) MarshalJSON() ([]byte, error)

func (*Alert) UnmarshalJSON added in v0.23.0

func (s *Alert) UnmarshalJSON(b []byte) error

type AlertOptions

type AlertOptions struct {
	// Name of column in the query result to compare in alert evaluation.
	Column string `json:"column"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This includes email
	// subject, Slack notification header, etc. See [here] for custom templating
	// instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// State that alert evaluates to when query result is empty.
	EmptyResultState AlertOptionsEmptyResultState `json:"empty_result_state,omitempty"`
	// Whether or not the alert is muted. If an alert is muted, it will not
	// notify users and notification destinations when triggered.
	Muted bool `json:"muted,omitempty"`
	// Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`,
	// `!=`
	Op string `json:"op"`
	// Value used to compare in alert evaluation. Supported types include
	// strings (e.g. 'foobar'), floats (e.g. 123.4), and booleans (true).
	Value any `json:"value"`

	ForceSendFields []string `json:"-"`
}

Alert configuration options.

func (AlertOptions) MarshalJSON added in v0.23.0

func (s AlertOptions) MarshalJSON() ([]byte, error)

func (*AlertOptions) UnmarshalJSON added in v0.23.0

func (s *AlertOptions) UnmarshalJSON(b []byte) error

type AlertOptionsEmptyResultState added in v0.20.0

type AlertOptionsEmptyResultState string

State that alert evaluates to when query result is empty.

const AlertOptionsEmptyResultStateOk AlertOptionsEmptyResultState = `ok`
const AlertOptionsEmptyResultStateTriggered AlertOptionsEmptyResultState = `triggered`
const AlertOptionsEmptyResultStateUnknown AlertOptionsEmptyResultState = `unknown`

func (*AlertOptionsEmptyResultState) Set added in v0.20.0

Set raw string value and validate it against allowed values

func (*AlertOptionsEmptyResultState) String added in v0.20.0

String representation for fmt.Print

func (*AlertOptionsEmptyResultState) Type added in v0.20.0

Type always returns AlertOptionsEmptyResultState to satisfy [pflag.Value] interface

type AlertQuery added in v0.13.0

type AlertQuery struct {
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	Options *QueryOptions `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`
	// The ID of the user who created this query.
	UserId int `json:"user_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AlertQuery) MarshalJSON added in v0.23.0

func (s AlertQuery) MarshalJSON() ([]byte, error)

func (*AlertQuery) UnmarshalJSON added in v0.23.0

func (s *AlertQuery) UnmarshalJSON(b []byte) error

type AlertState

type AlertState string

State of the alert. Possible values are: `unknown` (yet to be evaluated), `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated and did not fulfill trigger conditions).

const AlertStateOk AlertState = `ok`
const AlertStateTriggered AlertState = `triggered`
const AlertStateUnknown AlertState = `unknown`

func (*AlertState) Set

func (f *AlertState) Set(v string) error

Set raw string value and validate it against allowed values

func (*AlertState) String

func (f *AlertState) String() string

String representation for fmt.Print

func (*AlertState) Type

func (f *AlertState) Type() string

Type always returns AlertState to satisfy [pflag.Value] interface

type AlertsAPI

type AlertsAPI struct {
	// contains filtered or unexported fields
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewAlerts

func NewAlerts(client *client.DatabricksClient) *AlertsAPI

func (*AlertsAPI) AlertNameToIdMap

func (a *AlertsAPI) AlertNameToIdMap(ctx context.Context) (map[string]string, error)

AlertNameToIdMap calls AlertsAPI.List and creates a map of results with Alert.Name as key and Alert.Id as value.

Returns an error if there's more than one Alert with the same .Name.

Note: All Alert instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) Create

func (a *AlertsAPI) Create(ctx context.Context, request CreateAlert) (*Alert, error)

Create an alert.

Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification destinations if the condition was met.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

func (*AlertsAPI) Delete

func (a *AlertsAPI) Delete(ctx context.Context, request DeleteAlertRequest) error

Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to the trash.

func (*AlertsAPI) DeleteByAlertId

func (a *AlertsAPI) DeleteByAlertId(ctx context.Context, alertId string) error

Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to the trash.

func (*AlertsAPI) Get

func (a *AlertsAPI) Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

Get an alert.

Gets an alert.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

byId, err := w.Alerts.GetByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

func (*AlertsAPI) GetByAlertId

func (a *AlertsAPI) GetByAlertId(ctx context.Context, alertId string) (*Alert, error)

Get an alert.

Gets an alert.

func (*AlertsAPI) GetByName

func (a *AlertsAPI) GetByName(ctx context.Context, name string) (*Alert, error)

GetByName calls AlertsAPI.AlertNameToIdMap and returns a single Alert.

Returns an error if there's more than one Alert with the same .Name.

Note: All Alert instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) Impl

func (a *AlertsAPI) Impl() AlertsService

Impl returns low-level Alerts API implementation

func (*AlertsAPI) List

func (a *AlertsAPI) List(ctx context.Context) ([]Alert, error)

Get alerts.

Gets a list of alerts.

func (*AlertsAPI) Update

func (a *AlertsAPI) Update(ctx context.Context, request EditAlert) error

Update an alert.

Updates an alert.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

err = w.Alerts.Update(ctx, sql.EditAlert{
	Options: sql.AlertOptions{
		Column: "1",
		Op:     "==",
		Value:  "1",
	},
	AlertId: alert.Id,
	Name:    fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	QueryId: query.Id,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteByAlertId(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

func (*AlertsAPI) WithImpl

func (a *AlertsAPI) WithImpl(impl AlertsService) *AlertsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type AlertsService

type AlertsService interface {

	// Create an alert.
	//
	// Creates an alert. An alert is a Databricks SQL object that periodically
	// runs a query, evaluates a condition of its result, and notifies users or
	// notification destinations if the condition was met.
	Create(ctx context.Context, request CreateAlert) (*Alert, error)

	// Delete an alert.
	//
	// Deletes an alert. Deleted alerts are no longer accessible and cannot be
	// restored. **Note:** Unlike queries and dashboards, alerts cannot be moved
	// to the trash.
	Delete(ctx context.Context, request DeleteAlertRequest) error

	// Get an alert.
	//
	// Gets an alert.
	Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

	// Get alerts.
	//
	// Gets a list of alerts.
	List(ctx context.Context) ([]Alert, error)

	// Update an alert.
	//
	// Updates an alert.
	Update(ctx context.Context, request EditAlert) error
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type BaseChunkInfo added in v0.20.0

type BaseChunkInfo struct {
	// The number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	ForceSendFields []string `json:"-"`
}

Describes metadata for a particular chunk, within a result set; this structure is used both within a manifest, and when fetching individual chunk data or links.

func (BaseChunkInfo) MarshalJSON added in v0.23.0

func (s BaseChunkInfo) MarshalJSON() ([]byte, error)

func (*BaseChunkInfo) UnmarshalJSON added in v0.23.0

func (s *BaseChunkInfo) UnmarshalJSON(b []byte) error

type CancelExecutionRequest added in v0.3.0

type CancelExecutionRequest struct {
	StatementId string `json:"-" url:"-"`
}

Cancel statement execution

type Channel

type Channel struct {
	DbsqlVersion string `json:"dbsql_version,omitempty"`

	Name ChannelName `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Channel) MarshalJSON added in v0.23.0

func (s Channel) MarshalJSON() ([]byte, error)

func (*Channel) UnmarshalJSON added in v0.23.0

func (s *Channel) UnmarshalJSON(b []byte) error

type ChannelInfo

type ChannelInfo struct {
	// DBSQL Version the channel is mapped to
	DbsqlVersion string `json:"dbsql_version,omitempty"`
	// Name of the channel
	Name ChannelName `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

Channel information for the SQL warehouse at the time of query execution

func (ChannelInfo) MarshalJSON added in v0.23.0

func (s ChannelInfo) MarshalJSON() ([]byte, error)

func (*ChannelInfo) UnmarshalJSON added in v0.23.0

func (s *ChannelInfo) UnmarshalJSON(b []byte) error

type ChannelName

type ChannelName string
const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT`
const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM`
const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW`
const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS`
const ChannelNameChannelNameUnspecified ChannelName = `CHANNEL_NAME_UNSPECIFIED`

func (*ChannelName) Set

func (f *ChannelName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ChannelName) String

func (f *ChannelName) String() string

String representation for fmt.Print

func (*ChannelName) Type

func (f *ChannelName) Type() string

Type always returns ChannelName to satisfy [pflag.Value] interface

type ColumnInfo added in v0.3.0

type ColumnInfo struct {
	// The name of the column.
	Name string `json:"name,omitempty"`
	// The ordinal position of the column (starting at position 0).
	Position int `json:"position,omitempty"`
	// The format of the interval type.
	TypeIntervalType string `json:"type_interval_type,omitempty"`
	// The name of the base data type. This doesn't include details for complex
	// types such as STRUCT, MAP or ARRAY.
	TypeName ColumnInfoTypeName `json:"type_name,omitempty"`
	// Specifies the number of digits in a number. This applies to the DECIMAL
	// type.
	TypePrecision int `json:"type_precision,omitempty"`
	// Specifies the number of digits to the right of the decimal point in a
	// number. This applies to the DECIMAL type.
	TypeScale int `json:"type_scale,omitempty"`
	// The full SQL type specification.
	TypeText string `json:"type_text,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ColumnInfo) MarshalJSON added in v0.23.0

func (s ColumnInfo) MarshalJSON() ([]byte, error)

func (*ColumnInfo) UnmarshalJSON added in v0.23.0

func (s *ColumnInfo) UnmarshalJSON(b []byte) error

type ColumnInfoTypeName added in v0.3.0

type ColumnInfoTypeName string

The name of the base data type. This doesn't include details for complex types such as STRUCT, MAP or ARRAY.

const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY`
const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY`
const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN`
const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE`
const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR`
const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE`
const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL`
const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE`
const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT`
const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT`
const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL`
const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG`
const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP`
const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL`
const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT`
const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING`
const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT`
const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP`
const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE`

func (*ColumnInfoTypeName) Set added in v0.3.0

func (f *ColumnInfoTypeName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ColumnInfoTypeName) String added in v0.3.0

func (f *ColumnInfoTypeName) String() string

String representation for fmt.Print

func (*ColumnInfoTypeName) Type added in v0.3.0

func (f *ColumnInfoTypeName) Type() string

Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface

type CreateAlert added in v0.3.0

type CreateAlert struct {
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateAlert) MarshalJSON added in v0.23.0

func (s CreateAlert) MarshalJSON() ([]byte, error)

func (*CreateAlert) UnmarshalJSON added in v0.23.0

func (s *CreateAlert) UnmarshalJSON(b []byte) error

type CreateDashboardRequest

type CreateDashboardRequest struct {
	// Indicates whether the dashboard filters are enabled
	DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"`
	// Indicates whether this query object should appear in the current user's
	// favorites list. The application uses this flag to determine whether or
	// not the "favorite star" should be selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of this dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Run as role
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	Tags []string `json:"tags,omitempty"`

	ForceSendFields []string `json:"-"`
}

Create a dashboard object

func (CreateDashboardRequest) MarshalJSON added in v0.23.0

func (s CreateDashboardRequest) MarshalJSON() ([]byte, error)

func (*CreateDashboardRequest) UnmarshalJSON added in v0.23.0

func (s *CreateDashboardRequest) UnmarshalJSON(b []byte) error

type CreateQueryVisualizationRequest added in v0.19.0

type CreateQueryVisualizationRequest struct {
	// A short description of this visualization. This is not displayed in the
	// UI.
	Description string `json:"description,omitempty"`
	// The name of the visualization that appears on dashboards and the query
	// screen.
	Name string `json:"name,omitempty"`
	// The options object varies widely from one visualization type to the next
	// and is unsupported. Databricks does not recommend modifying visualization
	// settings in JSON.
	Options any `json:"options"`
	// The identifier returned by :method:queries/create
	QueryId string `json:"query_id"`
	// The type of visualization: chart, table, pivot table, and so on.
	Type string `json:"type"`

	ForceSendFields []string `json:"-"`
}

Add visualization to a query

func (CreateQueryVisualizationRequest) MarshalJSON added in v0.23.0

func (s CreateQueryVisualizationRequest) MarshalJSON() ([]byte, error)

func (*CreateQueryVisualizationRequest) UnmarshalJSON added in v0.23.0

func (s *CreateQueryVisualizationRequest) UnmarshalJSON(b []byte) error

type CreateWarehouseRequest

type CreateWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType CreateWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateWarehouseRequest) MarshalJSON added in v0.23.0

func (s CreateWarehouseRequest) MarshalJSON() ([]byte, error)

func (*CreateWarehouseRequest) UnmarshalJSON added in v0.23.0

func (s *CreateWarehouseRequest) UnmarshalJSON(b []byte) error

type CreateWarehouseRequestWarehouseType added in v0.9.0

type CreateWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const CreateWarehouseRequestWarehouseTypeClassic CreateWarehouseRequestWarehouseType = `CLASSIC`
const CreateWarehouseRequestWarehouseTypePro CreateWarehouseRequestWarehouseType = `PRO`
const CreateWarehouseRequestWarehouseTypeTypeUnspecified CreateWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*CreateWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*CreateWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*CreateWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns CreateWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type CreateWarehouseResponse

type CreateWarehouseResponse struct {
	// Id for the SQL warehouse. This value is unique across all SQL warehouses.
	Id string `json:"id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateWarehouseResponse) MarshalJSON added in v0.23.0

func (s CreateWarehouseResponse) MarshalJSON() ([]byte, error)

func (*CreateWarehouseResponse) UnmarshalJSON added in v0.23.0

func (s *CreateWarehouseResponse) UnmarshalJSON(b []byte) error

type CreateWidget added in v0.19.0

type CreateWidget struct {
	// Dashboard ID returned by :method:dashboards/create.
	DashboardId string `json:"dashboard_id"`

	Id string `json:"-" url:"-"`

	Options WidgetOptions `json:"options"`
	// If this is a textbox widget, the application displays this text. This
	// field is ignored if the widget contains a visualization in the
	// `visualization` field.
	Text string `json:"text,omitempty"`
	// Query Visualization ID returned by :method:queryvisualizations/create.
	VisualizationId string `json:"visualization_id,omitempty"`
	// Width of a widget
	Width int `json:"width"`

	ForceSendFields []string `json:"-"`
}

func (CreateWidget) MarshalJSON added in v0.23.0

func (s CreateWidget) MarshalJSON() ([]byte, error)

func (*CreateWidget) UnmarshalJSON added in v0.23.0

func (s *CreateWidget) UnmarshalJSON(b []byte) error

type Dashboard

type Dashboard struct {
	// Whether the authenticated user can edit the query definition.
	CanEdit bool `json:"can_edit,omitempty"`
	// Timestamp when this dashboard was created.
	CreatedAt string `json:"created_at,omitempty"`
	// In the web application, query filters that share a name are coupled to a
	// single selection box if this value is `true`.
	DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"`
	// The ID for this dashboard.
	Id string `json:"id,omitempty"`
	// Indicates whether a dashboard is trashed. Trashed dashboards won't appear
	// in list views. If this boolean is `true`, the `options` property for this
	// dashboard includes a `moved_to_trash_at` timestamp. Items in trash are
	// permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether a dashboard is a draft. Draft dashboards only appear in list
	// views for their owners.
	IsDraft bool `json:"is_draft,omitempty"`
	// Indicates whether this query object appears in the current user's
	// favorites list. This flag determines whether the star icon for favorites
	// is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of the dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name,omitempty"`

	Options *DashboardOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_MANAGE`: Can manage the query
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// URL slug. Usually mirrors the query name with dashes (`-`) instead of
	// spaces. Appears in the URL for this query.
	Slug string `json:"slug,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// Timestamp when this dashboard was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	User *User `json:"user,omitempty"`
	// The ID of the user that created and owns this dashboard.
	UserId int `json:"user_id,omitempty"`

	Widgets []Widget `json:"widgets,omitempty"`

	ForceSendFields []string `json:"-"`
}

A JSON object representing a dashboard containing widgets of visualizations and text boxes.

func (Dashboard) MarshalJSON added in v0.23.0

func (s Dashboard) MarshalJSON() ([]byte, error)

func (*Dashboard) UnmarshalJSON added in v0.23.0

func (s *Dashboard) UnmarshalJSON(b []byte) error

type DashboardOptions

// DashboardOptions carries additional dashboard metadata, currently only the
// trash timestamp surfaced when a dashboard is archived.
type DashboardOptions struct {
	// The timestamp when this dashboard was moved to trash. Only present when
	// the `is_archived` property is `true`. Trashed items are deleted after
	// thirty days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; it is consumed by the custom MarshalJSON and is not
	// itself serialized (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (DashboardOptions) MarshalJSON added in v0.23.0

func (s DashboardOptions) MarshalJSON() ([]byte, error)

func (*DashboardOptions) UnmarshalJSON added in v0.23.0

func (s *DashboardOptions) UnmarshalJSON(b []byte) error

type DashboardWidgetsAPI added in v0.19.0

type DashboardWidgetsAPI struct {
	// contains filtered or unexported fields
}

This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace. Data structures may change over time.

func NewDashboardWidgets added in v0.19.0

func NewDashboardWidgets(client *client.DatabricksClient) *DashboardWidgetsAPI

func (*DashboardWidgetsAPI) Create added in v0.19.0

func (a *DashboardWidgetsAPI) Create(ctx context.Context, request CreateWidget) (*Widget, error)

Add widget to a dashboard.

func (*DashboardWidgetsAPI) Delete added in v0.19.0

Remove widget.

func (*DashboardWidgetsAPI) DeleteById added in v0.19.0

func (a *DashboardWidgetsAPI) DeleteById(ctx context.Context, id string) error

Remove widget.

func (*DashboardWidgetsAPI) Impl added in v0.19.0

Impl returns low-level DashboardWidgets API implementation

func (*DashboardWidgetsAPI) Update added in v0.19.0

func (a *DashboardWidgetsAPI) Update(ctx context.Context, request CreateWidget) (*Widget, error)

Update existing widget.

func (*DashboardWidgetsAPI) WithImpl added in v0.19.0

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DashboardWidgetsService added in v0.19.0

// DashboardWidgetsService facilitates the addition and removal of widgets
// from existing dashboards within the Databricks Workspace. This is an
// evolving API; data structures may change over time.
type DashboardWidgetsService interface {

	// Add widget to a dashboard.
	Create(ctx context.Context, request CreateWidget) (*Widget, error)

	// Remove widget.
	Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error

	// Update existing widget.
	Update(ctx context.Context, request CreateWidget) (*Widget, error)
}

This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace. Data structures may change over time.

type DashboardsAPI

type DashboardsAPI struct {
	// contains filtered or unexported fields
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look-up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewDashboards

func NewDashboards(client *client.DatabricksClient) *DashboardsAPI

func (*DashboardsAPI) Create

func (a *DashboardsAPI) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error)

Create a dashboard object.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) DashboardNameToIdMap

func (a *DashboardsAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error)

DashboardNameToIdMap calls DashboardsAPI.ListAll and creates a map of results with Dashboard.Name as key and Dashboard.Id as value.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) Delete

func (a *DashboardsAPI) Delete(ctx context.Context, request DeleteDashboardRequest) error

Remove a dashboard.

Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) DeleteByDashboardId

func (a *DashboardsAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error

Remove a dashboard.

Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.

func (*DashboardsAPI) Get

Retrieve a definition.

Returns a JSON representation of a dashboard object, including its visualization and query objects.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := w.Dashboards.GetByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) GetByDashboardId

func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error)

Retrieve a definition.

Returns a JSON representation of a dashboard object, including its visualization and query objects.

func (*DashboardsAPI) GetByName

func (a *DashboardsAPI) GetByName(ctx context.Context, name string) (*Dashboard, error)

GetByName calls DashboardsAPI.DashboardNameToIdMap and returns a single Dashboard.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) Impl

func (a *DashboardsAPI) Impl() DashboardsService

Impl returns low-level Dashboards API implementation

func (*DashboardsAPI) List added in v0.24.0

Get dashboard objects.

Fetch a paginated list of dashboard objects.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) ListAll

func (a *DashboardsAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error)

Get dashboard objects.

Fetch a paginated list of dashboard objects.

This method is generated by Databricks SDK Code Generator.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Dashboards.ListAll(ctx, sql.ListDashboardsRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*DashboardsAPI) Restore

func (a *DashboardsAPI) Restore(ctx context.Context, request RestoreDashboardRequest) error

Restore a dashboard.

A restored dashboard appears in list views and searches and can be shared.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.CreateDashboardRequest{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.Restore(ctx, sql.RestoreDashboardRequest{
	DashboardId: created.Id,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) WithImpl

func (a *DashboardsAPI) WithImpl(impl DashboardsService) *DashboardsAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DashboardsService

// DashboardsService manages dashboard objects: create, trash (Delete),
// restore, fetch, and paginated listing. Trashed dashboards do not appear in
// list views or searches and cannot be shared.
type DashboardsService interface {

	// Create a dashboard object.
	Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error)

	// Remove a dashboard.
	//
	// Moves a dashboard to the trash. Trashed dashboards do not appear in list
	// views or searches, and cannot be shared.
	Delete(ctx context.Context, request DeleteDashboardRequest) error

	// Retrieve a definition.
	//
	// Returns a JSON representation of a dashboard object, including its
	// visualization and query objects.
	Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)

	// Get dashboard objects.
	//
	// Fetch a paginated list of dashboard objects.
	//
	// Use ListAll() to get all Dashboard instances, which will iterate over every result page.
	List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error)

	// Restore a dashboard.
	//
	// A restored dashboard appears in list views and searches and can be
	// shared.
	Restore(ctx context.Context, request RestoreDashboardRequest) error
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look-up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type DataSource

// DataSource is a JSON object representing a DBSQL data source / SQL
// warehouse. Several fields are reserved for internal use and should not be
// relied upon by clients.
type DataSource struct {
	// Data source ID.
	Id string `json:"id,omitempty"`
	// The string name of this data source / SQL warehouse as it appears in the
	// Databricks SQL web application.
	Name string `json:"name,omitempty"`
	// Reserved for internal use.
	PauseReason string `json:"pause_reason,omitempty"`
	// Reserved for internal use.
	Paused int `json:"paused,omitempty"`
	// Reserved for internal use.
	SupportsAutoLimit bool `json:"supports_auto_limit,omitempty"`
	// Reserved for internal use.
	Syntax string `json:"syntax,omitempty"`
	// The type of data source. For SQL warehouses, this will be
	// `databricks_internal`.
	Type string `json:"type,omitempty"`
	// Reserved for internal use.
	ViewOnly bool `json:"view_only,omitempty"`
	// The ID of the associated SQL warehouse, if this data source is backed by
	// a SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

A JSON object representing a DBSQL data source / SQL warehouse.

func (DataSource) MarshalJSON added in v0.23.0

func (s DataSource) MarshalJSON() ([]byte, error)

func (*DataSource) UnmarshalJSON added in v0.23.0

func (s *DataSource) UnmarshalJSON(b []byte) error

type DataSourcesAPI

type DataSourcesAPI struct {
	// contains filtered or unexported fields
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

func NewDataSources

func NewDataSources(client *client.DatabricksClient) *DataSourcesAPI

func (*DataSourcesAPI) DataSourceNameToIdMap

func (a *DataSourcesAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error)

DataSourceNameToIdMap calls DataSourcesAPI.List and creates a map of results with DataSource.Name as key and DataSource.Id as value.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) GetByName

func (a *DataSourcesAPI) GetByName(ctx context.Context, name string) (*DataSource, error)

GetByName calls DataSourcesAPI.DataSourceNameToIdMap and returns a single DataSource.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) Impl

Impl returns low-level DataSources API implementation

func (*DataSourcesAPI) List

func (a *DataSourcesAPI) List(ctx context.Context) ([]DataSource, error)

Get a list of SQL warehouses.

Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, you need only a SQL warehouse's `id` to create new queries against it.

func (*DataSourcesAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DataSourcesService

// DataSourcesService helps locate the `data_source_id` of the SQL warehouse a
// query should run against. It does not support searches; it returns the full
// list of SQL warehouses in the workspace.
type DataSourcesService interface {

	// Get a list of SQL warehouses.
	//
	// Retrieves a full list of SQL warehouses available in this workspace. All
	// fields that appear in this API response are enumerated for clarity.
	// However, you need only a SQL warehouse's `id` to create new queries
	// against it.
	List(ctx context.Context) ([]DataSource, error)
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

type DbsqlPermissionsAPI

type DbsqlPermissionsAPI struct {
	// contains filtered or unexported fields
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

func NewDbsqlPermissions

func NewDbsqlPermissions(client *client.DatabricksClient) *DbsqlPermissionsAPI

func (*DbsqlPermissionsAPI) Get

Get object ACL.

Gets a JSON representation of the access control list (ACL) for a specified object.

func (*DbsqlPermissionsAPI) GetByObjectTypeAndObjectId

func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error)

Get object ACL.

Gets a JSON representation of the access control list (ACL) for a specified object.

func (*DbsqlPermissionsAPI) Impl

Impl returns low-level DbsqlPermissions API implementation

func (*DbsqlPermissionsAPI) Set

Set object ACL.

Sets the access control list (ACL) for a specified object. This operation will completely rewrite the ACL.

func (*DbsqlPermissionsAPI) TransferOwnership

func (a *DbsqlPermissionsAPI) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)

Transfer object ownership.

Transfers ownership of a dashboard, query, or alert to an active user. Requires an admin API key.

func (*DbsqlPermissionsAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type DbsqlPermissionsService

// DbsqlPermissionsService reads and writes access control lists (ACLs) for
// DBSQL objects (dashboards, queries, alerts) and transfers object ownership.
type DbsqlPermissionsService interface {

	// Get object ACL.
	//
	// Gets a JSON representation of the access control list (ACL) for a
	// specified object.
	Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)

	// Set object ACL.
	//
	// Sets the access control list (ACL) for a specified object. This operation
	// will completely rewrite the ACL.
	Set(ctx context.Context, request SetRequest) (*SetResponse, error)

	// Transfer object ownership.
	//
	// Transfers ownership of a dashboard, query, or alert to an active user.
	// Requires an admin API key.
	TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

type DeleteAlertRequest

// DeleteAlertRequest identifies the alert to delete.
type DeleteAlertRequest struct {
	// ID of the alert to delete; passed in the URL path, not the body.
	AlertId string `json:"-" url:"-"`
}

Delete an alert

type DeleteDashboardRequest

// DeleteDashboardRequest identifies the dashboard to move to the trash.
type DeleteDashboardRequest struct {
	// ID of the dashboard to remove; passed in the URL path, not the body.
	DashboardId string `json:"-" url:"-"`
}

Remove a dashboard

type DeleteDashboardWidgetRequest added in v0.19.0

// DeleteDashboardWidgetRequest identifies the widget to remove from a dashboard.
type DeleteDashboardWidgetRequest struct {
	// ID of the widget to remove; passed in the URL path, not the body.
	Id string `json:"-" url:"-"`
}

Remove widget

type DeleteQueryRequest

// DeleteQueryRequest identifies the query to delete.
type DeleteQueryRequest struct {
	// ID of the query to delete; passed in the URL path, not the body.
	QueryId string `json:"-" url:"-"`
}

Delete a query

type DeleteQueryVisualizationRequest added in v0.19.0

// DeleteQueryVisualizationRequest identifies the visualization to remove.
type DeleteQueryVisualizationRequest struct {
	// ID of the visualization to remove; passed in the URL path, not the body.
	Id string `json:"-" url:"-"`
}

Remove visualization

type DeleteWarehouseRequest

// DeleteWarehouseRequest identifies the SQL warehouse to delete.
type DeleteWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Delete a warehouse

type Disposition added in v0.3.0

type Disposition string

The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`.

Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` format, in a series of chunks. If a given statement produces a result set with a size larger than 25 MiB, that statement execution is aborted, and no result set will be available.

**NOTE** Byte limits are computed based upon internal representations of the result set data, and might not match the sizes visible in JSON responses.

Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The resulting links have two important properties:

1. They point to resources _external_ to the Databricks compute; therefore any associated authentication information (typically a personal access token, OAuth token, or similar) _must be removed_ when fetching from these links.

2. These are presigned URLs with a specific expiration, indicated in the response. The behavior when attempting to use an expired link is cloud specific.

// DispositionExternalLinks returns results as external links: presigned URLs
// pointing to cloud storage internal to the workspace.
const DispositionExternalLinks Disposition = `EXTERNAL_LINKS`

// DispositionInline returns result data inline, in `JSON_ARRAY` format, in a
// series of chunks.
const DispositionInline Disposition = `INLINE`

func (*Disposition) Set added in v0.3.0

func (f *Disposition) Set(v string) error

Set raw string value and validate it against allowed values

func (*Disposition) String added in v0.3.0

func (f *Disposition) String() string

String representation for fmt.Print

func (*Disposition) Type added in v0.3.0

func (f *Disposition) Type() string

Type always returns Disposition to satisfy [pflag.Value] interface

type EditAlert

// EditAlert is the request payload for updating an existing alert.
type EditAlert struct {
	// ID of the alert to edit; passed in the URL path, not the body.
	AlertId string `json:"-" url:"-"`
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EditAlert) MarshalJSON added in v0.23.0

func (s EditAlert) MarshalJSON() ([]byte, error)

func (*EditAlert) UnmarshalJSON added in v0.23.0

func (s *EditAlert) UnmarshalJSON(b []byte) error

type EditWarehouseRequest

// EditWarehouseRequest is the request payload for reconfiguring an existing
// SQL warehouse identified by Id.
type EditWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute.
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Required. Id of the warehouse to configure.
	Id string `json:"-" url:"-"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EditWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EditWarehouseRequest) MarshalJSON added in v0.23.0

func (s EditWarehouseRequest) MarshalJSON() ([]byte, error)

func (*EditWarehouseRequest) UnmarshalJSON added in v0.23.0

func (s *EditWarehouseRequest) UnmarshalJSON(b []byte) error

type EditWarehouseRequestWarehouseType added in v0.9.0

type EditWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

// EditWarehouseRequestWarehouseTypeClassic selects a classic warehouse.
const EditWarehouseRequestWarehouseTypeClassic EditWarehouseRequestWarehouseType = `CLASSIC`

// EditWarehouseRequestWarehouseTypePro selects a pro warehouse (required for
// serverless compute).
const EditWarehouseRequestWarehouseTypePro EditWarehouseRequestWarehouseType = `PRO`

// EditWarehouseRequestWarehouseTypeTypeUnspecified leaves the type unset.
const EditWarehouseRequestWarehouseTypeTypeUnspecified EditWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*EditWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EditWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*EditWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns EditWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type EndpointConfPair

// EndpointConfPair is a single key/value configuration entry for a SQL
// warehouse endpoint.
type EndpointConfPair struct {
	// Configuration key.
	Key string `json:"key,omitempty"`

	// Configuration value.
	Value string `json:"value,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EndpointConfPair) MarshalJSON added in v0.23.0

func (s EndpointConfPair) MarshalJSON() ([]byte, error)

func (*EndpointConfPair) UnmarshalJSON added in v0.23.0

func (s *EndpointConfPair) UnmarshalJSON(b []byte) error

type EndpointHealth

// EndpointHealth describes the health of a SQL warehouse, including failure
// details when the warehouse is degraded or failed.
type EndpointHealth struct {
	// Details about errors that are causing current degraded/failed status.
	Details string `json:"details,omitempty"`
	// The reason for failure to bring up clusters for this warehouse. This is
	// available when status is 'FAILED' and sometimes when it is DEGRADED.
	FailureReason *TerminationReason `json:"failure_reason,omitempty"`
	// Deprecated. split into summary and details for security
	Message string `json:"message,omitempty"`
	// Health status of the warehouse.
	Status Status `json:"status,omitempty"`
	// A short summary of the health status in case of degraded/failed
	// warehouses.
	Summary string `json:"summary,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EndpointHealth) MarshalJSON added in v0.23.0

func (s EndpointHealth) MarshalJSON() ([]byte, error)

func (*EndpointHealth) UnmarshalJSON added in v0.23.0

func (s *EndpointHealth) UnmarshalJSON(b []byte) error

type EndpointInfo

// EndpointInfo describes the full configuration and current runtime state of
// a SQL warehouse, as returned by get/list operations.
type EndpointInfo struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EndpointInfoWarehouseType `json:"warehouse_type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EndpointInfo) MarshalJSON added in v0.23.0

func (s EndpointInfo) MarshalJSON() ([]byte, error)

func (*EndpointInfo) UnmarshalJSON added in v0.23.0

func (s *EndpointInfo) UnmarshalJSON(b []byte) error

type EndpointInfoWarehouseType added in v0.9.0

type EndpointInfoWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

// EndpointInfoWarehouseTypeClassic identifies a classic warehouse.
const EndpointInfoWarehouseTypeClassic EndpointInfoWarehouseType = `CLASSIC`

// EndpointInfoWarehouseTypePro identifies a pro warehouse (required for
// serverless compute).
const EndpointInfoWarehouseTypePro EndpointInfoWarehouseType = `PRO`

// EndpointInfoWarehouseTypeTypeUnspecified indicates the type is unset.
const EndpointInfoWarehouseTypeTypeUnspecified EndpointInfoWarehouseType = `TYPE_UNSPECIFIED`

func (*EndpointInfoWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EndpointInfoWarehouseType) String added in v0.9.0

func (f *EndpointInfoWarehouseType) String() string

String representation for fmt.Print

func (*EndpointInfoWarehouseType) Type added in v0.9.0

Type always returns EndpointInfoWarehouseType to satisfy [pflag.Value] interface

type EndpointTagPair

// EndpointTagPair is a single key/value tag applied to resources associated
// with a SQL warehouse.
type EndpointTagPair struct {
	// Tag key.
	Key string `json:"key,omitempty"`

	// Tag value.
	Value string `json:"value,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold
	// their zero value; consumed by the custom MarshalJSON (tagged `json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EndpointTagPair) MarshalJSON added in v0.23.0

func (s EndpointTagPair) MarshalJSON() ([]byte, error)

func (*EndpointTagPair) UnmarshalJSON added in v0.23.0

func (s *EndpointTagPair) UnmarshalJSON(b []byte) error

type EndpointTags

// EndpointTags is the set of key-value tags applied to all resources (e.g.,
// AWS instances and EBS volumes) associated with a SQL warehouse.
type EndpointTags struct {
	// Custom key/value tag pairs.
	CustomTags []EndpointTagPair `json:"custom_tags,omitempty"`
}

type ExecuteStatementRequest added in v0.3.0

type ExecuteStatementRequest struct {
	// Applies the given byte limit to the statement's result size. Byte counts
	// are based on internal data representations and might not match the final
	// size in the requested `format`. If the result was truncated due to the
	// byte limit, then `truncated` in the response is set to `true`. When using
	// `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is
	// applied if `byte_limit` is not explicitly set.
	ByteLimit int64 `json:"byte_limit,omitempty"`
	// Sets default catalog for statement execution, similar to [`USE CATALOG`]
	// in SQL.
	//
	// [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html
	Catalog string `json:"catalog,omitempty"`
	// The fetch disposition provides two modes of fetching results: `INLINE`
	// and `EXTERNAL_LINKS`.
	//
	// Statements executed with `INLINE` disposition will return result data
	// inline, in `JSON_ARRAY` format, in a series of chunks. If a given
	// statement produces a result set with a size larger than 25 MiB, that
	// statement execution is aborted, and no result set will be available.
	//
	// **NOTE** Byte limits are computed based upon internal representations of
	// the result set data, and might not match the sizes visible in JSON
	// responses.
	//
	// Statements executed with `EXTERNAL_LINKS` disposition will return result
	// data as external links: URLs that point to cloud storage internal to the
	// workspace. Using `EXTERNAL_LINKS` disposition allows statements to
	// generate arbitrarily sized result sets for fetching up to 100 GiB. The
	// resulting links have two important properties:
	//
	// 1. They point to resources _external_ to the Databricks compute;
	// therefore any associated authentication information (typically a personal
	// access token, OAuth token, or similar) _must be removed_ when fetching
	// from these links.
	//
	// 2. These are presigned URLs with a specific expiration, indicated in the
	// response. The behavior when attempting to use an expired link is cloud
	// specific.
	Disposition Disposition `json:"disposition,omitempty"`
	// Statement execution supports three result formats: `JSON_ARRAY`
	// (default), `ARROW_STREAM`, and `CSV`.
	//
	// Important: The formats `ARROW_STREAM` and `CSV` are supported only with
	// `EXTERNAL_LINKS` disposition. `JSON_ARRAY` is supported in `INLINE` and
	// `EXTERNAL_LINKS` disposition.
	//
	// When specifying `format=JSON_ARRAY`, result data will be formatted as an
	// array of arrays of values, where each value is either the *string
	// representation* of a value, or `null`. For example, the output of `SELECT
	// concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)`
	// would look like this:
	//
	// ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null
	// ], ] ```
	//
	// When specifying `format=JSON_ARRAY` and `disposition=EXTERNAL_LINKS`,
	// each chunk in the result contains compact JSON with no indentation or
	// extra whitespace.
	//
	// When specifying `format=ARROW_STREAM` and `disposition=EXTERNAL_LINKS`,
	// each chunk in the result will be formatted as Apache Arrow Stream. See
	// the [Apache Arrow streaming format].
	//
	// When specifying `format=CSV` and `disposition=EXTERNAL_LINKS`, each chunk
	// in the result will be a CSV according to [RFC 4180] standard. All the
	// columns values will have *string representation* similar to the
	// `JSON_ARRAY` format, and `null` values will be encoded as `"null"`.
	// Only the first chunk in the result would contain a header row with column
	// names. For example, the output of `SELECT concat('id-', id) AS strCol, id
	// AS intCol, null as nullCol FROM range(3)` would look like this:
	//
	// ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ```
	//
	// [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
	// [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180
	Format Format `json:"format,omitempty"`
	// When `wait_timeout > 0s`, the call will block up to the specified time.
	// If the statement execution doesn't finish within this time,
	// `on_wait_timeout` determines whether the execution should continue or be
	// canceled. When set to `CONTINUE`, the statement execution continues
	// asynchronously and the call returns a statement ID which can be used for
	// polling with :method:statementexecution/getStatement. When set to
	// `CANCEL`, the statement execution is canceled and the call returns with a
	// `CANCELED` state.
	OnWaitTimeout ExecuteStatementRequestOnWaitTimeout `json:"on_wait_timeout,omitempty"`
	// A list of parameters to pass into a SQL statement containing parameter
	// markers. A parameter consists of a name, a value, and optionally a type.
	// To represent a NULL value, the `value` field may be omitted or set to
	// `null` explicitly. If the `type` field is omitted, the value is
	// interpreted as a string.
	//
	// If the type is given, parameters will be checked for type correctness
	// according to the given type. A value is correct if the provided string
	// can be converted to the requested type using the `cast` function. The
	// exact semantics are described in the section [`cast` function] of the SQL
	// language reference.
	//
	// For example, the following statement contains two parameters, `my_name`
	// and `my_date`:
	//
	// SELECT * FROM my_table WHERE name = :my_name AND date = :my_date
	//
	// The parameters can be passed in the request body as follows:
	//
	// { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND
	// date = :my_date", "parameters": [ { "name": "my_name", "value": "the
	// name" }, { "name": "my_date", "value": "2020-01-01", "type": "DATE" } ] }
	//
	// Currently, positional parameters denoted by a `?` marker are not
	// supported by the Databricks SQL Statement Execution API.
	//
	// Also see the section [Parameter markers] of the SQL language reference.
	//
	// [Parameter markers]: https://docs.databricks.com/sql/language-manual/sql-ref-parameter-marker.html
	// [`cast` function]: https://docs.databricks.com/sql/language-manual/functions/cast.html
	Parameters []StatementParameterListItem `json:"parameters,omitempty"`
	// Applies the given row limit to the statement's result set, but unlike the
	// `LIMIT` clause in SQL, it also sets the `truncated` field in the response
	// to indicate whether the result was trimmed due to the limit or not.
	RowLimit int64 `json:"row_limit,omitempty"`
	// Sets default schema for statement execution, similar to [`USE SCHEMA`] in
	// SQL.
	//
	// [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html
	Schema string `json:"schema,omitempty"`
	// The SQL statement to execute. The statement can optionally be
	// parameterized, see `parameters`.
	Statement string `json:"statement"`
	// The time in seconds the call will wait for the statement's result set as
	// `Ns`, where `N` can be set to 0 or to a value between 5 and 50.
	//
	// When set to `0s`, the statement will execute in asynchronous mode and the
	// call will not wait for the execution to finish. In this case, the call
	// returns directly with `PENDING` state and a statement ID which can be
	// used for polling with :method:statementexecution/getStatement.
	//
	// When set between 5 and 50 seconds, the call will behave synchronously up
	// to this timeout and wait for the statement execution to finish. If the
	// execution finishes within this time, the call returns immediately with a
	// manifest and result data (or a `FAILED` state in case of an execution
	// error). If the statement takes longer to execute, `on_wait_timeout`
	// determines what should happen after the timeout is reached.
	WaitTimeout string `json:"wait_timeout,omitempty"`
	// Warehouse upon which to execute a statement. See also [What are SQL
	// warehouses?](/sql/admin/warehouse-type.html)
	WarehouseId string `json:"warehouse_id"`

	ForceSendFields []string `json:"-"`
}

func (ExecuteStatementRequest) MarshalJSON added in v0.23.0

func (s ExecuteStatementRequest) MarshalJSON() ([]byte, error)

func (*ExecuteStatementRequest) UnmarshalJSON added in v0.23.0

func (s *ExecuteStatementRequest) UnmarshalJSON(b []byte) error

type ExecuteStatementRequestOnWaitTimeout added in v0.20.0

type ExecuteStatementRequestOnWaitTimeout string

When `wait_timeout > 0s`, the call will block up to the specified time. If the statement execution doesn't finish within this time, `on_wait_timeout` determines whether the execution should continue or be canceled. When set to `CONTINUE`, the statement execution continues asynchronously and the call returns a statement ID which can be used for polling with :method:statementexecution/getStatement. When set to `CANCEL`, the statement execution is canceled and the call returns with a `CANCELED` state.

const ExecuteStatementRequestOnWaitTimeoutCancel ExecuteStatementRequestOnWaitTimeout = `CANCEL`
const ExecuteStatementRequestOnWaitTimeoutContinue ExecuteStatementRequestOnWaitTimeout = `CONTINUE`

func (*ExecuteStatementRequestOnWaitTimeout) Set added in v0.20.0

Set raw string value and validate it against allowed values

func (*ExecuteStatementRequestOnWaitTimeout) String added in v0.20.0

String representation for fmt.Print

func (*ExecuteStatementRequestOnWaitTimeout) Type added in v0.20.0

Type always returns ExecuteStatementRequestOnWaitTimeout to satisfy [pflag.Value] interface

type ExecuteStatementResponse added in v0.3.0

type ExecuteStatementResponse struct {
	// The result manifest provides schema and metadata for the result set.
	Manifest *ResultManifest `json:"manifest,omitempty"`
	// Contains the result data of a single chunk when using `INLINE`
	// disposition. When using `EXTERNAL_LINKS` disposition, the array
	// `external_links` is used instead to provide presigned URLs to the result
	// data in cloud storage. Exactly one of these alternatives is used. (While
	// the `external_links` array prepares the API to return multiple links in a
	// single response. Currently only a single link is returned.)
	Result *ResultData `json:"result,omitempty"`
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"statement_id,omitempty"`
	// The status response includes execution state and if relevant, error
	// information.
	Status *StatementStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ExecuteStatementResponse) MarshalJSON added in v0.23.0

func (s ExecuteStatementResponse) MarshalJSON() ([]byte, error)

func (*ExecuteStatementResponse) UnmarshalJSON added in v0.23.0

func (s *ExecuteStatementResponse) UnmarshalJSON(b []byte) error
type ExternalLink struct {
	// The number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// Indicates the date-time that the given external link will expire and
	// becomes invalid, after which point a new `external_link` must be
	// requested.
	Expiration string `json:"expiration,omitempty"`
	// A presigned URL pointing to a chunk of result data, hosted by an external
	// service, with a short expiration time (<= 15 minutes). As this URL
	// contains a temporary credential, it should be considered sensitive and
	// the client should not expose this URL in a log.
	ExternalLink string `json:"external_link,omitempty"`
	// When fetching, provides the `chunk_index` for the _next_ chunk. If
	// absent, indicates there are no more chunks. The next chunk can be fetched
	// with a :method:statementexecution/getStatementResultChunkN request.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, provides a link to fetch the _next_ chunk. If absent,
	// indicates there are no more chunks. This link is an absolute `path` to be
	// joined with your `$DATABRICKS_HOST`, and should be treated as an opaque
	// link. This is an alternative to using `next_chunk_index`.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ExternalLink) MarshalJSON added in v0.23.0

func (s ExternalLink) MarshalJSON() ([]byte, error)

func (*ExternalLink) UnmarshalJSON added in v0.23.0

func (s *ExternalLink) UnmarshalJSON(b []byte) error

type Format added in v0.3.0

type Format string
const FormatArrowStream Format = `ARROW_STREAM`
const FormatCsv Format = `CSV`
const FormatJsonArray Format = `JSON_ARRAY`

func (*Format) Set added in v0.3.0

func (f *Format) Set(v string) error

Set raw string value and validate it against allowed values

func (*Format) String added in v0.3.0

func (f *Format) String() string

String representation for fmt.Print

func (*Format) Type added in v0.3.0

func (f *Format) Type() string

Type always returns Format to satisfy [pflag.Value] interface

type GetAlertRequest

type GetAlertRequest struct {
	AlertId string `json:"-" url:"-"`
}

Get an alert

type GetDashboardRequest

type GetDashboardRequest struct {
	DashboardId string `json:"-" url:"-"`
}

Retrieve a definition

type GetDbsqlPermissionRequest

type GetDbsqlPermissionRequest struct {
	// Object ID. An ACL is returned for the object with this UUID.
	ObjectId string `json:"-" url:"-"`
	// The type of object permissions to check.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Get object ACL

type GetQueryRequest

type GetQueryRequest struct {
	QueryId string `json:"-" url:"-"`
}

Get a query definition.

type GetResponse

type GetResponse struct {
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetResponse) MarshalJSON added in v0.23.0

func (s GetResponse) MarshalJSON() ([]byte, error)

func (*GetResponse) UnmarshalJSON added in v0.23.0

func (s *GetResponse) UnmarshalJSON(b []byte) error

type GetStatementRequest added in v0.3.0

type GetStatementRequest struct {
	StatementId string `json:"-" url:"-"`
}

Get status, manifest, and result first chunk

type GetStatementResponse added in v0.3.0

type GetStatementResponse struct {
	// The result manifest provides schema and metadata for the result set.
	Manifest *ResultManifest `json:"manifest,omitempty"`
	// Contains the result data of a single chunk when using `INLINE`
	// disposition. When using `EXTERNAL_LINKS` disposition, the array
	// `external_links` is used instead to provide presigned URLs to the result
	// data in cloud storage. Exactly one of these alternatives is used. (While
	// the `external_links` array prepares the API to return multiple links in a
	// single response. Currently only a single link is returned.)
	Result *ResultData `json:"result,omitempty"`
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"statement_id,omitempty"`
	// The status response includes execution state and if relevant, error
	// information.
	Status *StatementStatus `json:"status,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetStatementResponse) MarshalJSON added in v0.23.0

func (s GetStatementResponse) MarshalJSON() ([]byte, error)

func (*GetStatementResponse) UnmarshalJSON added in v0.23.0

func (s *GetStatementResponse) UnmarshalJSON(b []byte) error

type GetStatementResultChunkNRequest added in v0.3.0

type GetStatementResultChunkNRequest struct {
	ChunkIndex int `json:"-" url:"-"`

	StatementId string `json:"-" url:"-"`
}

Get result chunk by index

type GetWarehousePermissionLevelsRequest added in v0.15.0

type GetWarehousePermissionLevelsRequest struct {
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

Get SQL warehouse permission levels

type GetWarehousePermissionLevelsResponse added in v0.15.0

type GetWarehousePermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []WarehousePermissionsDescription `json:"permission_levels,omitempty"`
}

type GetWarehousePermissionsRequest added in v0.15.0

type GetWarehousePermissionsRequest struct {
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

Get SQL warehouse permissions

type GetWarehouseRequest

type GetWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Get warehouse info

type GetWarehouseResponse

type GetWarehouseResponse struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configurations whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType GetWarehouseResponseWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetWarehouseResponse) MarshalJSON added in v0.23.0

func (s GetWarehouseResponse) MarshalJSON() ([]byte, error)

func (*GetWarehouseResponse) UnmarshalJSON added in v0.23.0

func (s *GetWarehouseResponse) UnmarshalJSON(b []byte) error

type GetWarehouseResponseWarehouseType added in v0.9.0

type GetWarehouseResponseWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const GetWarehouseResponseWarehouseTypeClassic GetWarehouseResponseWarehouseType = `CLASSIC`
const GetWarehouseResponseWarehouseTypePro GetWarehouseResponseWarehouseType = `PRO`
const GetWarehouseResponseWarehouseTypeTypeUnspecified GetWarehouseResponseWarehouseType = `TYPE_UNSPECIFIED`

func (*GetWarehouseResponseWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*GetWarehouseResponseWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*GetWarehouseResponseWarehouseType) Type added in v0.9.0

Type always returns GetWarehouseResponseWarehouseType to satisfy [pflag.Value] interface

type GetWorkspaceWarehouseConfigResponse

type GetWorkspaceWarehouseConfigResponse struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration JSON serialized
	// size must be less than <= 512K
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy GetWorkspaceWarehouseConfigResponseSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (GetWorkspaceWarehouseConfigResponse) MarshalJSON added in v0.23.0

func (s GetWorkspaceWarehouseConfigResponse) MarshalJSON() ([]byte, error)

func (*GetWorkspaceWarehouseConfigResponse) UnmarshalJSON added in v0.23.0

func (s *GetWorkspaceWarehouseConfigResponse) UnmarshalJSON(b []byte) error

type GetWorkspaceWarehouseConfigResponseSecurityPolicy

type GetWorkspaceWarehouseConfigResponseSecurityPolicy string

Security policy for warehouses

const GetWorkspaceWarehouseConfigResponseSecurityPolicyDataAccessControl GetWorkspaceWarehouseConfigResponseSecurityPolicy = `DATA_ACCESS_CONTROL`
const GetWorkspaceWarehouseConfigResponseSecurityPolicyNone GetWorkspaceWarehouseConfigResponseSecurityPolicy = `NONE`
const GetWorkspaceWarehouseConfigResponseSecurityPolicyPassthrough GetWorkspaceWarehouseConfigResponseSecurityPolicy = `PASSTHROUGH`

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) String

String representation for fmt.Print

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Type

Type always returns GetWorkspaceWarehouseConfigResponseSecurityPolicy to satisfy [pflag.Value] interface

type ListDashboardsRequest

type ListDashboardsRequest struct {
	// Name of dashboard attribute to order by.
	Order ListOrder `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of dashboards to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term.
	Q string `json:"-" url:"q,omitempty"`

	ForceSendFields []string `json:"-"`
}

Get dashboard objects

func (ListDashboardsRequest) MarshalJSON added in v0.23.0

func (s ListDashboardsRequest) MarshalJSON() ([]byte, error)

func (*ListDashboardsRequest) UnmarshalJSON added in v0.23.0

func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error

type ListOrder

type ListOrder string
const ListOrderCreatedAt ListOrder = `created_at`
const ListOrderName ListOrder = `name`

func (*ListOrder) Set

func (f *ListOrder) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListOrder) String

func (f *ListOrder) String() string

String representation for fmt.Print

func (*ListOrder) Type

func (f *ListOrder) Type() string

Type always returns ListOrder to satisfy [pflag.Value] interface

type ListQueriesRequest

type ListQueriesRequest struct {
	// Name of query attribute to order by. Default sort order is ascending.
	// Append a dash (`-`) to order descending instead.
	//
	// - `name`: The name of the query.
	//
	// - `created_at`: The timestamp the query was created.
	//
	// - `runtime`: The time it took to run this query. This is blank for
	// parameterized queries. A blank value is treated as the highest value for
	// sorting.
	//
	// - `executed_at`: The timestamp when the query was last run.
	//
	// - `created_by`: The user name of the user that created the query.
	Order string `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of queries to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term
	Q string `json:"-" url:"q,omitempty"`

	ForceSendFields []string `json:"-"`
}

Get a list of queries

func (ListQueriesRequest) MarshalJSON added in v0.23.0

func (s ListQueriesRequest) MarshalJSON() ([]byte, error)

func (*ListQueriesRequest) UnmarshalJSON added in v0.23.0

func (s *ListQueriesRequest) UnmarshalJSON(b []byte) error

type ListQueriesResponse

type ListQueriesResponse struct {
	// Whether there is another page of results.
	HasNextPage bool `json:"has_next_page,omitempty"`
	// A token that can be used to get the next page of results.
	NextPageToken string `json:"next_page_token,omitempty"`

	Res []QueryInfo `json:"res,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ListQueriesResponse) MarshalJSON added in v0.23.0

func (s ListQueriesResponse) MarshalJSON() ([]byte, error)

func (*ListQueriesResponse) UnmarshalJSON added in v0.23.0

func (s *ListQueriesResponse) UnmarshalJSON(b []byte) error

type ListQueryHistoryRequest

type ListQueryHistoryRequest struct {
	// A filter to limit query history results. This field is optional.
	FilterBy *QueryFilter `json:"-" url:"filter_by,omitempty"`
	// Whether to include metrics about query.
	IncludeMetrics bool `json:"-" url:"include_metrics,omitempty"`
	// Limit the number of results returned in one page. The default is 100.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// A token that can be used to get the next page of results.
	PageToken string `json:"-" url:"page_token,omitempty"`

	ForceSendFields []string `json:"-"`
}

List Queries

func (ListQueryHistoryRequest) MarshalJSON added in v0.23.0

func (s ListQueryHistoryRequest) MarshalJSON() ([]byte, error)

func (*ListQueryHistoryRequest) UnmarshalJSON added in v0.23.0

func (s *ListQueryHistoryRequest) UnmarshalJSON(b []byte) error

type ListResponse

type ListResponse struct {
	// The total number of dashboards.
	Count int `json:"count,omitempty"`
	// The current page being displayed.
	Page int `json:"page,omitempty"`
	// The number of dashboards per page.
	PageSize int `json:"page_size,omitempty"`
	// List of dashboards returned.
	Results []Dashboard `json:"results,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ListResponse) MarshalJSON added in v0.23.0

func (s ListResponse) MarshalJSON() ([]byte, error)

func (*ListResponse) UnmarshalJSON added in v0.23.0

func (s *ListResponse) UnmarshalJSON(b []byte) error

type ListWarehousesRequest

type ListWarehousesRequest struct {
	// Service Principal which will be used to fetch the list of warehouses. If
	// not specified, the user from the session header is used.
	RunAsUserId int `json:"-" url:"run_as_user_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

List warehouses

func (ListWarehousesRequest) MarshalJSON added in v0.23.0

func (s ListWarehousesRequest) MarshalJSON() ([]byte, error)

func (*ListWarehousesRequest) UnmarshalJSON added in v0.23.0

func (s *ListWarehousesRequest) UnmarshalJSON(b []byte) error

type ListWarehousesResponse

type ListWarehousesResponse struct {
	// A list of warehouses and their configurations.
	Warehouses []EndpointInfo `json:"warehouses,omitempty"`
}

type ObjectType

type ObjectType string

A singular noun object type.

const ObjectTypeAlert ObjectType = `alert`
const ObjectTypeDashboard ObjectType = `dashboard`
const ObjectTypeDataSource ObjectType = `data_source`
const ObjectTypeQuery ObjectType = `query`

func (*ObjectType) Set

func (f *ObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectType) String

func (f *ObjectType) String() string

String representation for fmt.Print

func (*ObjectType) Type

func (f *ObjectType) Type() string

Type always returns ObjectType to satisfy [pflag.Value] interface

type ObjectTypePlural

type ObjectTypePlural string

Always a plural of the object type.

const ObjectTypePluralAlerts ObjectTypePlural = `alerts`
const ObjectTypePluralDashboards ObjectTypePlural = `dashboards`
const ObjectTypePluralDataSources ObjectTypePlural = `data_sources`
const ObjectTypePluralQueries ObjectTypePlural = `queries`

func (*ObjectTypePlural) Set

func (f *ObjectTypePlural) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectTypePlural) String

func (f *ObjectTypePlural) String() string

String representation for fmt.Print

func (*ObjectTypePlural) Type

func (f *ObjectTypePlural) Type() string

Type always returns ObjectTypePlural to satisfy [pflag.Value] interface

type OdbcParams

type OdbcParams struct {
	Hostname string `json:"hostname,omitempty"`

	Path string `json:"path,omitempty"`

	Port int `json:"port,omitempty"`

	Protocol string `json:"protocol,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (OdbcParams) MarshalJSON added in v0.23.0

func (s OdbcParams) MarshalJSON() ([]byte, error)

func (*OdbcParams) UnmarshalJSON added in v0.23.0

func (s *OdbcParams) UnmarshalJSON(b []byte) error

type OwnableObjectType

type OwnableObjectType string

The singular form of the type of object which can be owned.

const OwnableObjectTypeAlert OwnableObjectType = `alert`
const OwnableObjectTypeDashboard OwnableObjectType = `dashboard`
const OwnableObjectTypeQuery OwnableObjectType = `query`

func (*OwnableObjectType) Set

func (f *OwnableObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*OwnableObjectType) String

func (f *OwnableObjectType) String() string

String representation for fmt.Print

func (*OwnableObjectType) Type

func (f *OwnableObjectType) Type() string

Type always returns OwnableObjectType to satisfy [pflag.Value] interface

type Parameter

type Parameter struct {
	// The literal parameter marker that appears between double curly braces in
	// the query text.
	Name string `json:"name,omitempty"`
	// The text displayed in a parameter picking widget.
	Title string `json:"title,omitempty"`
	// Parameters can have several different types.
	Type ParameterType `json:"type,omitempty"`
	// The default value for this parameter.
	Value any `json:"value,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Parameter) MarshalJSON added in v0.23.0

func (s Parameter) MarshalJSON() ([]byte, error)

func (*Parameter) UnmarshalJSON added in v0.23.0

func (s *Parameter) UnmarshalJSON(b []byte) error

type ParameterType

type ParameterType string

Parameters can have several different types.

const ParameterTypeDatetime ParameterType = `datetime`
const ParameterTypeNumber ParameterType = `number`
const ParameterTypeText ParameterType = `text`

func (*ParameterType) Set

func (f *ParameterType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ParameterType) String

func (f *ParameterType) String() string

String representation for fmt.Print

func (*ParameterType) Type

func (f *ParameterType) Type() string

Type always returns ParameterType to satisfy [pflag.Value] interface

type PermissionLevel

type PermissionLevel string

* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * `CAN_MANAGE`: Can manage the query

const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE`

Can manage the query

const PermissionLevelCanRun PermissionLevel = `CAN_RUN`

Can run the query

const PermissionLevelCanView PermissionLevel = `CAN_VIEW`

Can view the query

func (*PermissionLevel) Set

func (f *PermissionLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*PermissionLevel) String

func (f *PermissionLevel) String() string

String representation for fmt.Print

func (*PermissionLevel) Type

func (f *PermissionLevel) Type() string

Type always returns PermissionLevel to satisfy [pflag.Value] interface

type PlansState

type PlansState string

Whether plans exist for the execution, or the reason why they are missing

const PlansStateEmpty PlansState = `EMPTY`
const PlansStateExists PlansState = `EXISTS`
const PlansStateIgnoredLargePlansSize PlansState = `IGNORED_LARGE_PLANS_SIZE`
const PlansStateIgnoredSmallDuration PlansState = `IGNORED_SMALL_DURATION`
const PlansStateIgnoredSparkPlanType PlansState = `IGNORED_SPARK_PLAN_TYPE`
const PlansStateUnknown PlansState = `UNKNOWN`

func (*PlansState) Set

func (f *PlansState) Set(v string) error

Set raw string value and validate it against allowed values

func (*PlansState) String

func (f *PlansState) String() string

String representation for fmt.Print

func (*PlansState) Type

func (f *PlansState) Type() string

Type always returns PlansState to satisfy [pflag.Value] interface

type QueriesAPI

type QueriesAPI struct {
	// contains filtered or unexported fields
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewQueries

func NewQueries(client *client.DatabricksClient) *QueriesAPI

func (*QueriesAPI) Create

func (a *QueriesAPI) Create(ctx context.Context, request QueryPostContent) (*Query, error)

Create a new query definition.

Creates a new query definition. Queries created with this endpoint belong to the authenticated user making the request.

The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the `data_source_id` from an existing query.

**Note**: You cannot add a visualization until you create the query.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SELECT 1",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) Delete

func (a *QueriesAPI) Delete(ctx context.Context, request DeleteQueryRequest) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days.

func (*QueriesAPI) DeleteByQueryId

func (a *QueriesAPI) DeleteByQueryId(ctx context.Context, queryId string) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days.

func (*QueriesAPI) Get

func (a *QueriesAPI) Get(ctx context.Context, request GetQueryRequest) (*Query, error)

Get a query definition.

Retrieve a query object definition along with contextual permissions information about the currently authenticated user.

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

byId, err := w.Queries.GetByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) GetByName

func (a *QueriesAPI) GetByName(ctx context.Context, name string) (*Query, error)

GetByName calls QueriesAPI.QueryNameToIdMap and returns a single Query.

Returns an error if there's more than one Query with the same .Name.

Note: All Query instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) GetByQueryId

func (a *QueriesAPI) GetByQueryId(ctx context.Context, queryId string) (*Query, error)

Get a query definition.

Retrieve a query object definition along with contextual permissions information about the currently authenticated user.

func (*QueriesAPI) Impl

func (a *QueriesAPI) Impl() QueriesService

Impl returns low-level Queries API implementation

func (*QueriesAPI) List added in v0.24.0

Get a list of queries.

Gets a list of queries. Optionally, this list can be filtered by a search term.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListAll

func (a *QueriesAPI) ListAll(ctx context.Context, request ListQueriesRequest) ([]Query, error)

Get a list of queries.

Gets a list of queries. Optionally, this list can be filtered by a search term.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) QueryNameToIdMap

func (a *QueriesAPI) QueryNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error)

QueryNameToIdMap calls QueriesAPI.ListAll and creates a map of results with Query.Name as key and Query.Id as value.

Returns an error if there's more than one Query with the same .Name.

Note: All Query instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) Restore

func (a *QueriesAPI) Restore(ctx context.Context, request RestoreQueryRequest) error

Restore a query.

Restore a query that has been moved to the trash. A restored query appears in list views and searches. You can use restored queries for alerts.

func (*QueriesAPI) Update

func (a *QueriesAPI) Update(ctx context.Context, request QueryEditContent) (*Query, error)

Change a query definition.

Modify this query definition.

**Note**: You cannot undo this operation.

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.QueryPostContent{
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "test query from Go SDK",
	Query:        "SHOW TABLES",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

updated, err := w.Queries.Update(ctx, sql.QueryEditContent{
	QueryId:      query.Id,
	Name:         fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	DataSourceId: srcs[0].Id,
	Description:  "UPDATED: test query from Go SDK",
	Query:        "SELECT 2+2",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", updated)

// cleanup

err = w.Queries.DeleteByQueryId(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) WithImpl

func (a *QueriesAPI) WithImpl(impl QueriesService) *QueriesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type QueriesService

type QueriesService interface {

	// Create a new query definition.
	//
	// Creates a new query definition. Queries created with this endpoint belong
	// to the authenticated user making the request.
	//
	// The `data_source_id` field specifies the ID of the SQL warehouse to run
	// this query against. You can use the Data Sources API to see a complete
	// list of available SQL warehouses. Or you can copy the `data_source_id`
	// from an existing query.
	//
	// **Note**: You cannot add a visualization until you create the query.
	Create(ctx context.Context, request QueryPostContent) (*Query, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and they cannot be used for alerts. The trash is
	// deleted after 30 days.
	Delete(ctx context.Context, request DeleteQueryRequest) error

	// Get a query definition.
	//
	// Retrieve a query object definition along with contextual permissions
	// information about the currently authenticated user.
	Get(ctx context.Context, request GetQueryRequest) (*Query, error)

	// Get a list of queries.
	//
	// Gets a list of queries. Optionally, this list can be filtered by a search
	// term.
	//
	// Use ListAll() to get all Query instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueriesRequest) (*QueryList, error)

	// Restore a query.
	//
	// Restore a query that has been moved to the trash. A restored query
	// appears in list views and searches. You can use restored queries for
	// alerts.
	Restore(ctx context.Context, request RestoreQueryRequest) error

	// Change a query definition.
	//
	// Modify this query definition.
	//
	// **Note**: You cannot undo this operation.
	Update(ctx context.Context, request QueryEditContent) (*Query, error)
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type Query

type Query struct {
	// Describes whether the authenticated user is allowed to edit the
	// definition of this query.
	CanEdit bool `json:"can_edit,omitempty"`
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Whether this query object appears in the current user's favorites list.
	// This flag determines whether the star icon for favorites is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`

	LastModifiedBy *User `json:"last_modified_by,omitempty"`
	// The ID of the user who last saved changes to this query.
	LastModifiedById int `json:"last_modified_by_id,omitempty"`
	// If there is a cached result for this query and user, this field includes
	// the query result ID. If this query uses parameters, this field is always
	// null.
	LatestQueryDataId string `json:"latest_query_data_id,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	Options *QueryOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_MANAGE`: Can manage the query
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// A SHA-256 hash of the query text along with the authenticated user ID.
	QueryHash string `json:"query_hash,omitempty"`
	// Run as role
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	User *User `json:"user,omitempty"`
	// The ID of the user who created this query.
	UserId int `json:"user_id,omitempty"`

	Visualizations []Visualization `json:"visualizations,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Query) MarshalJSON added in v0.23.0

func (s Query) MarshalJSON() ([]byte, error)

func (*Query) UnmarshalJSON added in v0.23.0

func (s *Query) UnmarshalJSON(b []byte) error

type QueryEditContent added in v0.3.0

type QueryEditContent struct {
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list of parameter definitions. A parameter is
	// an object with `title`, `name`, `type`, and `value` properties. The
	// `value` field here is the default value. It can be overridden at runtime.
	Options any `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`

	QueryId string `json:"-" url:"-"`

	ForceSendFields []string `json:"-"`
}

func (QueryEditContent) MarshalJSON added in v0.23.0

func (s QueryEditContent) MarshalJSON() ([]byte, error)

func (*QueryEditContent) UnmarshalJSON added in v0.23.0

func (s *QueryEditContent) UnmarshalJSON(b []byte) error

type QueryFilter

type QueryFilter struct {
	QueryStartTimeRange *TimeRange `json:"query_start_time_range,omitempty" url:"query_start_time_range,omitempty"`

	Statuses []QueryStatus `json:"statuses,omitempty" url:"statuses,omitempty"`
	// A list of user IDs who ran the queries.
	UserIds []int `json:"user_ids,omitempty" url:"user_ids,omitempty"`
	// A list of warehouse IDs.
	WarehouseIds []string `json:"warehouse_ids,omitempty" url:"warehouse_ids,omitempty"`
}

A filter to limit query history results. This field is optional.

type QueryHistoryAPI

type QueryHistoryAPI struct {
	// contains filtered or unexported fields
}

Access the history of queries through SQL warehouses.

func NewQueryHistory

func NewQueryHistory(client *client.DatabricksClient) *QueryHistoryAPI

func (*QueryHistoryAPI) Impl

Impl returns low-level QueryHistory API implementation

func (*QueryHistoryAPI) List added in v0.24.0

List Queries.

List the history of queries through SQL warehouses.

You can filter by user ID, warehouse ID, status, and time range.

This method is generated by Databricks SDK Code Generator.

func (*QueryHistoryAPI) ListAll

func (a *QueryHistoryAPI) ListAll(ctx context.Context, request ListQueryHistoryRequest) ([]QueryInfo, error)

List Queries.

List the history of queries through SQL warehouses.

You can filter by user ID, warehouse ID, status, and time range.

This method is generated by Databricks SDK Code Generator.

Example (SqlQueryHistory)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

_, err = w.QueryHistory.ListAll(ctx, sql.ListQueryHistoryRequest{
	FilterBy: &sql.QueryFilter{
		QueryStartTimeRange: &sql.TimeRange{
			StartTimeMs: 1690243200000,
			EndTimeMs:   1690329600000,
		},
	},
})
if err != nil {
	panic(err)
}
Output:

func (*QueryHistoryAPI) WithImpl

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type QueryHistoryService

type QueryHistoryService interface {

	// List Queries.
	//
	// List the history of queries through SQL warehouses.
	//
	// You can filter by user ID, warehouse ID, status, and time range.
	//
	// Use ListAll() to get all QueryInfo instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)
}

Access the history of queries through SQL warehouses.

type QueryInfo

type QueryInfo struct {
	// Reserved for internal use.
	CanSubscribeToLiveQuery bool `json:"canSubscribeToLiveQuery,omitempty"`
	// Channel information for the SQL warehouse at the time of query execution
	ChannelUsed *ChannelInfo `json:"channel_used,omitempty"`
	// Total execution time of the query from the client’s point of view, in
	// milliseconds.
	Duration int `json:"duration,omitempty"`
	// Alias for `warehouse_id`.
	EndpointId string `json:"endpoint_id,omitempty"`
	// Message describing why the query could not complete.
	ErrorMessage string `json:"error_message,omitempty"`
	// The ID of the user whose credentials were used to run the query.
	ExecutedAsUserId int `json:"executed_as_user_id,omitempty"`
	// The email address or username of the user whose credentials were used to
	// run the query.
	ExecutedAsUserName string `json:"executed_as_user_name,omitempty"`
	// The time execution of the query ended.
	ExecutionEndTimeMs int `json:"execution_end_time_ms,omitempty"`
	// Whether more updates for the query are expected.
	IsFinal bool `json:"is_final,omitempty"`
	// A key that can be used to look up query details.
	LookupKey string `json:"lookup_key,omitempty"`
	// Metrics about query execution.
	Metrics *QueryMetrics `json:"metrics,omitempty"`
	// Whether plans exist for the execution, or the reason why they are missing
	PlansState PlansState `json:"plans_state,omitempty"`
	// The time the query ended.
	QueryEndTimeMs int `json:"query_end_time_ms,omitempty"`
	// The query ID.
	QueryId string `json:"query_id,omitempty"`
	// The time the query started.
	QueryStartTimeMs int `json:"query_start_time_ms,omitempty"`
	// The text of the query.
	QueryText string `json:"query_text,omitempty"`
	// The number of results returned by the query.
	RowsProduced int `json:"rows_produced,omitempty"`
	// URL to the query plan.
	SparkUiUrl string `json:"spark_ui_url,omitempty"`
	// Type of statement for this query
	StatementType QueryStatementType `json:"statement_type,omitempty"`
	// Query status with one of the following values: * `QUEUED`: Query has been
	// received and queued. * `RUNNING`: Query has started. * `CANCELED`: Query
	// has been cancelled by the user. * `FAILED`: Query has failed. *
	// `FINISHED`: Query has completed.
	Status QueryStatus `json:"status,omitempty"`
	// The ID of the user who ran the query.
	UserId int `json:"user_id,omitempty"`
	// The email address or username of the user who ran the query.
	UserName string `json:"user_name,omitempty"`
	// Warehouse ID.
	WarehouseId string `json:"warehouse_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (QueryInfo) MarshalJSON added in v0.23.0

func (s QueryInfo) MarshalJSON() ([]byte, error)

func (*QueryInfo) UnmarshalJSON added in v0.23.0

func (s *QueryInfo) UnmarshalJSON(b []byte) error

type QueryList

type QueryList struct {
	// The total number of queries.
	Count int `json:"count,omitempty"`
	// The page number that is currently displayed.
	Page int `json:"page,omitempty"`
	// The number of queries per page.
	PageSize int `json:"page_size,omitempty"`
	// List of queries returned.
	Results []Query `json:"results,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (QueryList) MarshalJSON added in v0.23.0

func (s QueryList) MarshalJSON() ([]byte, error)

func (*QueryList) UnmarshalJSON added in v0.23.0

func (s *QueryList) UnmarshalJSON(b []byte) error

type QueryMetrics

type QueryMetrics struct {
	// Time spent loading metadata and optimizing the query, in milliseconds.
	CompilationTimeMs int `json:"compilation_time_ms,omitempty"`
	// Time spent executing the query, in milliseconds.
	ExecutionTimeMs int `json:"execution_time_ms,omitempty"`
	// Reserved for internal use.
	MetadataTimeMs int `json:"metadata_time_ms,omitempty"`
	// Total amount of data sent over the network between executor nodes during
	// shuffle, in bytes.
	NetworkSentBytes int `json:"network_sent_bytes,omitempty"`
	// Timestamp of when the query was enqueued waiting while the warehouse was
	// at max load. This field is optional and will not appear if the query
	// skipped the overloading queue.
	OverloadingQueueStartTimestamp int `json:"overloading_queue_start_timestamp,omitempty"`
	// Total execution time for all individual Photon query engine tasks in the
	// query, in milliseconds.
	PhotonTotalTimeMs int `json:"photon_total_time_ms,omitempty"`
	// Reserved for internal use.
	PlanningPhases []any `json:"planning_phases,omitempty"`
	// Reserved for internal use.
	PlanningTimeMs int `json:"planning_time_ms,omitempty"`
	// Timestamp of when the query was enqueued waiting for a cluster to be
	// provisioned for the warehouse. This field is optional and will not appear
	// if the query skipped the provisioning queue.
	ProvisioningQueueStartTimestamp int `json:"provisioning_queue_start_timestamp,omitempty"`
	// Total number of bytes in all tables not read due to pruning
	PrunedBytes int `json:"pruned_bytes,omitempty"`
	// Total number of files from all tables not read due to pruning
	PrunedFilesCount int `json:"pruned_files_count,omitempty"`
	// Timestamp of when the underlying compute started compilation of the
	// query.
	QueryCompilationStartTimestamp int `json:"query_compilation_start_timestamp,omitempty"`
	// Reserved for internal use.
	QueryExecutionTimeMs int `json:"query_execution_time_ms,omitempty"`
	// Total size of data read by the query, in bytes.
	ReadBytes int `json:"read_bytes,omitempty"`
	// Size of persistent data read from the cache, in bytes.
	ReadCacheBytes int `json:"read_cache_bytes,omitempty"`
	// Number of files read after pruning.
	ReadFilesCount int `json:"read_files_count,omitempty"`
	// Number of partitions read after pruning.
	ReadPartitionsCount int `json:"read_partitions_count,omitempty"`
	// Size of persistent data read from cloud object storage on your cloud
	// tenant, in bytes.
	ReadRemoteBytes int `json:"read_remote_bytes,omitempty"`
	// Time spent fetching the query results after the execution finished, in
	// milliseconds.
	ResultFetchTimeMs int `json:"result_fetch_time_ms,omitempty"`
	// true if the query result was fetched from cache, false otherwise.
	ResultFromCache bool `json:"result_from_cache,omitempty"`
	// Total number of rows returned by the query.
	RowsProducedCount int `json:"rows_produced_count,omitempty"`
	// Total number of rows read by the query.
	RowsReadCount int `json:"rows_read_count,omitempty"`
	// Size of data temporarily written to disk while executing the query, in
	// bytes.
	SpillToDiskBytes int `json:"spill_to_disk_bytes,omitempty"`
	// Sum of execution time for all of the query’s tasks, in milliseconds.
	TaskTotalTimeMs int `json:"task_total_time_ms,omitempty"`
	// Total execution time of the query from the client’s point of view, in
	// milliseconds.
	TotalTimeMs int `json:"total_time_ms,omitempty"`
	// Size of persistent data written to cloud object storage in your cloud
	// tenant, in bytes.
	WriteRemoteBytes int `json:"write_remote_bytes,omitempty"`

	ForceSendFields []string `json:"-"`
}

Metrics about query execution.

func (QueryMetrics) MarshalJSON added in v0.23.0

func (s QueryMetrics) MarshalJSON() ([]byte, error)

func (*QueryMetrics) UnmarshalJSON added in v0.23.0

func (s *QueryMetrics) UnmarshalJSON(b []byte) error

type QueryOptions

type QueryOptions struct {
	// The timestamp when this query was moved to trash. Only present when the
	// `is_archived` property is `true`. Trashed items are deleted after thirty
	// days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`

	Parameters []Parameter `json:"parameters,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (QueryOptions) MarshalJSON added in v0.23.0

func (s QueryOptions) MarshalJSON() ([]byte, error)

func (*QueryOptions) UnmarshalJSON added in v0.23.0

func (s *QueryOptions) UnmarshalJSON(b []byte) error

type QueryPostContent

type QueryPostContent struct {
	// Data source ID.
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list of parameter definitions. A parameter is
	// an object with `title`, `name`, `type`, and `value` properties. The
	// `value` field here is the default value. It can be overridden at runtime.
	Options any `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// Run as role
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (QueryPostContent) MarshalJSON added in v0.23.0

func (s QueryPostContent) MarshalJSON() ([]byte, error)

func (*QueryPostContent) UnmarshalJSON added in v0.23.0

func (s *QueryPostContent) UnmarshalJSON(b []byte) error

type QueryStatementType

type QueryStatementType string

Type of statement for this query

const QueryStatementTypeAlter QueryStatementType = `ALTER`
const QueryStatementTypeAnalyze QueryStatementType = `ANALYZE`
const QueryStatementTypeCopy QueryStatementType = `COPY`
const QueryStatementTypeCreate QueryStatementType = `CREATE`
const QueryStatementTypeDelete QueryStatementType = `DELETE`
const QueryStatementTypeDescribe QueryStatementType = `DESCRIBE`
const QueryStatementTypeDrop QueryStatementType = `DROP`
const QueryStatementTypeExplain QueryStatementType = `EXPLAIN`
const QueryStatementTypeGrant QueryStatementType = `GRANT`
const QueryStatementTypeInsert QueryStatementType = `INSERT`
const QueryStatementTypeMerge QueryStatementType = `MERGE`
const QueryStatementTypeOptimize QueryStatementType = `OPTIMIZE`
const QueryStatementTypeOther QueryStatementType = `OTHER`
const QueryStatementTypeRefresh QueryStatementType = `REFRESH`
const QueryStatementTypeReplace QueryStatementType = `REPLACE`
const QueryStatementTypeRevoke QueryStatementType = `REVOKE`
const QueryStatementTypeSelect QueryStatementType = `SELECT`
const QueryStatementTypeSet QueryStatementType = `SET`
const QueryStatementTypeShow QueryStatementType = `SHOW`
const QueryStatementTypeTruncate QueryStatementType = `TRUNCATE`
const QueryStatementTypeUpdate QueryStatementType = `UPDATE`
const QueryStatementTypeUse QueryStatementType = `USE`

func (*QueryStatementType) Set

func (f *QueryStatementType) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatementType) String

func (f *QueryStatementType) String() string

String representation for fmt.Print

func (*QueryStatementType) Type

func (f *QueryStatementType) Type() string

Type always returns QueryStatementType to satisfy [pflag.Value] interface

type QueryStatus

type QueryStatus string

Query status with one of the following values: * `QUEUED`: Query has been received and queued. * `RUNNING`: Query has started. * `CANCELED`: Query has been cancelled by the user. * `FAILED`: Query has failed. * `FINISHED`: Query has completed.

const QueryStatusCanceled QueryStatus = `CANCELED`

Query has been cancelled by the user.

const QueryStatusFailed QueryStatus = `FAILED`

Query has failed.

const QueryStatusFinished QueryStatus = `FINISHED`

Query has completed.

const QueryStatusQueued QueryStatus = `QUEUED`

Query has been received and queued.

const QueryStatusRunning QueryStatus = `RUNNING`

Query has started.

func (*QueryStatus) Set

func (f *QueryStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatus) String

func (f *QueryStatus) String() string

String representation for fmt.Print

func (*QueryStatus) Type

func (f *QueryStatus) Type() string

Type always returns QueryStatus to satisfy [pflag.Value] interface

type QueryVisualizationsAPI added in v0.19.0

type QueryVisualizationsAPI struct {
	// contains filtered or unexported fields
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace. Data structures may change over time.

func NewQueryVisualizations added in v0.19.0

func NewQueryVisualizations(client *client.DatabricksClient) *QueryVisualizationsAPI

func (*QueryVisualizationsAPI) Create added in v0.19.0

Add visualization to a query.

func (*QueryVisualizationsAPI) Delete added in v0.19.0

Remove visualization.

func (*QueryVisualizationsAPI) DeleteById added in v0.19.0

func (a *QueryVisualizationsAPI) DeleteById(ctx context.Context, id string) error

Remove visualization.

func (*QueryVisualizationsAPI) Impl added in v0.19.0

Impl returns low-level QueryVisualizations API implementation

func (*QueryVisualizationsAPI) Update added in v0.19.0

Edit existing visualization.

func (*QueryVisualizationsAPI) WithImpl added in v0.19.0

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type QueryVisualizationsService added in v0.19.0

type QueryVisualizationsService interface {

	// Add visualization to a query.
	Create(ctx context.Context, request CreateQueryVisualizationRequest) (*Visualization, error)

	// Remove visualization.
	Delete(ctx context.Context, request DeleteQueryVisualizationRequest) error

	// Edit existing visualization.
	Update(ctx context.Context, request Visualization) (*Visualization, error)
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace. Data structures may change over time.

type RepeatedEndpointConfPairs

type RepeatedEndpointConfPairs struct {
	// Deprecated: Use configuration_pairs
	ConfigPair []EndpointConfPair `json:"config_pair,omitempty"`

	ConfigurationPairs []EndpointConfPair `json:"configuration_pairs,omitempty"`
}

type RestoreDashboardRequest

type RestoreDashboardRequest struct {
	DashboardId string `json:"-" url:"-"`
}

Restore a dashboard

type RestoreQueryRequest

type RestoreQueryRequest struct {
	QueryId string `json:"-" url:"-"`
}

Restore a query

type ResultData added in v0.3.0

type ResultData struct {
	// The number of bytes in the result chunk.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// The `JSON_ARRAY` format is an array of arrays of values, where each
	// non-null value is formatted as a string. Null values are encoded as JSON
	// `null`.
	DataArray [][]string `json:"data_array,omitempty"`

	ExternalLinks []ExternalLink `json:"external_links,omitempty"`
	// When fetching, provides the `chunk_index` for the _next_ chunk. If
	// absent, indicates there are no more chunks. The next chunk can be fetched
	// with a :method:statementexecution/getStatementResultChunkN request.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, provides a link to fetch the _next_ chunk. If absent,
	// indicates there are no more chunks. This link is an absolute `path` to be
	// joined with your `$DATABRICKS_HOST`, and should be treated as an opaque
	// link. This is an alternative to using `next_chunk_index`.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	ForceSendFields []string `json:"-"`
}

Contains the result data of a single chunk when using `INLINE` disposition. When using `EXTERNAL_LINKS` disposition, the array `external_links` is used instead to provide presigned URLs to the result data in cloud storage. Exactly one of these alternatives is used. (While the `external_links` array prepares the API to return multiple links in a single response. Currently only a single link is returned.)

func (ResultData) MarshalJSON added in v0.23.0

func (s ResultData) MarshalJSON() ([]byte, error)

func (*ResultData) UnmarshalJSON added in v0.23.0

func (s *ResultData) UnmarshalJSON(b []byte) error

type ResultManifest added in v0.3.0

type ResultManifest struct {
	// Array of result set chunk metadata.
	Chunks []BaseChunkInfo `json:"chunks,omitempty"`

	Format Format `json:"format,omitempty"`
	// The schema is an ordered list of column descriptions.
	Schema *ResultSchema `json:"schema,omitempty"`
	// The total number of bytes in the result set. This field is not available
	// when using `INLINE` disposition.
	TotalByteCount int64 `json:"total_byte_count,omitempty"`
	// The total number of chunks that the result set has been divided into.
	TotalChunkCount int `json:"total_chunk_count,omitempty"`
	// The total number of rows in the result set.
	TotalRowCount int64 `json:"total_row_count,omitempty"`
	// Indicates whether the result is truncated due to `row_limit` or
	// `byte_limit`.
	Truncated bool `json:"truncated,omitempty"`

	ForceSendFields []string `json:"-"`
}

The result manifest provides schema and metadata for the result set.

func (ResultManifest) MarshalJSON added in v0.23.0

func (s ResultManifest) MarshalJSON() ([]byte, error)

func (*ResultManifest) UnmarshalJSON added in v0.23.0

func (s *ResultManifest) UnmarshalJSON(b []byte) error

type ResultSchema added in v0.3.0

type ResultSchema struct {
	ColumnCount int `json:"column_count,omitempty"`

	Columns []ColumnInfo `json:"columns,omitempty"`

	ForceSendFields []string `json:"-"`
}

The schema is an ordered list of column descriptions.

func (ResultSchema) MarshalJSON added in v0.23.0

func (s ResultSchema) MarshalJSON() ([]byte, error)

func (*ResultSchema) UnmarshalJSON added in v0.23.0

func (s *ResultSchema) UnmarshalJSON(b []byte) error

type RunAsRole added in v0.19.0

type RunAsRole string

Run as role

const RunAsRoleOwner RunAsRole = `owner`
const RunAsRoleViewer RunAsRole = `viewer`

func (*RunAsRole) Set added in v0.19.0

func (f *RunAsRole) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunAsRole) String added in v0.19.0

func (f *RunAsRole) String() string

String representation for fmt.Print

func (*RunAsRole) Type added in v0.19.0

func (f *RunAsRole) Type() string

Type always returns RunAsRole to satisfy [pflag.Value] interface

type ServiceError added in v0.3.0

type ServiceError struct {
	ErrorCode ServiceErrorCode `json:"error_code,omitempty"`
	// A brief summary of the error condition.
	Message string `json:"message,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ServiceError) MarshalJSON added in v0.23.0

func (s ServiceError) MarshalJSON() ([]byte, error)

func (*ServiceError) UnmarshalJSON added in v0.23.0

func (s *ServiceError) UnmarshalJSON(b []byte) error

type ServiceErrorCode added in v0.3.0

type ServiceErrorCode string
const ServiceErrorCodeAborted ServiceErrorCode = `ABORTED`
const ServiceErrorCodeAlreadyExists ServiceErrorCode = `ALREADY_EXISTS`
const ServiceErrorCodeBadRequest ServiceErrorCode = `BAD_REQUEST`
const ServiceErrorCodeCancelled ServiceErrorCode = `CANCELLED`
const ServiceErrorCodeDeadlineExceeded ServiceErrorCode = `DEADLINE_EXCEEDED`
const ServiceErrorCodeInternalError ServiceErrorCode = `INTERNAL_ERROR`
const ServiceErrorCodeIoError ServiceErrorCode = `IO_ERROR`
const ServiceErrorCodeNotFound ServiceErrorCode = `NOT_FOUND`
const ServiceErrorCodeResourceExhausted ServiceErrorCode = `RESOURCE_EXHAUSTED`
const ServiceErrorCodeServiceUnderMaintenance ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE`
const ServiceErrorCodeTemporarilyUnavailable ServiceErrorCode = `TEMPORARILY_UNAVAILABLE`
const ServiceErrorCodeUnauthenticated ServiceErrorCode = `UNAUTHENTICATED`
const ServiceErrorCodeUnknown ServiceErrorCode = `UNKNOWN`
const ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE`

func (*ServiceErrorCode) Set added in v0.3.0

func (f *ServiceErrorCode) Set(v string) error

Set raw string value and validate it against allowed values

func (*ServiceErrorCode) String added in v0.3.0

func (f *ServiceErrorCode) String() string

String representation for fmt.Print

func (*ServiceErrorCode) Type added in v0.3.0

func (f *ServiceErrorCode) Type() string

Type always returns ServiceErrorCode to satisfy [pflag.Value] interface

type SetRequest

type SetRequest struct {
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// Object ID. The ACL for the object with this UUID is overwritten by this
	// request's POST content.
	ObjectId string `json:"-" url:"-"`
	// The type of object permission to set.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Set object ACL

type SetResponse

type SetResponse struct {
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (SetResponse) MarshalJSON added in v0.23.0

func (s SetResponse) MarshalJSON() ([]byte, error)

func (*SetResponse) UnmarshalJSON added in v0.23.0

func (s *SetResponse) UnmarshalJSON(b []byte) error

type SetWorkspaceWarehouseConfigRequest

type SetWorkspaceWarehouseConfigRequest struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration. JSON serialized
	// size must be less than 512K
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy SetWorkspaceWarehouseConfigRequestSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (SetWorkspaceWarehouseConfigRequest) MarshalJSON added in v0.23.0

func (s SetWorkspaceWarehouseConfigRequest) MarshalJSON() ([]byte, error)

func (*SetWorkspaceWarehouseConfigRequest) UnmarshalJSON added in v0.23.0

func (s *SetWorkspaceWarehouseConfigRequest) UnmarshalJSON(b []byte) error

type SetWorkspaceWarehouseConfigRequestSecurityPolicy

type SetWorkspaceWarehouseConfigRequestSecurityPolicy string

Security policy for warehouses

const SetWorkspaceWarehouseConfigRequestSecurityPolicyDataAccessControl SetWorkspaceWarehouseConfigRequestSecurityPolicy = `DATA_ACCESS_CONTROL`
const SetWorkspaceWarehouseConfigRequestSecurityPolicyNone SetWorkspaceWarehouseConfigRequestSecurityPolicy = `NONE`
const SetWorkspaceWarehouseConfigRequestSecurityPolicyPassthrough SetWorkspaceWarehouseConfigRequestSecurityPolicy = `PASSTHROUGH`

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) String

String representation for fmt.Print

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Type

Type always returns SetWorkspaceWarehouseConfigRequestSecurityPolicy to satisfy [pflag.Value] interface

type SpotInstancePolicy

type SpotInstancePolicy string

Configures whether the warehouse should use spot instances.

const SpotInstancePolicyCostOptimized SpotInstancePolicy = `COST_OPTIMIZED`
const SpotInstancePolicyPolicyUnspecified SpotInstancePolicy = `POLICY_UNSPECIFIED`
const SpotInstancePolicyReliabilityOptimized SpotInstancePolicy = `RELIABILITY_OPTIMIZED`

func (*SpotInstancePolicy) Set

func (f *SpotInstancePolicy) Set(v string) error

Set raw string value and validate it against allowed values

func (*SpotInstancePolicy) String

func (f *SpotInstancePolicy) String() string

String representation for fmt.Print

func (*SpotInstancePolicy) Type

func (f *SpotInstancePolicy) Type() string

Type always returns SpotInstancePolicy to satisfy [pflag.Value] interface

type StartRequest

type StartRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Start a warehouse

type State

type State string

State of the warehouse

const StateDeleted State = `DELETED`
const StateDeleting State = `DELETING`
const StateRunning State = `RUNNING`
const StateStarting State = `STARTING`
const StateStopped State = `STOPPED`
const StateStopping State = `STOPPING`

func (*State) Set

func (f *State) Set(v string) error

Set raw string value and validate it against allowed values

func (*State) String

func (f *State) String() string

String representation for fmt.Print

func (*State) Type

func (f *State) Type() string

Type always returns State to satisfy [pflag.Value] interface

type StatementExecutionAPI added in v0.3.0

type StatementExecutionAPI struct {
	// contains filtered or unexported fields
}

The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result.

**Getting started**

We suggest beginning with the Databricks SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format. If no other parameters are specified, the server will wait for up to 10s before returning a response. If the statement has completed within this timespan, the response will include the result data as a JSON array and metadata. Otherwise, if no result is available after the 10s timeout expired, the response will provide the statement ID that can be used to poll for results by using a :method:statementexecution/getStatement request.

You can specify whether the call should behave synchronously, asynchronously or start synchronously with a fallback to asynchronous execution. This is controlled with the `wait_timeout` and `on_wait_timeout` settings. If `wait_timeout` is set between 5-50 seconds (default: 10s), the call waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID. The `on_wait_timeout` setting specifies what should happen when the timeout is reached while the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement.

In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns directly with a statement ID. The status of the statement execution can be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can be used to fetch status and results in the same way as in the asynchronous mode.

Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest contains schema information and provides metadata for each chunk in the result. Result chunks can be retrieved by index with :method:statementexecution/getStatementResultChunkN which may be called in any order and in parallel. For sequential fetching, each chunk, apart from the last, also contains a `next_chunk_index` and `next_chunk_internal_link` that point to the next chunk.

A statement can be canceled with :method:statementexecution/cancelExecution.

**Fetching result data: format and disposition**

To specify the format of the result data, use the `format` field, which can be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

There are two ways to receive statement results, controlled by the `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`:

- `INLINE`: In this mode, the result data is directly included in the response. It's best suited for smaller results. This mode can only be used with the `JSON_ARRAY` format.

- `EXTERNAL_LINKS`: In this mode, the response provides links that can be used to download the result data in chunks separately. This approach is ideal for larger results and offers higher throughput. This mode can be used with all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`.

By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`.

**Limits and limitations**

Note: The byte limit for INLINE disposition is based on internal storage metrics and will not exactly match the byte count of the actual payload.

- Statements with `disposition=INLINE` are limited to 25 MiB and will fail when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. Result sets larger than this limit will be truncated. Truncation is indicated by the `truncated` field in the result manifest. - The maximum query text size is 16 MiB. - Cancelation might silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, an outstanding statement might have already completed execution when the cancel request arrives. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - The system will auto-close a statement after one hour if the client stops polling and thus you must poll at least once an hour. - The results are only available for one hour after success; polling does not extend this.

func NewStatementExecution added in v0.3.0

func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionAPI

func (*StatementExecutionAPI) CancelExecution added in v0.3.0

func (a *StatementExecutionAPI) CancelExecution(ctx context.Context, request CancelExecutionRequest) error

Cancel statement execution.

Requests that an executing statement be canceled. Callers must poll for status to see the terminal state.

func (*StatementExecutionAPI) ExecuteAndWait added in v0.10.0

[EXPERIMENTAL] Execute a query and wait for results to be available

func (*StatementExecutionAPI) ExecuteStatement added in v0.3.0

Execute a SQL statement.

func (*StatementExecutionAPI) GetStatement added in v0.3.0

Get status, manifest, and result first chunk.

This request can be used to poll for the statement's status. When the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response.

**NOTE** This call currently might take up to 5 seconds to get the latest status and result.

func (*StatementExecutionAPI) GetStatementByStatementId added in v0.3.0

func (a *StatementExecutionAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*GetStatementResponse, error)

Get status, manifest, and result first chunk.

This request can be used to poll for the statement's status. When the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response.

**NOTE** This call currently might take up to 5 seconds to get the latest status and result.

func (*StatementExecutionAPI) GetStatementResultChunkN added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)

Get result chunk by index.

After the statement execution has `SUCCEEDED`, this request can be used to fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is typically fetched with :method:statementexecution/executeStatement or :method:statementexecution/getStatement, this request can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.

func (*StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error)

Get result chunk by index.

After the statement execution has `SUCCEEDED`, this request can be used to fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is typically fetched with :method:statementexecution/executeStatement or :method:statementexecution/getStatement, this request can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.

func (*StatementExecutionAPI) Impl added in v0.3.0

Impl returns low-level StatementExecution API implementation

func (*StatementExecutionAPI) WithImpl added in v0.3.0

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type StatementExecutionService added in v0.3.0

type StatementExecutionService interface {

	// Cancel statement execution.
	//
	// Requests that an executing statement be canceled. Callers must poll for
	// status to see the terminal state.
	CancelExecution(ctx context.Context, request CancelExecutionRequest) error

	// Execute a SQL statement.
	ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error)

	// Get status, manifest, and result first chunk.
	//
	// This request can be used to poll for the statement's status. When the
	// `status.state` field is `SUCCEEDED` it will also return the result
	// manifest and the first chunk of the result data. When the statement is in
	// the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200
	// with the state set. After at least 12 hours in terminal state, the
	// statement is removed from the warehouse and further calls will receive an
	// HTTP 404 response.
	//
	// **NOTE** This call currently might take up to 5 seconds to get the latest
	// status and result.
	GetStatement(ctx context.Context, request GetStatementRequest) (*GetStatementResponse, error)

	// Get result chunk by index.
	//
	// After the statement execution has `SUCCEEDED`, this request can be used
	// to fetch any chunk by index. Whereas the first chunk with `chunk_index=0`
	// is typically fetched with :method:statementexecution/executeStatement or
	// :method:statementexecution/getStatement, this request can be used to
	// fetch subsequent chunks. The response structure is identical to the
	// nested `result` element described in the
	// :method:statementexecution/getStatement request, and similarly includes
	// the `next_chunk_index` and `next_chunk_internal_link` fields for simple
	// iteration through the result set.
	GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)
}

The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result.

**Getting started**

We suggest beginning with the Databricks SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format. If no other parameters are specified, the server will wait for up to 10s before returning a response. If the statement has completed within this timespan, the response will include the result data as a JSON array and metadata. Otherwise, if no result is available after the 10s timeout expired, the response will provide the statement ID that can be used to poll for results by using a :method:statementexecution/getStatement request.

You can specify whether the call should behave synchronously, asynchronously or start synchronously with a fallback to asynchronous execution. This is controlled with the `wait_timeout` and `on_wait_timeout` settings. If `wait_timeout` is set between 5-50 seconds (default: 10s), the call waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID. The `on_wait_timeout` setting specifies what should happen when the timeout is reached while the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement.

In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns directly with a statement ID. The status of the statement execution can be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can be used to fetch status and results in the same way as in the asynchronous mode.

Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest contains schema information and provides metadata for each chunk in the result. Result chunks can be retrieved by index with :method:statementexecution/getStatementResultChunkN which may be called in any order and in parallel. For sequential fetching, each chunk, apart from the last, also contains a `next_chunk_index` and `next_chunk_internal_link` that point to the next chunk.

A statement can be canceled with :method:statementexecution/cancelExecution.

**Fetching result data: format and disposition**

To specify the format of the result data, use the `format` field, which can be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

There are two ways to receive statement results, controlled by the `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`:

- `INLINE`: In this mode, the result data is directly included in the response. It's best suited for smaller results. This mode can only be used with the `JSON_ARRAY` format.

- `EXTERNAL_LINKS`: In this mode, the response provides links that can be used to download the result data in chunks separately. This approach is ideal for larger results and offers higher throughput. This mode can be used with all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`.

By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`.

**Limits and limitations**

Note: The byte limit for INLINE disposition is based on internal storage metrics and will not exactly match the byte count of the actual payload.

- Statements with `disposition=INLINE` are limited to 25 MiB and will fail when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. Result sets larger than this limit will be truncated. Truncation is indicated by the `truncated` field in the result manifest. - The maximum query text size is 16 MiB. - Cancelation might silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, an outstanding statement might have already completed execution when the cancel request arrives. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - The system will auto-close a statement after one hour if the client stops polling and thus you must poll at least once an hour. - The results are only available for one hour after success; polling does not extend this.

type StatementParameterListItem added in v0.18.0

type StatementParameterListItem struct {
	// The name of a parameter marker to be substituted in the statement.
	Name string `json:"name"`
	// The data type, given as a string. For example: `INT`, `STRING`,
	// `DECIMAL(10,2)`. If no type is given the type is assumed to be `STRING`.
	// Complex types, such as `ARRAY`, `MAP`, and `STRUCT` are not supported.
	// For valid types, refer to the section [Data
	// types](/sql/language-manual/functions/cast.html) of the SQL language
	// reference.
	Type string `json:"type,omitempty"`
	// The value to substitute, represented as a string. If omitted, the value
	// is interpreted as NULL.
	Value string `json:"value,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (StatementParameterListItem) MarshalJSON added in v0.23.0

func (s StatementParameterListItem) MarshalJSON() ([]byte, error)

func (*StatementParameterListItem) UnmarshalJSON added in v0.23.0

func (s *StatementParameterListItem) UnmarshalJSON(b []byte) error

type StatementState added in v0.3.0

type StatementState string

Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution failed; reason for failure described in accompanying error message - `CANCELED`: user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful, and statement closed; result no longer available for fetch

const StatementStateCanceled StatementState = `CANCELED`

user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`

const StatementStateClosed StatementState = `CLOSED`

execution successful, and statement closed; result no longer available for fetch

const StatementStateFailed StatementState = `FAILED`

execution failed; reason for failure described in accompanying error message

const StatementStatePending StatementState = `PENDING`

waiting for warehouse

const StatementStateRunning StatementState = `RUNNING`

running

const StatementStateSucceeded StatementState = `SUCCEEDED`

execution was successful, result data available for fetch

func (*StatementState) Set added in v0.3.0

func (f *StatementState) Set(v string) error

Set raw string value and validate it against allowed values

func (*StatementState) String added in v0.3.0

func (f *StatementState) String() string

String representation for fmt.Print

func (*StatementState) Type added in v0.3.0

func (f *StatementState) Type() string

Type always returns StatementState to satisfy [pflag.Value] interface

type StatementStatus added in v0.3.0

type StatementStatus struct {
	Error *ServiceError `json:"error,omitempty"`
	// Statement execution state: - `PENDING`: waiting for warehouse -
	// `RUNNING`: running - `SUCCEEDED`: execution was successful, result data
	// available for fetch - `FAILED`: execution failed; reason for failure
	// described in accompanying error message - `CANCELED`: user canceled; can
	// come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`
	// - `CLOSED`: execution successful, and statement closed; result no longer
	// available for fetch
	State StatementState `json:"state,omitempty"`
}

The status response includes execution state and if relevant, error information.

type Status

type Status string

Health status of the warehouse.

const StatusDegraded Status = `DEGRADED`
const StatusFailed Status = `FAILED`
const StatusHealthy Status = `HEALTHY`
const StatusStatusUnspecified Status = `STATUS_UNSPECIFIED`

func (*Status) Set

func (f *Status) Set(v string) error

Set raw string value and validate it against allowed values

func (*Status) String

func (f *Status) String() string

String representation for fmt.Print

func (*Status) Type

func (f *Status) Type() string

Type always returns Status to satisfy [pflag.Value] interface

type StopRequest

type StopRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Stop a warehouse

type Success

type Success struct {
	Message SuccessMessage `json:"message,omitempty"`
}

type SuccessMessage

type SuccessMessage string
const SuccessMessageSuccess SuccessMessage = `Success`

func (*SuccessMessage) Set

func (f *SuccessMessage) Set(v string) error

Set raw string value and validate it against allowed values

func (*SuccessMessage) String

func (f *SuccessMessage) String() string

String representation for fmt.Print

func (*SuccessMessage) Type

func (f *SuccessMessage) Type() string

Type always returns SuccessMessage to satisfy [pflag.Value] interface

type TerminationReason

type TerminationReason struct {
	// status code indicating why the cluster was terminated
	Code TerminationReasonCode `json:"code,omitempty"`
	// list of parameters that provide additional information about why the
	// cluster was terminated
	Parameters map[string]string `json:"parameters,omitempty"`
	// type of the termination
	Type TerminationReasonType `json:"type,omitempty"`
}

type TerminationReasonCode

type TerminationReasonCode string

status code indicating why the cluster was terminated

const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED`
const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE`
const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE`
const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`
const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`
const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`
const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED`
const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE`
const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE`
const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE`
const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE`
const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`
const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION`
const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING`
const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING`
const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`
const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE`
const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE`
const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT`
const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`
const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE`
const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE`
const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT`
const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN`
const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST`
const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE`
const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE`
const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE`
const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY`
const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE`
const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE`
const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE`
const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY`
const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED`
const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED`
const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE`
const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE`
const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED`
const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY`
const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE`
const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE`
const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE`
const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR`
const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT`
const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE`
const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE`
const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED`
const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE`
const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`
const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY`
const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT`
const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE`
const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE`
const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE`
const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE`
const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED`
const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED`
const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR`
const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION`
const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE`
const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES`
const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD`
const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR`
const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE`
const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION`
const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE`
const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE`
const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE`
const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED`
const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE`
const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN`
const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE`
const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE`
const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST`
const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE`
const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR`
const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR`

func (*TerminationReasonCode) Set

Set raw string value and validate it against allowed values

func (*TerminationReasonCode) String

func (f *TerminationReasonCode) String() string

String representation for fmt.Print

func (*TerminationReasonCode) Type

func (f *TerminationReasonCode) Type() string

Type always returns TerminationReasonCode to satisfy [pflag.Value] interface

type TerminationReasonType

type TerminationReasonType string

type of the termination

const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR`
const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE`
const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT`
const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS`

func (*TerminationReasonType) Set

Set raw string value and validate it against allowed values

func (*TerminationReasonType) String

func (f *TerminationReasonType) String() string

String representation for fmt.Print

func (*TerminationReasonType) Type

func (f *TerminationReasonType) Type() string

Type always returns TerminationReasonType to satisfy [pflag.Value] interface

type TimeRange

type TimeRange struct {
	// Limit results to queries that started before this time.
	EndTimeMs int `json:"end_time_ms,omitempty" url:"end_time_ms,omitempty"`
	// Limit results to queries that started after this time.
	StartTimeMs int `json:"start_time_ms,omitempty" url:"start_time_ms,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (TimeRange) MarshalJSON added in v0.23.0

func (s TimeRange) MarshalJSON() ([]byte, error)

func (*TimeRange) UnmarshalJSON added in v0.23.0

func (s *TimeRange) UnmarshalJSON(b []byte) error

type TransferOwnershipObjectId

type TransferOwnershipObjectId struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (TransferOwnershipObjectId) MarshalJSON added in v0.23.0

func (s TransferOwnershipObjectId) MarshalJSON() ([]byte, error)

func (*TransferOwnershipObjectId) UnmarshalJSON added in v0.23.0

func (s *TransferOwnershipObjectId) UnmarshalJSON(b []byte) error

type TransferOwnershipRequest

type TransferOwnershipRequest struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`
	// The ID of the object on which to change ownership.
	ObjectId TransferOwnershipObjectId `json:"-" url:"-"`
	// The type of object on which to change ownership.
	ObjectType OwnableObjectType `json:"-" url:"-"`

	ForceSendFields []string `json:"-"`
}

Transfer object ownership

func (TransferOwnershipRequest) MarshalJSON added in v0.23.0

func (s TransferOwnershipRequest) MarshalJSON() ([]byte, error)

func (*TransferOwnershipRequest) UnmarshalJSON added in v0.23.0

func (s *TransferOwnershipRequest) UnmarshalJSON(b []byte) error

type User

type User struct {
	Email string `json:"email,omitempty"`

	Id int `json:"id,omitempty"`

	Name string `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (User) MarshalJSON added in v0.23.0

func (s User) MarshalJSON() ([]byte, error)

func (*User) UnmarshalJSON added in v0.23.0

func (s *User) UnmarshalJSON(b []byte) error

type Visualization

type Visualization struct {
	CreatedAt string `json:"created_at,omitempty"`
	// A short description of this visualization. This is not displayed in the
	// UI.
	Description string `json:"description,omitempty"`
	// The UUID for this visualization.
	Id string `json:"id,omitempty" url:"-"`
	// The name of the visualization that appears on dashboards and the query
	// screen.
	Name string `json:"name,omitempty"`
	// The options object varies widely from one visualization type to the next
	// and is unsupported. Databricks does not recommend modifying visualization
	// settings in JSON.
	Options any `json:"options,omitempty"`
	// The type of visualization: chart, table, pivot table, and so on.
	Type string `json:"type,omitempty"`

	UpdatedAt string `json:"updated_at,omitempty"`

	ForceSendFields []string `json:"-"`
}

The visualization description API changes frequently and is unsupported. You can duplicate a visualization by copying description objects received _from the API_ and then using them to create a new one with a POST request to the same endpoint. Databricks does not recommend constructing ad-hoc visualizations entirely in JSON.

func (Visualization) MarshalJSON added in v0.23.0

func (s Visualization) MarshalJSON() ([]byte, error)

func (*Visualization) UnmarshalJSON added in v0.23.0

func (s *Visualization) UnmarshalJSON(b []byte) error

type WaitGetWarehouseRunning added in v0.10.0

type WaitGetWarehouseRunning[R any] struct {
	Response *R
	Id       string `json:"id"`
	// contains filtered or unexported fields
}

WaitGetWarehouseRunning is a wrapper that calls WarehousesAPI.WaitGetWarehouseRunning and waits to reach RUNNING state.

func (*WaitGetWarehouseRunning[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseRunning[R]

OnProgress invokes a callback every time it polls for the status update.

type WaitGetWarehouseStopped added in v0.10.0

type WaitGetWarehouseStopped[R any] struct {
	Response *R
	Id       string `json:"id"`
	// contains filtered or unexported fields
}

WaitGetWarehouseStopped is a wrapper that calls WarehousesAPI.WaitGetWarehouseStopped and waits to reach STOPPED state.

func (*WaitGetWarehouseStopped[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseStopped[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseStopped[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseStopped[R]

OnProgress invokes a callback every time it polls for the status update.

type WarehouseAccessControlRequest added in v0.15.0

type WarehouseAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`
	// Application ID of an active service principal. Setting this field
	// requires the `servicePrincipal/user` role.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehouseAccessControlRequest) MarshalJSON added in v0.23.0

func (s WarehouseAccessControlRequest) MarshalJSON() ([]byte, error)

func (*WarehouseAccessControlRequest) UnmarshalJSON added in v0.23.0

func (s *WarehouseAccessControlRequest) UnmarshalJSON(b []byte) error

type WarehouseAccessControlResponse added in v0.15.0

type WarehouseAccessControlResponse struct {
	// All permissions.
	AllPermissions []WarehousePermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehouseAccessControlResponse) MarshalJSON added in v0.23.0

func (s WarehouseAccessControlResponse) MarshalJSON() ([]byte, error)

func (*WarehouseAccessControlResponse) UnmarshalJSON added in v0.23.0

func (s *WarehouseAccessControlResponse) UnmarshalJSON(b []byte) error

type WarehousePermission added in v0.15.0

type WarehousePermission struct {
	Inherited bool `json:"inherited,omitempty"`

	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehousePermission) MarshalJSON added in v0.23.0

func (s WarehousePermission) MarshalJSON() ([]byte, error)

func (*WarehousePermission) UnmarshalJSON added in v0.23.0

func (s *WarehousePermission) UnmarshalJSON(b []byte) error

type WarehousePermissionLevel added in v0.15.0

type WarehousePermissionLevel string

Permission level

const WarehousePermissionLevelCanManage WarehousePermissionLevel = `CAN_MANAGE`
const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE`
const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER`

func (*WarehousePermissionLevel) Set added in v0.15.0

Set raw string value and validate it against allowed values

func (*WarehousePermissionLevel) String added in v0.15.0

func (f *WarehousePermissionLevel) String() string

String representation for fmt.Print

func (*WarehousePermissionLevel) Type added in v0.15.0

func (f *WarehousePermissionLevel) Type() string

Type always returns WarehousePermissionLevel to satisfy [pflag.Value] interface

type WarehousePermissions added in v0.15.0

type WarehousePermissions struct {
	AccessControlList []WarehouseAccessControlResponse `json:"access_control_list,omitempty"`

	ObjectId string `json:"object_id,omitempty"`

	ObjectType string `json:"object_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehousePermissions) MarshalJSON added in v0.23.0

func (s WarehousePermissions) MarshalJSON() ([]byte, error)

func (*WarehousePermissions) UnmarshalJSON added in v0.23.0

func (s *WarehousePermissions) UnmarshalJSON(b []byte) error

type WarehousePermissionsDescription added in v0.15.0

type WarehousePermissionsDescription struct {
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehousePermissionsDescription) MarshalJSON added in v0.23.0

func (s WarehousePermissionsDescription) MarshalJSON() ([]byte, error)

func (*WarehousePermissionsDescription) UnmarshalJSON added in v0.23.0

func (s *WarehousePermissionsDescription) UnmarshalJSON(b []byte) error

type WarehousePermissionsRequest added in v0.15.0

type WarehousePermissionsRequest struct {
	AccessControlList []WarehouseAccessControlRequest `json:"access_control_list,omitempty"`
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

type WarehouseTypePair

type WarehouseTypePair struct {
	// If set to false the specific warehouse type will not be allowed as a
	// value for warehouse_type in CreateWarehouse and EditWarehouse
	Enabled bool `json:"enabled,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`.
	WarehouseType WarehouseTypePairWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WarehouseTypePair) MarshalJSON added in v0.23.0

func (s WarehouseTypePair) MarshalJSON() ([]byte, error)

func (*WarehouseTypePair) UnmarshalJSON added in v0.23.0

func (s *WarehouseTypePair) UnmarshalJSON(b []byte) error

type WarehouseTypePairWarehouseType added in v0.9.0

type WarehouseTypePairWarehouseType string

Warehouse type: `PRO` or `CLASSIC`.

const WarehouseTypePairWarehouseTypeClassic WarehouseTypePairWarehouseType = `CLASSIC`
const WarehouseTypePairWarehouseTypePro WarehouseTypePairWarehouseType = `PRO`
const WarehouseTypePairWarehouseTypeTypeUnspecified WarehouseTypePairWarehouseType = `TYPE_UNSPECIFIED`

func (*WarehouseTypePairWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*WarehouseTypePairWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*WarehouseTypePairWarehouseType) Type added in v0.9.0

Type always returns WarehouseTypePairWarehouseType to satisfy [pflag.Value] interface

type WarehousesAPI

type WarehousesAPI struct {
	// contains filtered or unexported fields
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

func NewWarehouses

func NewWarehouses(client *client.DatabricksClient) *WarehousesAPI

func (*WarehousesAPI) Create

Create a warehouse.

Creates a new SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) CreateAndWait deprecated

func (a *WarehousesAPI) CreateAndWait(ctx context.Context, createWarehouseRequest CreateWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Create and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Create.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Delete

func (a *WarehousesAPI) Delete(ctx context.Context, request DeleteWarehouseRequest) error

Delete a warehouse.

Deletes a SQL warehouse.

func (*WarehousesAPI) DeleteById

func (a *WarehousesAPI) DeleteById(ctx context.Context, id string) error

Delete a warehouse.

Deletes a SQL warehouse.

func (*WarehousesAPI) Edit

func (a *WarehousesAPI) Edit(ctx context.Context, editWarehouseRequest EditWarehouseRequest) (*WaitGetWarehouseRunning[any], error)

Update a warehouse.

Updates the configuration for a SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Warehouses.Edit(ctx, sql.EditWarehouseRequest{
	Id:             created.Id,
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) EditAndWait deprecated

func (a *WarehousesAPI) EditAndWait(ctx context.Context, editWarehouseRequest EditWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Edit and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Edit.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) EndpointInfoNameToIdMap

func (a *WarehousesAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error)

EndpointInfoNameToIdMap calls WarehousesAPI.ListAll and creates a map of results with EndpointInfo.Name as key and EndpointInfo.Id as value.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) Get

Get warehouse info.

Gets the information for a single SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

wh, err := w.Warehouses.GetById(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", wh)

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) GetById

Get warehouse info.

Gets the information for a single SQL warehouse.

func (*WarehousesAPI) GetByName

func (a *WarehousesAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error)

GetByName calls WarehousesAPI.EndpointInfoNameToIdMap and returns a single EndpointInfo.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) GetPermissionLevels added in v0.19.0

Get SQL warehouse permission levels.

Gets the permission levels that a user can have on an object.

func (*WarehousesAPI) GetPermissionLevelsByWarehouseId added in v0.19.0

func (a *WarehousesAPI) GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error)

Get SQL warehouse permission levels.

Gets the permission levels that a user can have on an object.

func (*WarehousesAPI) GetPermissions added in v0.19.0

Get SQL warehouse permissions.

Gets the permissions of a SQL warehouse. SQL warehouses can inherit permissions from their root object.

func (*WarehousesAPI) GetPermissionsByWarehouseId added in v0.19.0

func (a *WarehousesAPI) GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error)

Get SQL warehouse permissions.

Gets the permissions of a SQL warehouse. SQL warehouses can inherit permissions from their root object.

func (*WarehousesAPI) GetWorkspaceWarehouseConfig

func (a *WarehousesAPI) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

Get the workspace configuration.

Gets the workspace level configuration that is shared by all SQL warehouses in a workspace.

func (*WarehousesAPI) Impl

func (a *WarehousesAPI) Impl() WarehousesService

Impl returns low-level Warehouses API implementation

func (*WarehousesAPI) List added in v0.24.0

List warehouses.

Lists all SQL warehouses that a user has manager permissions on.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) ListAll

func (a *WarehousesAPI) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error)

List warehouses.

Lists all SQL warehouses that a user has manager permissions on.

This method is generated by Databricks SDK Code Generator.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Warehouses.ListAll(ctx, sql.ListWarehousesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*WarehousesAPI) SetPermissions added in v0.19.0

Set SQL warehouse permissions.

Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions from their root object.

func (*WarehousesAPI) SetWorkspaceWarehouseConfig

func (a *WarehousesAPI) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

Set the workspace configuration.

Sets the workspace level configuration that is shared by all SQL warehouses in a workspace.

func (*WarehousesAPI) Start

func (a *WarehousesAPI) Start(ctx context.Context, startRequest StartRequest) (*WaitGetWarehouseRunning[any], error)

Start a warehouse.

Starts a SQL warehouse.

func (*WarehousesAPI) StartAndWait deprecated

func (a *WarehousesAPI) StartAndWait(ctx context.Context, startRequest StartRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Start and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Start.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Stop

func (a *WarehousesAPI) Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetWarehouseStopped[any], error)

Stop a warehouse.

Stops a SQL warehouse.

func (*WarehousesAPI) StopAndWait deprecated

func (a *WarehousesAPI) StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Stop and waits to reach STOPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Stop.Get() or WarehousesAPI.WaitGetWarehouseStopped

func (*WarehousesAPI) UpdatePermissions added in v0.19.0

func (a *WarehousesAPI) UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

Update SQL warehouse permissions.

Updates the permissions on a SQL warehouse. SQL warehouses can inherit permissions from their root object.

func (*WarehousesAPI) WaitGetWarehouseRunning added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseRunning(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseRunning repeatedly calls WarehousesAPI.Get and waits to reach RUNNING state

func (*WarehousesAPI) WaitGetWarehouseStopped added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseStopped(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseStopped repeatedly calls WarehousesAPI.Get and waits to reach STOPPED state

func (*WarehousesAPI) WithImpl

func (a *WarehousesAPI) WithImpl(impl WarehousesService) *WarehousesAPI

WithImpl could be used to override low-level API implementations for unit testing purposes with github.com/golang/mock or other mocking frameworks.

type WarehousesService

type WarehousesService interface {

	// Create a warehouse.
	//
	// Creates a new SQL warehouse.
	Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error)

	// Delete a warehouse.
	//
	// Deletes a SQL warehouse.
	Delete(ctx context.Context, request DeleteWarehouseRequest) error

	// Update a warehouse.
	//
	// Updates the configuration for a SQL warehouse.
	Edit(ctx context.Context, request EditWarehouseRequest) error

	// Get warehouse info.
	//
	// Gets the information for a single SQL warehouse.
	Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error)

	// Get SQL warehouse permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error)

	// Get SQL warehouse permissions.
	//
	// Gets the permissions of a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error)

	// Get the workspace configuration.
	//
	// Gets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

	// List warehouses.
	//
	// Lists all SQL warehouses that a user has manager permissions on.
	//
	// Use ListAll() to get all EndpointInfo instances
	List(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error)

	// Set SQL warehouse permissions.
	//
	// Sets permissions on a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

	// Set the workspace configuration.
	//
	// Sets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

	// Start a warehouse.
	//
	// Starts a SQL warehouse.
	Start(ctx context.Context, request StartRequest) error

	// Stop a warehouse.
	//
	// Stops a SQL warehouse.
	Stop(ctx context.Context, request StopRequest) error

	// Update SQL warehouse permissions.
	//
	// Updates the permissions on a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

type Widget

type Widget struct {
	// The unique ID for this widget.
	Id string `json:"id,omitempty"`

	Options *WidgetOptions `json:"options,omitempty"`
	// The visualization description API changes frequently and is unsupported.
	// You can duplicate a visualization by copying description objects received
	// _from the API_ and then using them to create a new one with a POST
	// request to the same endpoint. Databricks does not recommend constructing
	// ad-hoc visualizations entirely in JSON.
	Visualization *Visualization `json:"visualization,omitempty"`
	// Unused field.
	Width int `json:"width,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Widget) MarshalJSON added in v0.23.0

func (s Widget) MarshalJSON() ([]byte, error)

func (*Widget) UnmarshalJSON added in v0.23.0

func (s *Widget) UnmarshalJSON(b []byte) error

type WidgetOptions

type WidgetOptions struct {
	// Timestamp when this object was created
	CreatedAt string `json:"created_at,omitempty"`
	// Custom description of the widget
	Description string `json:"description,omitempty"`
	// Whether this widget is hidden on the dashboard.
	IsHidden bool `json:"isHidden,omitempty"`
	// How parameters used by the visualization in this widget relate to other
	// widgets on the dashboard. Databricks does not recommend modifying this
	// definition in JSON.
	ParameterMappings any `json:"parameterMappings,omitempty"`
	// Coordinates of this widget on a dashboard. This portion of the API
	// changes frequently and is unsupported.
	Position *WidgetPosition `json:"position,omitempty"`
	// Custom title of the widget
	Title string `json:"title,omitempty"`
	// Timestamp of the last time this object was updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (WidgetOptions) MarshalJSON added in v0.23.0

func (s WidgetOptions) MarshalJSON() ([]byte, error)

func (*WidgetOptions) UnmarshalJSON added in v0.23.0

func (s *WidgetOptions) UnmarshalJSON(b []byte) error

type WidgetPosition added in v0.19.0

type WidgetPosition struct {
	// reserved for internal use
	AutoHeight bool `json:"autoHeight,omitempty"`
	// column in the dashboard grid. Values start with 0
	Col int `json:"col,omitempty"`
	// row in the dashboard grid. Values start with 0
	Row int `json:"row,omitempty"`
	// width of the widget measured in dashboard grid cells
	SizeX int `json:"sizeX,omitempty"`
	// height of the widget measured in dashboard grid cells
	SizeY int `json:"sizeY,omitempty"`

	ForceSendFields []string `json:"-"`
}

Coordinates of this widget on a dashboard. This portion of the API changes frequently and is unsupported.

func (WidgetPosition) MarshalJSON added in v0.23.0

func (s WidgetPosition) MarshalJSON() ([]byte, error)

func (*WidgetPosition) UnmarshalJSON added in v0.23.0

func (s *WidgetPosition) UnmarshalJSON(b []byte) error

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL