package sql

v0.52.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 19, 2024 License: Apache-2.0 Imports: 9 Imported by: 20

Documentation

Overview

These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Statement Execution, Warehouses, etc.

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AccessControl

type AccessControl struct {
	GroupName string `json:"group_name,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query
	PermissionLevel PermissionLevel `json:"permission_level,omitempty"`

	UserName string `json:"user_name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AccessControl) MarshalJSON added in v0.23.0

func (s AccessControl) MarshalJSON() ([]byte, error)

func (*AccessControl) UnmarshalJSON added in v0.23.0

func (s *AccessControl) UnmarshalJSON(b []byte) error

type Alert

type Alert struct {
	// Trigger conditions of the alert.
	Condition *AlertCondition `json:"condition,omitempty"`
	// The timestamp indicating when the alert was created.
	CreateTime string `json:"create_time,omitempty"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This can include
	// email subject entries and Slack notification headers, for example. See
	// [here] for custom templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// The display name of the alert.
	DisplayName string `json:"display_name,omitempty"`
	// UUID identifying the alert.
	Id string `json:"id,omitempty"`
	// The workspace state of the alert. Used for tracking trashed status.
	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
	// Whether to notify alert subscribers when alert returns back to normal.
	NotifyOnOk bool `json:"notify_on_ok,omitempty"`
	// The owner's username. This field is set to "Unavailable" if the user has
	// been deleted.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// The workspace path of the folder containing the alert.
	ParentPath string `json:"parent_path,omitempty"`
	// UUID of the query attached to the alert.
	QueryId string `json:"query_id,omitempty"`
	// Number of seconds an alert must wait after being triggered to rearm
	// itself. After rearming, it can be triggered again. If 0 or not specified,
	// the alert will not be triggered again.
	SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`
	// Current state of the alert's trigger status. This field is set to UNKNOWN
	// if the alert has not yet been evaluated or ran into an error during the
	// last evaluation.
	State AlertState `json:"state,omitempty"`
	// Timestamp when the alert was last triggered, if the alert has been
	// triggered before.
	TriggerTime string `json:"trigger_time,omitempty"`
	// The timestamp indicating when the alert was updated.
	UpdateTime string `json:"update_time,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (Alert) MarshalJSON added in v0.23.0

func (s Alert) MarshalJSON() ([]byte, error)

func (*Alert) UnmarshalJSON added in v0.23.0

func (s *Alert) UnmarshalJSON(b []byte) error

type AlertCondition added in v0.44.0

type AlertCondition struct {
	// Alert state if result is empty.
	EmptyResultState AlertState `json:"empty_result_state,omitempty"`
	// Operator used for comparison in alert evaluation.
	Op AlertOperator `json:"op,omitempty"`
	// Name of the column from the query result to use for comparison in alert
	// evaluation.
	Operand *AlertConditionOperand `json:"operand,omitempty"`
	// Threshold value used for comparison in alert evaluation.
	Threshold *AlertConditionThreshold `json:"threshold,omitempty"`
}

type AlertConditionOperand added in v0.44.0

type AlertConditionOperand struct {
	Column *AlertOperandColumn `json:"column,omitempty"`
}

type AlertConditionThreshold added in v0.44.0

type AlertConditionThreshold struct {
	Value *AlertOperandValue `json:"value,omitempty"`
}

type AlertOperandColumn added in v0.44.0

type AlertOperandColumn struct {
	Name string `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AlertOperandColumn) MarshalJSON added in v0.44.0

func (s AlertOperandColumn) MarshalJSON() ([]byte, error)

func (*AlertOperandColumn) UnmarshalJSON added in v0.44.0

func (s *AlertOperandColumn) UnmarshalJSON(b []byte) error

type AlertOperandValue added in v0.44.0

type AlertOperandValue struct {
	BoolValue bool `json:"bool_value,omitempty"`

	DoubleValue float64 `json:"double_value,omitempty"`

	StringValue string `json:"string_value,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AlertOperandValue) MarshalJSON added in v0.44.0

func (s AlertOperandValue) MarshalJSON() ([]byte, error)

func (*AlertOperandValue) UnmarshalJSON added in v0.44.0

func (s *AlertOperandValue) UnmarshalJSON(b []byte) error

type AlertOperator added in v0.44.0

type AlertOperator string
const AlertOperatorEqual AlertOperator = `EQUAL`
const AlertOperatorGreaterThan AlertOperator = `GREATER_THAN`
const AlertOperatorGreaterThanOrEqual AlertOperator = `GREATER_THAN_OR_EQUAL`
const AlertOperatorIsNull AlertOperator = `IS_NULL`
const AlertOperatorLessThan AlertOperator = `LESS_THAN`
const AlertOperatorLessThanOrEqual AlertOperator = `LESS_THAN_OR_EQUAL`
const AlertOperatorNotEqual AlertOperator = `NOT_EQUAL`

func (*AlertOperator) Set added in v0.44.0

func (f *AlertOperator) Set(v string) error

Set raw string value and validate it against allowed values

func (*AlertOperator) String added in v0.44.0

func (f *AlertOperator) String() string

String representation for fmt.Print

func (*AlertOperator) Type added in v0.44.0

func (f *AlertOperator) Type() string

Type always returns AlertOperator to satisfy [pflag.Value] interface

type AlertOptions

type AlertOptions struct {
	// Name of column in the query result to compare in alert evaluation.
	Column string `json:"column"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This includes email
	// subject, Slack notification header, etc. See [here] for custom templating
	// instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// State that alert evaluates to when query result is empty.
	EmptyResultState AlertOptionsEmptyResultState `json:"empty_result_state,omitempty"`
	// Whether or not the alert is muted. If an alert is muted, it will not
	// notify users and notification destinations when triggered.
	Muted bool `json:"muted,omitempty"`
	// Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`,
	// `!=`
	Op string `json:"op"`
	// Value used to compare in alert evaluation. Supported types include
	// strings (eg. 'foobar'), floats (eg. 123.4), and booleans (true).
	Value any `json:"value"`

	ForceSendFields []string `json:"-"`
}

Alert configuration options.

func (AlertOptions) MarshalJSON added in v0.23.0

func (s AlertOptions) MarshalJSON() ([]byte, error)

func (*AlertOptions) UnmarshalJSON added in v0.23.0

func (s *AlertOptions) UnmarshalJSON(b []byte) error

type AlertOptionsEmptyResultState added in v0.20.0

type AlertOptionsEmptyResultState string

State that alert evaluates to when query result is empty.

const AlertOptionsEmptyResultStateOk AlertOptionsEmptyResultState = `ok`
const AlertOptionsEmptyResultStateTriggered AlertOptionsEmptyResultState = `triggered`
const AlertOptionsEmptyResultStateUnknown AlertOptionsEmptyResultState = `unknown`

func (*AlertOptionsEmptyResultState) Set added in v0.20.0

Set raw string value and validate it against allowed values

func (*AlertOptionsEmptyResultState) String added in v0.20.0

String representation for fmt.Print

func (*AlertOptionsEmptyResultState) Type added in v0.20.0

Type always returns AlertOptionsEmptyResultState to satisfy [pflag.Value] interface

type AlertQuery added in v0.13.0

type AlertQuery struct {
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID maps to the ID of the data source used by the resource and
	// is distinct from the warehouse ID. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	Options *QueryOptions `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`
	// The ID of the user who owns the query.
	UserId int `json:"user_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (AlertQuery) MarshalJSON added in v0.23.0

func (s AlertQuery) MarshalJSON() ([]byte, error)

func (*AlertQuery) UnmarshalJSON added in v0.23.0

func (s *AlertQuery) UnmarshalJSON(b []byte) error

type AlertState

type AlertState string
const AlertStateOk AlertState = `OK`
const AlertStateTriggered AlertState = `TRIGGERED`
const AlertStateUnknown AlertState = `UNKNOWN`

func (*AlertState) Set

func (f *AlertState) Set(v string) error

Set raw string value and validate it against allowed values

func (*AlertState) String

func (f *AlertState) String() string

String representation for fmt.Print

func (*AlertState) Type

func (f *AlertState) Type() string

Type always returns AlertState to satisfy [pflag.Value] interface

type AlertsAPI

type AlertsAPI struct {
	// contains filtered or unexported fields
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewAlerts

func NewAlerts(client *client.DatabricksClient) *AlertsAPI

func (*AlertsAPI) Create

func (a *AlertsAPI) Create(ctx context.Context, request CreateAlertRequest) (*Alert, error)
Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SELECT 1",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlertRequest{
	Alert: &sql.CreateAlertRequestAlert{
		Condition: &sql.AlertCondition{
			Operand: &sql.AlertConditionOperand{
				Column: &sql.AlertOperandColumn{
					Name: "1",
				},
			},
			Op: sql.AlertOperatorEqual,
			Threshold: &sql.AlertConditionThreshold{
				Value: &sql.AlertOperandValue{
					DoubleValue: 1,
				},
			},
		},
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		QueryId:     query.Id,
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteById(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

func (*AlertsAPI) Delete

func (a *AlertsAPI) Delete(ctx context.Context, request TrashAlertRequest) error

func (*AlertsAPI) DeleteById added in v0.44.0

func (a *AlertsAPI) DeleteById(ctx context.Context, id string) error

Delete an alert.

Moves an alert to the trash. Trashed alerts immediately disappear from searches and list views, and can no longer trigger. You can restore a trashed alert through the UI. A trashed alert is permanently deleted after 30 days.

func (*AlertsAPI) Get

func (a *AlertsAPI) Get(ctx context.Context, request GetAlertRequest) (*Alert, error)
Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SELECT 1",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlertRequest{
	Alert: &sql.CreateAlertRequestAlert{
		Condition: &sql.AlertCondition{
			Operand: &sql.AlertConditionOperand{
				Column: &sql.AlertOperandColumn{
					Name: "1",
				},
			},
			Op: sql.AlertOperatorEqual,
			Threshold: &sql.AlertConditionThreshold{
				Value: &sql.AlertOperandValue{
					DoubleValue: 1,
				},
			},
		},
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		QueryId:     query.Id,
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

byId, err := w.Alerts.GetById(ctx, alert.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteById(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

func (*AlertsAPI) GetByDisplayName added in v0.44.0

func (a *AlertsAPI) GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error)

GetByDisplayName calls AlertsAPI.ListAlertsResponseAlertDisplayNameToIdMap and returns a single ListAlertsResponseAlert.

Returns an error if there's more than one ListAlertsResponseAlert with the same .DisplayName.

Note: All ListAlertsResponseAlert instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) GetById added in v0.44.0

func (a *AlertsAPI) GetById(ctx context.Context, id string) (*Alert, error)

Get an alert.

Gets an alert.

func (*AlertsAPI) List

List alerts.

Gets a list of alerts accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) ListAlertsResponseAlertDisplayNameToIdMap added in v0.44.0

func (a *AlertsAPI) ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error)

ListAlertsResponseAlertDisplayNameToIdMap calls AlertsAPI.ListAll and creates a map of results with ListAlertsResponseAlert.DisplayName as key and ListAlertsResponseAlert.Id as value.

Returns an error if there's more than one ListAlertsResponseAlert with the same .DisplayName.

Note: All ListAlertsResponseAlert instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*AlertsAPI) ListAll added in v0.44.0

List alerts.

Gets a list of alerts accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Alerts.ListAll(ctx, sql.ListAlertsRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*AlertsAPI) Update

func (a *AlertsAPI) Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error)
Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SELECT 1",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

alert, err := w.Alerts.Create(ctx, sql.CreateAlertRequest{
	Alert: &sql.CreateAlertRequestAlert{
		Condition: &sql.AlertCondition{
			Operand: &sql.AlertConditionOperand{
				Column: &sql.AlertOperandColumn{
					Name: "1",
				},
			},
			Op: sql.AlertOperatorEqual,
			Threshold: &sql.AlertConditionThreshold{
				Value: &sql.AlertOperandValue{
					DoubleValue: 1,
				},
			},
		},
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		QueryId:     query.Id,
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", alert)

_, err = w.Alerts.Update(ctx, sql.UpdateAlertRequest{
	Id: alert.Id,
	Alert: &sql.UpdateAlertRequestAlert{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
	UpdateMask: "display_name",
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
err = w.Alerts.DeleteById(ctx, alert.Id)
if err != nil {
	panic(err)
}
Output:

type AlertsInterface added in v0.29.0

type AlertsInterface interface {

	// Create an alert.
	//
	// Creates an alert.
	Create(ctx context.Context, request CreateAlertRequest) (*Alert, error)

	// Delete an alert.
	//
	// Moves an alert to the trash. Trashed alerts immediately disappear from
	// searches and list views, and can no longer trigger. You can restore a trashed
	// alert through the UI. A trashed alert is permanently deleted after 30 days.
	Delete(ctx context.Context, request TrashAlertRequest) error

	// Delete an alert.
	//
	// Moves an alert to the trash. Trashed alerts immediately disappear from
	// searches and list views, and can no longer trigger. You can restore a trashed
	// alert through the UI. A trashed alert is permanently deleted after 30 days.
	DeleteById(ctx context.Context, id string) error

	// Get an alert.
	//
	// Gets an alert.
	Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

	// Get an alert.
	//
	// Gets an alert.
	GetById(ctx context.Context, id string) (*Alert, error)

	// List alerts.
	//
	// Gets a list of alerts accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert]

	// List alerts.
	//
	// Gets a list of alerts accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error)

	// ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value.
	//
	// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName.
	//
	// Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error)

	// GetByDisplayName calls [AlertsAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert].
	//
	// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName.
	//
	// Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error)

	// Update an alert.
	//
	// Updates an alert.
	Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error)
}

type AlertsLegacyAPI added in v0.44.0

type AlertsLegacyAPI struct {
	// contains filtered or unexported fields
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more](https://docs.databricks.com/en/sql/dbsql-api-latest.html)

func NewAlertsLegacy added in v0.44.0

func NewAlertsLegacy(client *client.DatabricksClient) *AlertsLegacyAPI

func (*AlertsLegacyAPI) Create added in v0.44.0

func (a *AlertsLegacyAPI) Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error)

func (*AlertsLegacyAPI) Delete added in v0.44.0

func (a *AlertsLegacyAPI) Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error

func (*AlertsLegacyAPI) DeleteByAlertId added in v0.44.0

func (a *AlertsLegacyAPI) DeleteByAlertId(ctx context.Context, alertId string) error

Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to the trash.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/delete instead. [Learn more](https://docs.databricks.com/en/sql/dbsql-api-latest.html)

func (*AlertsLegacyAPI) Get added in v0.44.0

func (a *AlertsLegacyAPI) Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error)

func (*AlertsLegacyAPI) GetByAlertId added in v0.44.0

func (a *AlertsLegacyAPI) GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error)

Get an alert.

Gets an alert.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/get instead. [Learn more](https://docs.databricks.com/en/sql/dbsql-api-latest.html)

func (*AlertsLegacyAPI) GetByName added in v0.44.0

func (a *AlertsLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyAlert, error)

GetByName calls AlertsLegacyAPI.LegacyAlertNameToIdMap and returns a single LegacyAlert.

Returns an error if there's more than one LegacyAlert with the same .Name.

Note: All LegacyAlert instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*AlertsLegacyAPI) LegacyAlertNameToIdMap added in v0.44.0

func (a *AlertsLegacyAPI) LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error)

LegacyAlertNameToIdMap calls AlertsLegacyAPI.List and creates a map of results with LegacyAlert.Name as key and LegacyAlert.Id as value.

Returns an error if there's more than one LegacyAlert with the same .Name.

Note: All LegacyAlert instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*AlertsLegacyAPI) List added in v0.44.0

func (a *AlertsLegacyAPI) List(ctx context.Context) ([]LegacyAlert, error)

func (*AlertsLegacyAPI) Update added in v0.44.0

func (a *AlertsLegacyAPI) Update(ctx context.Context, request EditAlert) error

type AlertsLegacyInterface added in v0.44.0

type AlertsLegacyInterface interface {

	// Create an alert.
	//
	// Creates an alert. An alert is a Databricks SQL object that periodically runs
	// a query, evaluates a condition of its result, and notifies users or
	// notification destinations if the condition was met.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error)

	// Delete an alert.
	//
	// Deletes an alert. Deleted alerts are no longer accessible and cannot be
	// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to
	// the trash.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error

	// Delete an alert.
	//
	// Deletes an alert. Deleted alerts are no longer accessible and cannot be
	// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to
	// the trash.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	DeleteByAlertId(ctx context.Context, alertId string) error

	// Get an alert.
	//
	// Gets an alert.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error)

	// Get an alert.
	//
	// Gets an alert.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error)

	// Get alerts.
	//
	// Gets a list of alerts.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	List(ctx context.Context) ([]LegacyAlert, error)

	// LegacyAlertNameToIdMap calls [AlertsLegacyAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value.
	//
	// Returns an error if there's more than one [LegacyAlert] with the same .Name.
	//
	// Note: All [LegacyAlert] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error)

	// GetByName calls [AlertsLegacyAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert].
	//
	// Returns an error if there's more than one [LegacyAlert] with the same .Name.
	//
	// Note: All [LegacyAlert] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*LegacyAlert, error)

	// Update an alert.
	//
	// Updates an alert.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:alerts/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request EditAlert) error
}

type AlertsLegacyService added in v0.44.0

type AlertsLegacyService interface {

	// Create an alert.
	//
	// Creates an alert. An alert is a Databricks SQL object that periodically
	// runs a query, evaluates a condition of its result, and notifies users or
	// notification destinations if the condition was met.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:alerts/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error)

	// Delete an alert.
	//
	// Deletes an alert. Deleted alerts are no longer accessible and cannot be
	// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved
	// to the trash.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:alerts/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error

	// Get an alert.
	//
	// Gets an alert.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:alerts/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error)

	// Get alerts.
	//
	// Gets a list of alerts.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:alerts/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	List(ctx context.Context) ([]LegacyAlert, error)

	// Update an alert.
	//
	// Updates an alert.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:alerts/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request EditAlert) error
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more](https://docs.databricks.com/en/sql/dbsql-api-latest.html)

type AlertsService

type AlertsService interface {

	// Create an alert.
	//
	// Creates an alert.
	Create(ctx context.Context, request CreateAlertRequest) (*Alert, error)

	// Delete an alert.
	//
	// Moves an alert to the trash. Trashed alerts immediately disappear from
	// searches and list views, and can no longer trigger. You can restore a
	// trashed alert through the UI. A trashed alert is permanently deleted
	// after 30 days.
	Delete(ctx context.Context, request TrashAlertRequest) error

	// Get an alert.
	//
	// Gets an alert.
	Get(ctx context.Context, request GetAlertRequest) (*Alert, error)

	// List alerts.
	//
	// Gets a list of alerts accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result
	// in throttling, service degradation, or a temporary ban.
	//
	// Use ListAll() to get all ListAlertsResponseAlert instances, which will iterate over every result page.
	List(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error)

	// Update an alert.
	//
	// Updates an alert.
	Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error)
}

The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type BaseChunkInfo added in v0.20.0

type BaseChunkInfo struct {
	// The number of bytes in the result chunk. This field is not available when
	// using `INLINE` disposition.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	ForceSendFields []string `json:"-"`
}

Describes metadata for a particular chunk, within a result set; this structure is used both within a manifest, and when fetching individual chunk data or links.

func (BaseChunkInfo) MarshalJSON added in v0.23.0

func (s BaseChunkInfo) MarshalJSON() ([]byte, error)

func (*BaseChunkInfo) UnmarshalJSON added in v0.23.0

func (s *BaseChunkInfo) UnmarshalJSON(b []byte) error

type CancelExecutionRequest added in v0.3.0

type CancelExecutionRequest struct {
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"-" url:"-"`
}

Cancel statement execution

type CancelExecutionResponse added in v0.34.0

type CancelExecutionResponse struct {
}

type Channel

type Channel struct {
	DbsqlVersion string `json:"dbsql_version,omitempty"`

	Name ChannelName `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

Configures the channel name and DBSQL version of the warehouse. CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified.

func (Channel) MarshalJSON added in v0.23.0

func (s Channel) MarshalJSON() ([]byte, error)

func (*Channel) UnmarshalJSON added in v0.23.0

func (s *Channel) UnmarshalJSON(b []byte) error

type ChannelInfo

type ChannelInfo struct {
	// DB SQL Version the Channel is mapped to.
	DbsqlVersion string `json:"dbsql_version,omitempty"`
	// Name of the channel
	Name ChannelName `json:"name,omitempty"`

	ForceSendFields []string `json:"-"`
}

Details about a Channel.

func (ChannelInfo) MarshalJSON added in v0.23.0

func (s ChannelInfo) MarshalJSON() ([]byte, error)

func (*ChannelInfo) UnmarshalJSON added in v0.23.0

func (s *ChannelInfo) UnmarshalJSON(b []byte) error

type ChannelName

type ChannelName string
const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT`
const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM`
const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW`
const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS`

func (*ChannelName) Set

func (f *ChannelName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ChannelName) String

func (f *ChannelName) String() string

String representation for fmt.Print

func (*ChannelName) Type

func (f *ChannelName) Type() string

Type always returns ChannelName to satisfy [pflag.Value] interface

type ColumnInfo added in v0.3.0

type ColumnInfo struct {
	// The name of the column.
	Name string `json:"name,omitempty"`
	// The ordinal position of the column (starting at position 0).
	Position int `json:"position,omitempty"`
	// The format of the interval type.
	TypeIntervalType string `json:"type_interval_type,omitempty"`
	// The name of the base data type. This doesn't include details for complex
	// types such as STRUCT, MAP or ARRAY.
	TypeName ColumnInfoTypeName `json:"type_name,omitempty"`
	// Specifies the number of digits in a number. This applies to the DECIMAL
	// type.
	TypePrecision int `json:"type_precision,omitempty"`
	// Specifies the number of digits to the right of the decimal point in a
	// number. This applies to the DECIMAL type.
	TypeScale int `json:"type_scale,omitempty"`
	// The full SQL type specification.
	TypeText string `json:"type_text,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (ColumnInfo) MarshalJSON added in v0.23.0

func (s ColumnInfo) MarshalJSON() ([]byte, error)

func (*ColumnInfo) UnmarshalJSON added in v0.23.0

func (s *ColumnInfo) UnmarshalJSON(b []byte) error

type ColumnInfoTypeName added in v0.3.0

type ColumnInfoTypeName string

The name of the base data type. This doesn't include details for complex types such as STRUCT, MAP or ARRAY.

const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY`
const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY`
const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN`
const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE`
const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR`
const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE`
const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL`
const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE`
const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT`
const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT`
const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL`
const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG`
const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP`
const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL`
const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT`
const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING`
const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT`
const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP`
const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE`

func (*ColumnInfoTypeName) Set added in v0.3.0

func (f *ColumnInfoTypeName) Set(v string) error

Set raw string value and validate it against allowed values

func (*ColumnInfoTypeName) String added in v0.3.0

func (f *ColumnInfoTypeName) String() string

String representation for fmt.Print

func (*ColumnInfoTypeName) Type added in v0.3.0

func (f *ColumnInfoTypeName) Type() string

Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface

type CreateAlert added in v0.3.0

type CreateAlert struct {
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateAlert) MarshalJSON added in v0.23.0

func (s CreateAlert) MarshalJSON() ([]byte, error)

func (*CreateAlert) UnmarshalJSON added in v0.23.0

func (s *CreateAlert) UnmarshalJSON(b []byte) error

type CreateAlertRequest added in v0.44.0

type CreateAlertRequest struct {
	Alert *CreateAlertRequestAlert `json:"alert,omitempty"`
}

type CreateAlertRequestAlert added in v0.44.0

type CreateAlertRequestAlert struct {
	// Trigger conditions of the alert.
	Condition *AlertCondition `json:"condition,omitempty"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This can include
	// email subject entries and Slack notification headers, for example. See
	// [here] for custom templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// The display name of the alert.
	DisplayName string `json:"display_name,omitempty"`
	// Whether to notify alert subscribers when alert returns back to normal.
	NotifyOnOk bool `json:"notify_on_ok,omitempty"`
	// The workspace path of the folder containing the alert.
	ParentPath string `json:"parent_path,omitempty"`
	// UUID of the query attached to the alert.
	QueryId string `json:"query_id,omitempty"`
	// Number of seconds an alert must wait after being triggered to rearm
	// itself. After rearming, it can be triggered again. If 0 or not specified,
	// the alert will not be triggered again.
	SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateAlertRequestAlert) MarshalJSON added in v0.44.0

func (s CreateAlertRequestAlert) MarshalJSON() ([]byte, error)

func (*CreateAlertRequestAlert) UnmarshalJSON added in v0.44.0

func (s *CreateAlertRequestAlert) UnmarshalJSON(b []byte) error

type CreateQueryRequest added in v0.44.0

type CreateQueryRequest struct {
	Query *CreateQueryRequestQuery `json:"query,omitempty"`
}

type CreateQueryRequestQuery added in v0.44.0

type CreateQueryRequestQuery struct {
	// Whether to apply a 1000 row limit to the query result.
	ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
	// Name of the catalog where this query will be executed.
	Catalog string `json:"catalog,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Display name of the query that appears in list views, widget headings,
	// and on the query page.
	DisplayName string `json:"display_name,omitempty"`
	// List of query parameter definitions.
	Parameters []QueryParameter `json:"parameters,omitempty"`
	// Workspace path of the workspace folder containing the object.
	ParentPath string `json:"parent_path,omitempty"`
	// Text of the query to be run.
	QueryText string `json:"query_text,omitempty"`
	// Sets the "Run as" role for the object.
	RunAsMode RunAsMode `json:"run_as_mode,omitempty"`
	// Name of the schema where this query will be executed.
	Schema string `json:"schema,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// ID of the SQL warehouse attached to the query.
	WarehouseId string `json:"warehouse_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateQueryRequestQuery) MarshalJSON added in v0.44.0

func (s CreateQueryRequestQuery) MarshalJSON() ([]byte, error)

func (*CreateQueryRequestQuery) UnmarshalJSON added in v0.44.0

func (s *CreateQueryRequestQuery) UnmarshalJSON(b []byte) error

type CreateQueryVisualizationsLegacyRequest added in v0.44.0

type CreateQueryVisualizationsLegacyRequest struct {
	// A short description of this visualization. This is not displayed in the
	// UI.
	Description string `json:"description,omitempty"`
	// The name of the visualization that appears on dashboards and the query
	// screen.
	Name string `json:"name,omitempty"`
	// The options object varies widely from one visualization type to the next
	// and is unsupported. Databricks does not recommend modifying visualization
	// settings in JSON.
	Options any `json:"options"`
	// The identifier returned by :method:queries/create
	QueryId string `json:"query_id"`
	// The type of visualization: chart, table, pivot table, and so on.
	Type string `json:"type"`

	ForceSendFields []string `json:"-"`
}

Add visualization to a query

func (CreateQueryVisualizationsLegacyRequest) MarshalJSON added in v0.44.0

func (s CreateQueryVisualizationsLegacyRequest) MarshalJSON() ([]byte, error)

func (*CreateQueryVisualizationsLegacyRequest) UnmarshalJSON added in v0.44.0

func (s *CreateQueryVisualizationsLegacyRequest) UnmarshalJSON(b []byte) error

type CreateVisualizationRequest added in v0.44.0

type CreateVisualizationRequest struct {
	Visualization *CreateVisualizationRequestVisualization `json:"visualization,omitempty"`
}

type CreateVisualizationRequestVisualization added in v0.44.0

type CreateVisualizationRequestVisualization struct {
	// The display name of the visualization.
	DisplayName string `json:"display_name,omitempty"`
	// UUID of the query that the visualization is attached to.
	QueryId string `json:"query_id,omitempty"`
	// The visualization options vary widely from one visualization type to
	// the next and are unsupported. Databricks does not recommend modifying
	// visualization options directly.
	SerializedOptions string `json:"serialized_options,omitempty"`
	// The visualization query plan varies widely from one visualization type to
	// the next and is unsupported. Databricks does not recommend modifying the
	// visualization query plan directly.
	SerializedQueryPlan string `json:"serialized_query_plan,omitempty"`
	// The type of visualization: counter, table, funnel, and so on.
	Type string `json:"type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateVisualizationRequestVisualization) MarshalJSON added in v0.44.0

func (s CreateVisualizationRequestVisualization) MarshalJSON() ([]byte, error)

func (*CreateVisualizationRequestVisualization) UnmarshalJSON added in v0.44.0

func (s *CreateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error

type CreateWarehouseRequest

type CreateWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be >= 0 mins for serverless warehouses - Must be
	// == 0 or >= 10 mins for non-serverless warehouses - 0 indicates no
	// autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType CreateWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateWarehouseRequest) MarshalJSON added in v0.23.0

func (s CreateWarehouseRequest) MarshalJSON() ([]byte, error)

func (*CreateWarehouseRequest) UnmarshalJSON added in v0.23.0

func (s *CreateWarehouseRequest) UnmarshalJSON(b []byte) error

type CreateWarehouseRequestWarehouseType added in v0.9.0

type CreateWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const CreateWarehouseRequestWarehouseTypeClassic CreateWarehouseRequestWarehouseType = `CLASSIC`
const CreateWarehouseRequestWarehouseTypePro CreateWarehouseRequestWarehouseType = `PRO`
const CreateWarehouseRequestWarehouseTypeTypeUnspecified CreateWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*CreateWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*CreateWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*CreateWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns CreateWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type CreateWarehouseResponse

type CreateWarehouseResponse struct {
	// Id for the SQL warehouse. This value is unique across all SQL warehouses.
	Id string `json:"id,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (CreateWarehouseResponse) MarshalJSON added in v0.23.0

func (s CreateWarehouseResponse) MarshalJSON() ([]byte, error)

func (*CreateWarehouseResponse) UnmarshalJSON added in v0.23.0

func (s *CreateWarehouseResponse) UnmarshalJSON(b []byte) error

type CreateWidget added in v0.19.0

type CreateWidget struct {
	// Dashboard ID returned by :method:dashboards/create.
	DashboardId string `json:"dashboard_id"`
	// Widget ID returned by :method:dashboardwidgets/create
	Id string `json:"-" url:"-"`

	Options WidgetOptions `json:"options"`
	// If this is a textbox widget, the application displays this text. This
	// field is ignored if the widget contains a visualization in the
	// `visualization` field.
	Text string `json:"text,omitempty"`
	// Query Visualization ID returned by :method:queryvisualizations/create.
	VisualizationId string `json:"visualization_id,omitempty"`
	// Width of a widget
	Width int `json:"width"`

	ForceSendFields []string `json:"-"`
}

func (CreateWidget) MarshalJSON added in v0.23.0

func (s CreateWidget) MarshalJSON() ([]byte, error)

func (*CreateWidget) UnmarshalJSON added in v0.23.0

func (s *CreateWidget) UnmarshalJSON(b []byte) error

type Dashboard

type Dashboard struct {
	// Whether the authenticated user can edit the query definition.
	CanEdit bool `json:"can_edit,omitempty"`
	// Timestamp when this dashboard was created.
	CreatedAt string `json:"created_at,omitempty"`
	// In the web application, query filters that share a name are coupled to a
	// single selection box if this value is `true`.
	DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"`
	// The ID for this dashboard.
	Id string `json:"id,omitempty"`
	// Indicates whether a dashboard is trashed. Trashed dashboards won't appear
	// in list views. If this boolean is `true`, the `options` property for this
	// dashboard includes a `moved_to_trash_at` timestamp. Items in trash are
	// permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether a dashboard is a draft. Draft dashboards only appear in list
	// views for their owners.
	IsDraft bool `json:"is_draft,omitempty"`
	// Indicates whether this query object appears in the current user's
	// favorites list. This flag determines whether the star icon for favorites
	// is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of the dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name,omitempty"`

	Options *DashboardOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// URL slug. Usually mirrors the query name with dashes (`-`) instead of
	// spaces. Appears in the URL for this query.
	Slug string `json:"slug,omitempty"`

	Tags []string `json:"tags,omitempty"`
	// Timestamp when this dashboard was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	User *User `json:"user,omitempty"`
	// The ID of the user who owns the dashboard.
	UserId int `json:"user_id,omitempty"`

	Widgets []Widget `json:"widgets,omitempty"`

	ForceSendFields []string `json:"-"`
}

A JSON representing a dashboard containing widgets of visualizations and text boxes.

func (Dashboard) MarshalJSON added in v0.23.0

func (s Dashboard) MarshalJSON() ([]byte, error)

func (*Dashboard) UnmarshalJSON added in v0.23.0

func (s *Dashboard) UnmarshalJSON(b []byte) error

type DashboardEditContent added in v0.30.0

type DashboardEditContent struct {
	DashboardId string `json:"-" url:"-"`
	// The title of this dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name,omitempty"`
	// Sets the **Run as** role for the object. Must be set to one of `"viewer"`
	// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
	// owner" behavior)
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	Tags []string `json:"tags,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DashboardEditContent) MarshalJSON added in v0.30.0

func (s DashboardEditContent) MarshalJSON() ([]byte, error)

func (*DashboardEditContent) UnmarshalJSON added in v0.30.0

func (s *DashboardEditContent) UnmarshalJSON(b []byte) error

type DashboardOptions

type DashboardOptions struct {
	// The timestamp when this dashboard was moved to trash. Only present when
	// the `is_archived` property is `true`. Trashed items are deleted after
	// thirty days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DashboardOptions) MarshalJSON added in v0.23.0

func (s DashboardOptions) MarshalJSON() ([]byte, error)

func (*DashboardOptions) UnmarshalJSON added in v0.23.0

func (s *DashboardOptions) UnmarshalJSON(b []byte) error

type DashboardPostContent added in v0.30.0

type DashboardPostContent struct {
	// Indicates whether the dashboard filters are enabled
	DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"`
	// Indicates whether this dashboard object should appear in the current
	// user's favorites list.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// The title of this dashboard that appears in list views and at the top of
	// the dashboard page.
	Name string `json:"name"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// Sets the **Run as** role for the object. Must be set to one of `"viewer"`
	// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
	// owner" behavior)
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	Tags []string `json:"tags,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DashboardPostContent) MarshalJSON added in v0.30.0

func (s DashboardPostContent) MarshalJSON() ([]byte, error)

func (*DashboardPostContent) UnmarshalJSON added in v0.30.0

func (s *DashboardPostContent) UnmarshalJSON(b []byte) error

type DashboardWidgetsAPI added in v0.19.0

type DashboardWidgetsAPI struct {
	// contains filtered or unexported fields
}

This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace. Data structures may change over time.

func NewDashboardWidgets added in v0.19.0

func NewDashboardWidgets(client *client.DatabricksClient) *DashboardWidgetsAPI

func (*DashboardWidgetsAPI) Create added in v0.19.0

func (a *DashboardWidgetsAPI) Create(ctx context.Context, request CreateWidget) (*Widget, error)

func (*DashboardWidgetsAPI) Delete added in v0.19.0

func (a *DashboardWidgetsAPI) Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error

func (*DashboardWidgetsAPI) DeleteById added in v0.19.0

func (a *DashboardWidgetsAPI) DeleteById(ctx context.Context, id string) error

Remove widget.

func (*DashboardWidgetsAPI) Update added in v0.19.0

func (a *DashboardWidgetsAPI) Update(ctx context.Context, request CreateWidget) (*Widget, error)

type DashboardWidgetsInterface added in v0.29.0

type DashboardWidgetsInterface interface {

	// Add widget to a dashboard.
	Create(ctx context.Context, request CreateWidget) (*Widget, error)

	// Remove widget.
	Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error

	// Remove widget.
	DeleteById(ctx context.Context, id string) error

	// Update existing widget.
	Update(ctx context.Context, request CreateWidget) (*Widget, error)
}

type DashboardWidgetsService added in v0.19.0

type DashboardWidgetsService interface {

	// Add widget to a dashboard.
	Create(ctx context.Context, request CreateWidget) (*Widget, error)

	// Remove widget.
	Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error

	// Update existing widget.
	Update(ctx context.Context, request CreateWidget) (*Widget, error)
}

This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace. Data structures may change over time.

type DashboardsAPI

type DashboardsAPI struct {
	// contains filtered or unexported fields
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look-up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewDashboards

func NewDashboards(client *client.DatabricksClient) *DashboardsAPI

func (*DashboardsAPI) Create

func (a *DashboardsAPI) Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error)
Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) DashboardNameToIdMap

func (a *DashboardsAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error)

DashboardNameToIdMap calls DashboardsAPI.ListAll and creates a map of results with Dashboard.Name as key and Dashboard.Id as value.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) Delete

func (a *DashboardsAPI) Delete(ctx context.Context, request DeleteDashboardRequest) error
Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) DeleteByDashboardId

func (a *DashboardsAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error

Remove a dashboard.

Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.

func (*DashboardsAPI) Get

func (a *DashboardsAPI) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)
Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := w.Dashboards.GetByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) GetByDashboardId

func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error)

Retrieve a definition.

Returns a JSON representation of a dashboard object, including its visualization and query objects.

func (*DashboardsAPI) GetByName

func (a *DashboardsAPI) GetByName(ctx context.Context, name string) (*Dashboard, error)

GetByName calls DashboardsAPI.DashboardNameToIdMap and returns a single Dashboard.

Returns an error if there's more than one Dashboard with the same .Name.

Note: All Dashboard instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) List added in v0.24.0

Get dashboard objects.

Fetch a paginated list of dashboard objects.

**Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

func (*DashboardsAPI) ListAll

func (a *DashboardsAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error)

Get dashboard objects.

Fetch a paginated list of dashboard objects.

**Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Dashboards.ListAll(ctx, sql.ListDashboardsRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*DashboardsAPI) Restore

func (a *DashboardsAPI) Restore(ctx context.Context, request RestoreDashboardRequest) error
Example (Dashboards)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{
	Name: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

err = w.Dashboards.Restore(ctx, sql.RestoreDashboardRequest{
	DashboardId: created.Id,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Dashboards.DeleteByDashboardId(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*DashboardsAPI) Update added in v0.30.0

func (a *DashboardsAPI) Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error)

type DashboardsInterface added in v0.29.0

type DashboardsInterface interface {

	// Create a dashboard object.
	Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error)

	// Remove a dashboard.
	//
	// Moves a dashboard to the trash. Trashed dashboards do not appear in list
	// views or searches, and cannot be shared.
	Delete(ctx context.Context, request DeleteDashboardRequest) error

	// Remove a dashboard.
	//
	// Moves a dashboard to the trash. Trashed dashboards do not appear in list
	// views or searches, and cannot be shared.
	DeleteByDashboardId(ctx context.Context, dashboardId string) error

	// Retrieve a definition.
	//
	// Returns a JSON representation of a dashboard object, including its
	// visualization and query objects.
	Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)

	// Retrieve a definition.
	//
	// Returns a JSON representation of a dashboard object, including its
	// visualization and query objects.
	GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error)

	// Get dashboard objects.
	//
	// Fetch a paginated list of dashboard objects.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard]

	// Get dashboard objects.
	//
	// Fetch a paginated list of dashboard objects.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error)

	// DashboardNameToIdMap calls [DashboardsAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value.
	//
	// Returns an error if there's more than one [Dashboard] with the same .Name.
	//
	// Note: All [Dashboard] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error)

	// GetByName calls [DashboardsAPI.DashboardNameToIdMap] and returns a single [Dashboard].
	//
	// Returns an error if there's more than one [Dashboard] with the same .Name.
	//
	// Note: All [Dashboard] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*Dashboard, error)

	// Restore a dashboard.
	//
	// A restored dashboard appears in list views and searches and can be shared.
	Restore(ctx context.Context, request RestoreDashboardRequest) error

	// Change a dashboard definition.
	//
	// Modify this dashboard definition. This operation only affects attributes of
	// the dashboard object. It does not add, modify, or remove widgets.
	//
	// **Note**: You cannot undo this operation.
	Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error)
}

type DashboardsService

type DashboardsService interface {

	// Create a dashboard object.
	Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error)

	// Remove a dashboard.
	//
	// Moves a dashboard to the trash. Trashed dashboards do not appear in list
	// views or searches, and cannot be shared.
	Delete(ctx context.Context, request DeleteDashboardRequest) error

	// Retrieve a definition.
	//
	// Returns a JSON representation of a dashboard object, including its
	// visualization and query objects.
	Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)

	// Get dashboard objects.
	//
	// Fetch a paginated list of dashboard objects.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result
	// in throttling, service degradation, or a temporary ban.
	//
	// Use ListAll() to get all Dashboard instances, which will iterate over every result page.
	List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error)

	// Restore a dashboard.
	//
	// A restored dashboard appears in list views and searches and can be
	// shared.
	Restore(ctx context.Context, request RestoreDashboardRequest) error

	// Change a dashboard definition.
	//
	// Modify this dashboard definition. This operation only affects attributes
	// of the dashboard object. It does not add, modify, or remove widgets.
	//
	// **Note**: You cannot undo this operation.
	Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error)
}

In general, there is little need to modify dashboards using the API. However, it can be useful to use dashboard objects to look-up a collection of related query IDs. The API can also be used to duplicate multiple dashboards at once since you can get a dashboard definition with a GET request and then POST it to create a new one. Dashboards can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type DataSource

type DataSource struct {
	// Data source ID maps to the ID of the data source used by the resource and
	// is distinct from the warehouse ID. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
	Id string `json:"id,omitempty"`
	// The string name of this data source / SQL warehouse as it appears in the
	// Databricks SQL web application.
	Name string `json:"name,omitempty"`
	// Reserved for internal use.
	PauseReason string `json:"pause_reason,omitempty"`
	// Reserved for internal use.
	Paused int `json:"paused,omitempty"`
	// Reserved for internal use.
	SupportsAutoLimit bool `json:"supports_auto_limit,omitempty"`
	// Reserved for internal use.
	Syntax string `json:"syntax,omitempty"`
	// The type of data source. For SQL warehouses, this will be
	// `databricks_internal`.
	Type string `json:"type,omitempty"`
	// Reserved for internal use.
	ViewOnly bool `json:"view_only,omitempty"`
	// The ID of the associated SQL warehouse, if this data source is backed by
	// a SQL warehouse.
	WarehouseId string `json:"warehouse_id,omitempty"`

	ForceSendFields []string `json:"-"`
}

A JSON object representing a DBSQL data source / SQL warehouse.

func (DataSource) MarshalJSON added in v0.23.0

func (s DataSource) MarshalJSON() ([]byte, error)

func (*DataSource) UnmarshalJSON added in v0.23.0

func (s *DataSource) UnmarshalJSON(b []byte) error

type DataSourcesAPI

type DataSourcesAPI struct {
	// contains filtered or unexported fields
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

**Note**: A new version of the Databricks SQL API is now available. Learn more

func NewDataSources

func NewDataSources(client *client.DatabricksClient) *DataSourcesAPI

func (*DataSourcesAPI) DataSourceNameToIdMap

func (a *DataSourcesAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error)

DataSourceNameToIdMap calls DataSourcesAPI.List and creates a map of results with DataSource.Name as key and DataSource.Id as value.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) GetByName

func (a *DataSourcesAPI) GetByName(ctx context.Context, name string) (*DataSource, error)

GetByName calls DataSourcesAPI.DataSourceNameToIdMap and returns a single DataSource.

Returns an error if there's more than one DataSource with the same .Name.

Note: All DataSource instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*DataSourcesAPI) List

func (a *DataSourcesAPI) List(ctx context.Context) ([]DataSource, error)

type DataSourcesInterface added in v0.29.0

type DataSourcesInterface interface {

	// Get a list of SQL warehouses.
	//
	// Retrieves a full list of SQL warehouses available in this workspace. All
	// fields that appear in this API response are enumerated for clarity. However,
	// you need only a SQL warehouse's `id` to create new queries against it.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:warehouses/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	List(ctx context.Context) ([]DataSource, error)

	// DataSourceNameToIdMap calls [DataSourcesAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value.
	//
	// Returns an error if there's more than one [DataSource] with the same .Name.
	//
	// Note: All [DataSource] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	DataSourceNameToIdMap(ctx context.Context) (map[string]string, error)

	// GetByName calls [DataSourcesAPI.DataSourceNameToIdMap] and returns a single [DataSource].
	//
	// Returns an error if there's more than one [DataSource] with the same .Name.
	//
	// Note: All [DataSource] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*DataSource, error)
}

type DataSourcesService

type DataSourcesService interface {

	// Get a list of SQL warehouses.
	//
	// Retrieves a full list of SQL warehouses available in this workspace. All
	// fields that appear in this API response are enumerated for clarity.
	// However, you need only a SQL warehouse's `id` to create new queries
	// against it.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:warehouses/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	List(ctx context.Context) ([]DataSource, error)
}

This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it.

This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL.

**Note**: A new version of the Databricks SQL API is now available. Learn more

type DatePrecision added in v0.44.0

type DatePrecision string
const DatePrecisionDayPrecision DatePrecision = `DAY_PRECISION`
const DatePrecisionMinutePrecision DatePrecision = `MINUTE_PRECISION`
const DatePrecisionSecondPrecision DatePrecision = `SECOND_PRECISION`

func (*DatePrecision) Set added in v0.44.0

func (f *DatePrecision) Set(v string) error

Set raw string value and validate it against allowed values

func (*DatePrecision) String added in v0.44.0

func (f *DatePrecision) String() string

String representation for fmt.Print

func (*DatePrecision) Type added in v0.44.0

func (f *DatePrecision) Type() string

Type always returns DatePrecision to satisfy [pflag.Value] interface

type DateRange added in v0.44.0

type DateRange struct {
	End string `json:"end"`

	Start string `json:"start"`
}

type DateRangeValue added in v0.44.0

type DateRangeValue struct {
	// Manually specified date-time range value.
	DateRangeValue *DateRange `json:"date_range_value,omitempty"`
	// Dynamic date-time range value based on current date-time.
	DynamicDateRangeValue DateRangeValueDynamicDateRange `json:"dynamic_date_range_value,omitempty"`
	// Date-time precision to format the value into when the query is run.
	// Defaults to DAY_PRECISION (YYYY-MM-DD).
	Precision DatePrecision `json:"precision,omitempty"`

	StartDayOfWeek int `json:"start_day_of_week,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DateRangeValue) MarshalJSON added in v0.44.0

func (s DateRangeValue) MarshalJSON() ([]byte, error)

func (*DateRangeValue) UnmarshalJSON added in v0.44.0

func (s *DateRangeValue) UnmarshalJSON(b []byte) error

type DateRangeValueDynamicDateRange added in v0.44.0

type DateRangeValueDynamicDateRange string
const DateRangeValueDynamicDateRangeLast12Months DateRangeValueDynamicDateRange = `LAST_12_MONTHS`
const DateRangeValueDynamicDateRangeLast14Days DateRangeValueDynamicDateRange = `LAST_14_DAYS`
const DateRangeValueDynamicDateRangeLast24Hours DateRangeValueDynamicDateRange = `LAST_24_HOURS`
const DateRangeValueDynamicDateRangeLast30Days DateRangeValueDynamicDateRange = `LAST_30_DAYS`
const DateRangeValueDynamicDateRangeLast60Days DateRangeValueDynamicDateRange = `LAST_60_DAYS`
const DateRangeValueDynamicDateRangeLast7Days DateRangeValueDynamicDateRange = `LAST_7_DAYS`
const DateRangeValueDynamicDateRangeLast8Hours DateRangeValueDynamicDateRange = `LAST_8_HOURS`
const DateRangeValueDynamicDateRangeLast90Days DateRangeValueDynamicDateRange = `LAST_90_DAYS`
const DateRangeValueDynamicDateRangeLastHour DateRangeValueDynamicDateRange = `LAST_HOUR`
const DateRangeValueDynamicDateRangeLastMonth DateRangeValueDynamicDateRange = `LAST_MONTH`
const DateRangeValueDynamicDateRangeLastWeek DateRangeValueDynamicDateRange = `LAST_WEEK`
const DateRangeValueDynamicDateRangeLastYear DateRangeValueDynamicDateRange = `LAST_YEAR`
const DateRangeValueDynamicDateRangeThisMonth DateRangeValueDynamicDateRange = `THIS_MONTH`
const DateRangeValueDynamicDateRangeThisWeek DateRangeValueDynamicDateRange = `THIS_WEEK`
const DateRangeValueDynamicDateRangeThisYear DateRangeValueDynamicDateRange = `THIS_YEAR`
const DateRangeValueDynamicDateRangeToday DateRangeValueDynamicDateRange = `TODAY`
const DateRangeValueDynamicDateRangeYesterday DateRangeValueDynamicDateRange = `YESTERDAY`

func (*DateRangeValueDynamicDateRange) Set added in v0.44.0

Set raw string value and validate it against allowed values

func (*DateRangeValueDynamicDateRange) String added in v0.44.0

String representation for fmt.Print

func (*DateRangeValueDynamicDateRange) Type added in v0.44.0

Type always returns DateRangeValueDynamicDateRange to satisfy [pflag.Value] interface

type DateValue added in v0.44.0

type DateValue struct {
	// Manually specified date-time value.
	DateValue string `json:"date_value,omitempty"`
	// Dynamic date-time value based on current date-time.
	DynamicDateValue DateValueDynamicDate `json:"dynamic_date_value,omitempty"`
	// Date-time precision to format the value into when the query is run.
	// Defaults to DAY_PRECISION (YYYY-MM-DD).
	Precision DatePrecision `json:"precision,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (DateValue) MarshalJSON added in v0.44.0

func (s DateValue) MarshalJSON() ([]byte, error)

func (*DateValue) UnmarshalJSON added in v0.44.0

func (s *DateValue) UnmarshalJSON(b []byte) error

type DateValueDynamicDate added in v0.44.0

type DateValueDynamicDate string
const DateValueDynamicDateNow DateValueDynamicDate = `NOW`
const DateValueDynamicDateYesterday DateValueDynamicDate = `YESTERDAY`

func (*DateValueDynamicDate) Set added in v0.44.0

func (f *DateValueDynamicDate) Set(v string) error

Set raw string value and validate it against allowed values

func (*DateValueDynamicDate) String added in v0.44.0

func (f *DateValueDynamicDate) String() string

String representation for fmt.Print

func (*DateValueDynamicDate) Type added in v0.44.0

func (f *DateValueDynamicDate) Type() string

Type always returns DateValueDynamicDate to satisfy [pflag.Value] interface

type DbsqlPermissionsAPI

type DbsqlPermissionsAPI struct {
	// contains filtered or unexported fields
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

**Note**: A new version of the Databricks SQL API is now available. Learn more

func NewDbsqlPermissions

func NewDbsqlPermissions(client *client.DatabricksClient) *DbsqlPermissionsAPI

func (*DbsqlPermissionsAPI) Get

func (a *DbsqlPermissionsAPI) Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)

func (*DbsqlPermissionsAPI) GetByObjectTypeAndObjectId

func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error)

Get object ACL.

Gets a JSON representation of the access control list (ACL) for a specified object.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:workspace/getpermissions instead. Learn more

func (*DbsqlPermissionsAPI) Set

func (a *DbsqlPermissionsAPI) Set(ctx context.Context, request SetRequest) (*SetResponse, error)

func (*DbsqlPermissionsAPI) TransferOwnership

func (a *DbsqlPermissionsAPI) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)

type DbsqlPermissionsInterface added in v0.29.0

type DbsqlPermissionsInterface interface {

	// Get object ACL.
	//
	// Gets a JSON representation of the access control list (ACL) for a specified
	// object.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:workspace/getpermissions instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)

	// Get object ACL.
	//
	// Gets a JSON representation of the access control list (ACL) for a specified
	// object.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:workspace/getpermissions instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error)

	// Set object ACL.
	//
	// Sets the access control list (ACL) for a specified object. This operation
	// will completely rewrite the ACL.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:workspace/setpermissions instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Set(ctx context.Context, request SetRequest) (*SetResponse, error)

	// Transfer object ownership.
	//
	// Transfers ownership of a dashboard, query, or alert to an active user.
	// Requires an admin API key.
	//
	// **Note**: A new version of the Databricks SQL API is now available. For
	// queries and alerts, please use :method:queries/update and
	// :method:alerts/update respectively instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)
}

type DbsqlPermissionsService

type DbsqlPermissionsService interface {

	// Get object ACL.
	//
	// Gets a JSON representation of the access control list (ACL) for a
	// specified object.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:workspace/getpermissions instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)

	// Set object ACL.
	//
	// Sets the access control list (ACL) for a specified object. This operation
	// will completely rewrite the ACL.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:workspace/setpermissions instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Set(ctx context.Context, request SetRequest) (*SetResponse, error)

	// Transfer object ownership.
	//
	// Transfers ownership of a dashboard, query, or alert to an active user.
	// Requires an admin API key.
	//
	// **Note**: A new version of the Databricks SQL API is now available. For
	// queries and alerts, please use :method:queries/update and
	// :method:alerts/update respectively instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)
}

The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API.

There are three levels of permission:

- `CAN_VIEW`: Allows read-only access

- `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)

- `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`)

**Note**: A new version of the Databricks SQL API is now available. Learn more

type DeleteAlertsLegacyRequest added in v0.44.0

type DeleteAlertsLegacyRequest struct {
	AlertId string `json:"-" url:"-"`
}

Delete an alert

type DeleteDashboardRequest

type DeleteDashboardRequest struct {
	DashboardId string `json:"-" url:"-"`
}

Remove a dashboard

type DeleteDashboardWidgetRequest added in v0.19.0

type DeleteDashboardWidgetRequest struct {
	// Widget ID returned by :method:dashboardwidgets/create
	Id string `json:"-" url:"-"`
}

Remove widget

type DeleteQueriesLegacyRequest added in v0.44.0

type DeleteQueriesLegacyRequest struct {
	QueryId string `json:"-" url:"-"`
}

Delete a query

type DeleteQueryVisualizationsLegacyRequest added in v0.44.0

type DeleteQueryVisualizationsLegacyRequest struct {
	// Widget ID returned by :method:queryvisualizations/create
	Id string `json:"-" url:"-"`
}

Remove visualization

type DeleteResponse added in v0.34.0

type DeleteResponse struct {
}

type DeleteVisualizationRequest added in v0.44.0

type DeleteVisualizationRequest struct {
	Id string `json:"-" url:"-"`
}

Remove a visualization

type DeleteWarehouseRequest

type DeleteWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Delete a warehouse

type DeleteWarehouseResponse added in v0.34.0

type DeleteWarehouseResponse struct {
}

type Disposition added in v0.3.0

type Disposition string
const DispositionExternalLinks Disposition = `EXTERNAL_LINKS`
const DispositionInline Disposition = `INLINE`

func (*Disposition) Set added in v0.3.0

func (f *Disposition) Set(v string) error

Set raw string value and validate it against allowed values

func (*Disposition) String added in v0.3.0

func (f *Disposition) String() string

String representation for fmt.Print

func (*Disposition) Type added in v0.3.0

func (f *Disposition) Type() string

Type always returns Disposition to satisfy [pflag.Value] interface

type EditAlert

type EditAlert struct {
	AlertId string `json:"-" url:"-"`
	// Name of the alert.
	Name string `json:"name"`
	// Alert configuration options.
	Options AlertOptions `json:"options"`
	// Query ID.
	QueryId string `json:"query_id"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EditAlert) MarshalJSON added in v0.23.0

func (s EditAlert) MarshalJSON() ([]byte, error)

func (*EditAlert) UnmarshalJSON added in v0.23.0

func (s *EditAlert) UnmarshalJSON(b []byte) error

type EditWarehouseRequest

type EditWarehouseRequest struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute.
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Required. Id of the warehouse to configure.
	Id string `json:"-" url:"-"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EditWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EditWarehouseRequest) MarshalJSON added in v0.23.0

func (s EditWarehouseRequest) MarshalJSON() ([]byte, error)

func (*EditWarehouseRequest) UnmarshalJSON added in v0.23.0

func (s *EditWarehouseRequest) UnmarshalJSON(b []byte) error

type EditWarehouseRequestWarehouseType added in v0.9.0

type EditWarehouseRequestWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

const EditWarehouseRequestWarehouseTypeClassic EditWarehouseRequestWarehouseType = `CLASSIC`
const EditWarehouseRequestWarehouseTypePro EditWarehouseRequestWarehouseType = `PRO`
const EditWarehouseRequestWarehouseTypeTypeUnspecified EditWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED`

func (*EditWarehouseRequestWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EditWarehouseRequestWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*EditWarehouseRequestWarehouseType) Type added in v0.9.0

Type always returns EditWarehouseRequestWarehouseType to satisfy [pflag.Value] interface

type EditWarehouseResponse added in v0.34.0

type EditWarehouseResponse struct {
}

type Empty added in v0.44.0

type Empty struct {
}

Represents an empty message, similar to google.protobuf.Empty, which is not currently available here.

type EndpointConfPair

type EndpointConfPair struct {
	Key string `json:"key,omitempty"`

	Value string `json:"value,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EndpointConfPair) MarshalJSON added in v0.23.0

func (s EndpointConfPair) MarshalJSON() ([]byte, error)

func (*EndpointConfPair) UnmarshalJSON added in v0.23.0

func (s *EndpointConfPair) UnmarshalJSON(b []byte) error

type EndpointHealth

type EndpointHealth struct {
	// Details about errors that are causing current degraded/failed status.
	Details string `json:"details,omitempty"`
	// The reason for failure to bring up clusters for this warehouse. This is
	// available when status is 'FAILED' and sometimes when it is DEGRADED.
	FailureReason *TerminationReason `json:"failure_reason,omitempty"`
	// Deprecated. split into summary and details for security
	Message string `json:"message,omitempty"`
	// Health status of the warehouse.
	Status Status `json:"status,omitempty"`
	// A short summary of the health status in case of degraded/failed
	// warehouses.
	Summary string `json:"summary,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EndpointHealth) MarshalJSON added in v0.23.0

func (s EndpointHealth) MarshalJSON() ([]byte, error)

func (*EndpointHealth) UnmarshalJSON added in v0.23.0

func (s *EndpointHealth) UnmarshalJSON(b []byte) error

type EndpointInfo

type EndpointInfo struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configures whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType EndpointInfoWarehouseType `json:"warehouse_type,omitempty"`

	ForceSendFields []string `json:"-"`
}

func (EndpointInfo) MarshalJSON added in v0.23.0

func (s EndpointInfo) MarshalJSON() ([]byte, error)

func (*EndpointInfo) UnmarshalJSON added in v0.23.0

func (s *EndpointInfo) UnmarshalJSON(b []byte) error

type EndpointInfoWarehouseType added in v0.9.0

// EndpointInfoWarehouseType is the warehouse type: `PRO` or `CLASSIC`. To use
// serverless compute, the type must be `PRO` and the warehouse's
// `enable_serverless_compute` field must be `true`.
type EndpointInfoWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

// Classic warehouse type.
const EndpointInfoWarehouseTypeClassic EndpointInfoWarehouseType = `CLASSIC`
// Pro warehouse type; required when serverless compute is enabled.
const EndpointInfoWarehouseTypePro EndpointInfoWarehouseType = `PRO`
// Warehouse type was not specified.
const EndpointInfoWarehouseTypeTypeUnspecified EndpointInfoWarehouseType = `TYPE_UNSPECIFIED`

func (*EndpointInfoWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*EndpointInfoWarehouseType) String added in v0.9.0

func (f *EndpointInfoWarehouseType) String() string

String representation for fmt.Print

func (*EndpointInfoWarehouseType) Type added in v0.9.0

Type always returns EndpointInfoWarehouseType to satisfy [pflag.Value] interface

type EndpointTagPair

// EndpointTagPair is a single key-value tag entry applied to a SQL warehouse
// (see EndpointTags).
type EndpointTagPair struct {
	// Tag key.
	Key string `json:"key,omitempty"`

	// Tag value.
	Value string `json:"value,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EndpointTagPair) MarshalJSON added in v0.23.0

func (s EndpointTagPair) MarshalJSON() ([]byte, error)

func (*EndpointTagPair) UnmarshalJSON added in v0.23.0

func (s *EndpointTagPair) UnmarshalJSON(b []byte) error

type EndpointTags

// EndpointTags is the set of key-value tags attached to a SQL warehouse's
// resources.
type EndpointTags struct {
	// Custom key-value tag pairs.
	CustomTags []EndpointTagPair `json:"custom_tags,omitempty"`
}

type EnumValue added in v0.44.0

// EnumValue describes an enum-style query parameter: its allowed options and
// the currently selected value(s).
type EnumValue struct {
	// List of valid query parameter values, newline delimited.
	EnumOptions string `json:"enum_options,omitempty"`
	// If specified, allows multiple values to be selected for this parameter.
	MultiValuesOptions *MultiValuesOptions `json:"multi_values_options,omitempty"`
	// List of selected query parameter values.
	Values []string `json:"values,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (EnumValue) MarshalJSON added in v0.44.0

func (s EnumValue) MarshalJSON() ([]byte, error)

func (*EnumValue) UnmarshalJSON added in v0.44.0

func (s *EnumValue) UnmarshalJSON(b []byte) error

type ExecuteStatementRequest added in v0.3.0

// ExecuteStatementRequest is the request body for executing a SQL statement
// against a SQL warehouse via the Statement Execution API.
type ExecuteStatementRequest struct {
	// Applies the given byte limit to the statement's result size. Byte counts
	// are based on internal data representations and might not match the final
	// size in the requested `format`. If the result was truncated due to the
	// byte limit, then `truncated` in the response is set to `true`. When using
	// `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is
	// applied if `byte_limit` is not explicitly set.
	ByteLimit int64 `json:"byte_limit,omitempty"`
	// Sets default catalog for statement execution, similar to [`USE CATALOG`]
	// in SQL.
	//
	// [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html
	Catalog string `json:"catalog,omitempty"`

	// Result fetch mode. `INLINE` and `EXTERNAL_LINKS` are the dispositions
	// referred to throughout the `format` documentation below.
	Disposition Disposition `json:"disposition,omitempty"`
	// Statement execution supports three result formats: `JSON_ARRAY`
	// (default), `ARROW_STREAM`, and `CSV`.
	//
	// Important: The formats `ARROW_STREAM` and `CSV` are supported only with
	// `EXTERNAL_LINKS` disposition. `JSON_ARRAY` is supported in `INLINE` and
	// `EXTERNAL_LINKS` disposition.
	//
	// When specifying `format=JSON_ARRAY`, result data will be formatted as an
	// array of arrays of values, where each value is either the *string
	// representation* of a value, or `null`. For example, the output of `SELECT
	// concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)`
	// would look like this:
	//
	// ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null
	// ], ] ```
	//
	// When specifying `format=JSON_ARRAY` and `disposition=EXTERNAL_LINKS`,
	// each chunk in the result contains compact JSON with no indentation or
	// extra whitespace.
	//
	// When specifying `format=ARROW_STREAM` and `disposition=EXTERNAL_LINKS`,
	// each chunk in the result will be formatted as Apache Arrow Stream. See
	// the [Apache Arrow streaming format].
	//
	// When specifying `format=CSV` and `disposition=EXTERNAL_LINKS`, each chunk
	// in the result will be a CSV according to [RFC 4180] standard. All the
	// columns values will have *string representation* similar to the
	// `JSON_ARRAY` format, and `null` values will be encoded as "null".
	// Only the first chunk in the result would contain a header row with column
	// names. For example, the output of `SELECT concat('id-', id) AS strCol, id
	// AS intCol, null as nullCol FROM range(3)` would look like this:
	//
	// ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ```
	//
	// [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format
	// [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180
	Format Format `json:"format,omitempty"`
	// When `wait_timeout > 0s`, the call will block up to the specified time.
	// If the statement execution doesn't finish within this time,
	// `on_wait_timeout` determines whether the execution should continue or be
	// canceled. When set to `CONTINUE`, the statement execution continues
	// asynchronously and the call returns a statement ID which can be used for
	// polling with :method:statementexecution/getStatement. When set to
	// `CANCEL`, the statement execution is canceled and the call returns with a
	// `CANCELED` state.
	OnWaitTimeout ExecuteStatementRequestOnWaitTimeout `json:"on_wait_timeout,omitempty"`
	// A list of parameters to pass into a SQL statement containing parameter
	// markers. A parameter consists of a name, a value, and optionally a type.
	// To represent a NULL value, the `value` field may be omitted or set to
	// `null` explicitly. If the `type` field is omitted, the value is
	// interpreted as a string.
	//
	// If the type is given, parameters will be checked for type correctness
	// according to the given type. A value is correct if the provided string
	// can be converted to the requested type using the `cast` function. The
	// exact semantics are described in the section [`cast` function] of the SQL
	// language reference.
	//
	// For example, the following statement contains two parameters, `my_name`
	// and `my_date`:
	//
	// SELECT * FROM my_table WHERE name = :my_name AND date = :my_date
	//
	// The parameters can be passed in the request body as follows:
	//
	// { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND
	// date = :my_date", "parameters": [ { "name": "my_name", "value": "the
	// name" }, { "name": "my_date", "value": "2020-01-01", "type": "DATE" } ] }
	//
	// Currently, positional parameters denoted by a `?` marker are not
	// supported by the Databricks SQL Statement Execution API.
	//
	// Also see the section [Parameter markers] of the SQL language reference.
	//
	// [Parameter markers]: https://docs.databricks.com/sql/language-manual/sql-ref-parameter-marker.html
	// [`cast` function]: https://docs.databricks.com/sql/language-manual/functions/cast.html
	Parameters []StatementParameterListItem `json:"parameters,omitempty"`
	// Applies the given row limit to the statement's result set, but unlike the
	// `LIMIT` clause in SQL, it also sets the `truncated` field in the response
	// to indicate whether the result was trimmed due to the limit or not.
	RowLimit int64 `json:"row_limit,omitempty"`
	// Sets default schema for statement execution, similar to [`USE SCHEMA`] in
	// SQL.
	//
	// [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html
	Schema string `json:"schema,omitempty"`
	// The SQL statement to execute. The statement can optionally be
	// parameterized, see `parameters`. Required (no `omitempty`).
	Statement string `json:"statement"`
	// The time in seconds the call will wait for the statement's result set as
	// `Ns`, where `N` can be set to 0 or to a value between 5 and 50.
	//
	// When set to `0s`, the statement will execute in asynchronous mode and the
	// call will not wait for the execution to finish. In this case, the call
	// returns directly with `PENDING` state and a statement ID which can be
	// used for polling with :method:statementexecution/getStatement.
	//
	// When set between 5 and 50 seconds, the call will behave synchronously up
	// to this timeout and wait for the statement execution to finish. If the
	// execution finishes within this time, the call returns immediately with a
	// manifest and result data (or a `FAILED` state in case of an execution
	// error). If the statement takes longer to execute, `on_wait_timeout`
	// determines what should happen after the timeout is reached.
	WaitTimeout string `json:"wait_timeout,omitempty"`
	// Warehouse upon which to execute a statement. See also [What are SQL
	// warehouses?] Required (no `omitempty`).
	//
	// [What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html
	WarehouseId string `json:"warehouse_id"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (ExecuteStatementRequest) MarshalJSON added in v0.23.0

func (s ExecuteStatementRequest) MarshalJSON() ([]byte, error)

func (*ExecuteStatementRequest) UnmarshalJSON added in v0.23.0

func (s *ExecuteStatementRequest) UnmarshalJSON(b []byte) error

type ExecuteStatementRequestOnWaitTimeout added in v0.20.0

// ExecuteStatementRequestOnWaitTimeout determines what happens when
// `wait_timeout` elapses before the statement finishes: `CONTINUE` keeps
// executing asynchronously, `CANCEL` cancels the statement.
type ExecuteStatementRequestOnWaitTimeout string

When `wait_timeout > 0s`, the call will block up to the specified time. If the statement execution doesn't finish within this time, `on_wait_timeout` determines whether the execution should continue or be canceled. When set to `CONTINUE`, the statement execution continues asynchronously and the call returns a statement ID which can be used for polling with :method:statementexecution/getStatement. When set to `CANCEL`, the statement execution is canceled and the call returns with a `CANCELED` state.

// Cancel the statement when the wait timeout elapses; the call returns with a
// `CANCELED` state.
const ExecuteStatementRequestOnWaitTimeoutCancel ExecuteStatementRequestOnWaitTimeout = `CANCEL`
// Continue executing asynchronously after the wait timeout elapses; the call
// returns a statement ID for polling.
const ExecuteStatementRequestOnWaitTimeoutContinue ExecuteStatementRequestOnWaitTimeout = `CONTINUE`

func (*ExecuteStatementRequestOnWaitTimeout) Set added in v0.20.0

Set raw string value and validate it against allowed values

func (*ExecuteStatementRequestOnWaitTimeout) String added in v0.20.0

String representation for fmt.Print

func (*ExecuteStatementRequestOnWaitTimeout) Type added in v0.20.0

Type always returns ExecuteStatementRequestOnWaitTimeout to satisfy [pflag.Value] interface

// ExternalLink describes one chunk of a statement result that is fetched from
// an external service via the `external_link` URL (EXTERNAL_LINKS
// disposition).
type ExternalLink struct {
	// The number of bytes in the result chunk. This field is not available when
	// using `INLINE` disposition.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// Indicates the date-time that the given external link will expire and
	// becomes invalid, after which point a new `external_link` must be
	// requested.
	Expiration string `json:"expiration,omitempty"`

	// URL from which this chunk's result data is fetched with a GET request;
	// see `http_headers` for headers that must accompany the request.
	ExternalLink string `json:"external_link,omitempty"`
	// HTTP headers that must be included with a GET request to the
	// `external_link`. Each header is provided as a key-value pair. Headers are
	// typically used to pass a decryption key to the external service. The
	// values of these headers should be considered sensitive and the client
	// should not expose these values in a log.
	HttpHeaders map[string]string `json:"http_headers,omitempty"`
	// When fetching, provides the `chunk_index` for the _next_ chunk. If
	// absent, indicates there are no more chunks. The next chunk can be fetched
	// with a :method:statementexecution/getStatementResultChunkN request.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, provides a link to fetch the _next_ chunk. If absent,
	// indicates there are no more chunks. This link is an absolute `path` to be
	// joined with your `$DATABRICKS_HOST`, and should be treated as an opaque
	// link. This is an alternative to using `next_chunk_index`.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (ExternalLink) MarshalJSON added in v0.23.0

func (s ExternalLink) MarshalJSON() ([]byte, error)

func (*ExternalLink) UnmarshalJSON added in v0.23.0

func (s *ExternalLink) UnmarshalJSON(b []byte) error

type Format added in v0.3.0

// Format selects the statement result format: `JSON_ARRAY`, `ARROW_STREAM`,
// or `CSV`. See ExecuteStatementRequest.Format for the constraints on
// combining formats with dispositions.
type Format string
// Apache Arrow streaming format; only valid with `EXTERNAL_LINKS` disposition.
const FormatArrowStream Format = `ARROW_STREAM`
// RFC 4180 CSV; only valid with `EXTERNAL_LINKS` disposition.
const FormatCsv Format = `CSV`
// JSON array-of-arrays of string values (the default format).
const FormatJsonArray Format = `JSON_ARRAY`

func (*Format) Set added in v0.3.0

func (f *Format) Set(v string) error

Set raw string value and validate it against allowed values

func (*Format) String added in v0.3.0

func (f *Format) String() string

String representation for fmt.Print

func (*Format) Type added in v0.3.0

func (f *Format) Type() string

Type always returns Format to satisfy [pflag.Value] interface

type GetAlertRequest

// GetAlertRequest is the request to fetch a single alert.
type GetAlertRequest struct {
	// Id of the alert to retrieve. Not serialized into the request body or
	// query string (`json:"-" url:"-"`).
	Id string `json:"-" url:"-"`
}

Get an alert

type GetAlertsLegacyRequest added in v0.44.0

// GetAlertsLegacyRequest is the request to fetch a single alert via the
// legacy Alerts API.
type GetAlertsLegacyRequest struct {
	// AlertId of the alert to retrieve. Not serialized into the request body
	// or query string (`json:"-" url:"-"`).
	AlertId string `json:"-" url:"-"`
}

Get an alert

type GetDashboardRequest

// GetDashboardRequest is the request to retrieve a dashboard definition.
type GetDashboardRequest struct {
	// DashboardId of the dashboard to retrieve. Not serialized into the
	// request body or query string (`json:"-" url:"-"`).
	DashboardId string `json:"-" url:"-"`
}

Retrieve a definition

type GetDbsqlPermissionRequest

// GetDbsqlPermissionRequest is the request to fetch the ACL for a DBSQL
// object.
type GetDbsqlPermissionRequest struct {
	// Object ID. An ACL is returned for the object with this UUID.
	ObjectId string `json:"-" url:"-"`
	// The type of object permissions to check.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Get object ACL

type GetQueriesLegacyRequest added in v0.44.0

// GetQueriesLegacyRequest is the request to fetch a query definition via the
// legacy Queries API.
type GetQueriesLegacyRequest struct {
	// QueryId of the query to retrieve. Not serialized into the request body
	// or query string (`json:"-" url:"-"`).
	QueryId string `json:"-" url:"-"`
}

Get a query definition.

type GetQueryRequest

// GetQueryRequest is the request to fetch a single query.
type GetQueryRequest struct {
	// Id of the query to retrieve. Not serialized into the request body or
	// query string (`json:"-" url:"-"`).
	Id string `json:"-" url:"-"`
}

Get a query

type GetResponse

// GetResponse is the access-control list returned for a DBSQL object.
type GetResponse struct {
	// Access-control entries for the object.
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (GetResponse) MarshalJSON added in v0.23.0

func (s GetResponse) MarshalJSON() ([]byte, error)

func (*GetResponse) UnmarshalJSON added in v0.23.0

func (s *GetResponse) UnmarshalJSON(b []byte) error

type GetStatementRequest added in v0.3.0

// GetStatementRequest is the request to fetch a statement's status, manifest,
// and first result chunk.
type GetStatementRequest struct {
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"-" url:"-"`
}

Get status, manifest, and result first chunk

type GetStatementResultChunkNRequest added in v0.3.0

// GetStatementResultChunkNRequest is the request to fetch a statement result
// chunk by its index.
type GetStatementResultChunkNRequest struct {
	// Index of the result chunk to fetch (see ExternalLink.ChunkIndex /
	// NextChunkIndex).
	ChunkIndex int `json:"-" url:"-"`
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"-" url:"-"`
}

Get result chunk by index

type GetWarehousePermissionLevelsRequest added in v0.15.0

// GetWarehousePermissionLevelsRequest is the request to list the permission
// levels available on a SQL warehouse.
type GetWarehousePermissionLevelsRequest struct {
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

Get SQL warehouse permission levels

type GetWarehousePermissionLevelsResponse added in v0.15.0

// GetWarehousePermissionLevelsResponse lists the permission levels available
// on a SQL warehouse.
type GetWarehousePermissionLevelsResponse struct {
	// Specific permission levels
	PermissionLevels []WarehousePermissionsDescription `json:"permission_levels,omitempty"`
}

type GetWarehousePermissionsRequest added in v0.15.0

// GetWarehousePermissionsRequest is the request to fetch the permissions set
// on a SQL warehouse.
type GetWarehousePermissionsRequest struct {
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

Get SQL warehouse permissions

type GetWarehouseRequest

// GetWarehouseRequest is the request to fetch info for a single SQL
// warehouse.
type GetWarehouseRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Get warehouse info

type GetWarehouseResponse

// GetWarehouseResponse describes the configuration and current status of a
// single SQL warehouse.
type GetWarehouseResponse struct {
	// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
	// RUNNING queries) before it is automatically stopped.
	//
	// Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
	//
	// Defaults to 120 mins
	AutoStopMins int `json:"auto_stop_mins,omitempty"`
	// Channel Details
	Channel *Channel `json:"channel,omitempty"`
	// Size of the clusters allocated for this warehouse. Increasing the size of
	// a spark cluster allows you to run larger queries on it. If you want to
	// increase the number of concurrent queries, please tune max_num_clusters.
	//
	// Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
	// - 2X-Large - 3X-Large - 4X-Large
	ClusterSize string `json:"cluster_size,omitempty"`
	// warehouse creator name
	CreatorName string `json:"creator_name,omitempty"`
	// Configures whether the warehouse should use Photon optimized clusters.
	//
	// Defaults to false.
	EnablePhoton bool `json:"enable_photon,omitempty"`
	// Configures whether the warehouse should use serverless compute
	EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
	// Optional health status. Assume the warehouse is healthy if this field is
	// not set.
	Health *EndpointHealth `json:"health,omitempty"`
	// unique identifier for warehouse
	Id string `json:"id,omitempty"`
	// Deprecated. Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// the jdbc connection string for this warehouse
	JdbcUrl string `json:"jdbc_url,omitempty"`
	// Maximum number of clusters that the autoscaler will create to handle
	// concurrent queries.
	//
	// Supported values: - Must be >= min_num_clusters - Must be <= 30.
	//
	// Defaults to min_num_clusters if unset.
	MaxNumClusters int `json:"max_num_clusters,omitempty"`
	// Minimum number of available clusters that will be maintained for this SQL
	// warehouse. Increasing this will ensure that a larger number of clusters
	// are always running and therefore may reduce the cold start time for new
	// queries. This is similar to reserved vs. revocable cores in a resource
	// manager.
	//
	// Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
	//
	// Defaults to 1
	MinNumClusters int `json:"min_num_clusters,omitempty"`
	// Logical name for the cluster.
	//
	// Supported values: - Must be unique within an org. - Must be less than 100
	// characters.
	Name string `json:"name,omitempty"`
	// current number of active sessions for the warehouse
	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
	// current number of clusters running for the service
	NumClusters int `json:"num_clusters,omitempty"`
	// ODBC parameters for the SQL warehouse
	OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
	// Configurations whether the warehouse should use spot instances.
	SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
	// State of the warehouse
	State State `json:"state,omitempty"`
	// A set of key-value pairs that will be tagged on all resources (e.g., AWS
	// instances and EBS volumes) associated with this SQL warehouse.
	//
	// Supported values: - Number of tags < 45.
	Tags *EndpointTags `json:"tags,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless
	// compute, you must set to `PRO` and also set the field
	// `enable_serverless_compute` to `true`.
	WarehouseType GetWarehouseResponseWarehouseType `json:"warehouse_type,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (GetWarehouseResponse) MarshalJSON added in v0.23.0

func (s GetWarehouseResponse) MarshalJSON() ([]byte, error)

func (*GetWarehouseResponse) UnmarshalJSON added in v0.23.0

func (s *GetWarehouseResponse) UnmarshalJSON(b []byte) error

type GetWarehouseResponseWarehouseType added in v0.9.0

// GetWarehouseResponseWarehouseType is the warehouse type: `PRO` or
// `CLASSIC`. To use serverless compute, the type must be `PRO` and the
// warehouse's `enable_serverless_compute` field must be `true`.
type GetWarehouseResponseWarehouseType string

Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`.

// Classic warehouse type.
const GetWarehouseResponseWarehouseTypeClassic GetWarehouseResponseWarehouseType = `CLASSIC`
// Pro warehouse type; required when serverless compute is enabled.
const GetWarehouseResponseWarehouseTypePro GetWarehouseResponseWarehouseType = `PRO`
// Warehouse type was not specified.
const GetWarehouseResponseWarehouseTypeTypeUnspecified GetWarehouseResponseWarehouseType = `TYPE_UNSPECIFIED`

func (*GetWarehouseResponseWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*GetWarehouseResponseWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*GetWarehouseResponseWarehouseType) Type added in v0.9.0

Type always returns GetWarehouseResponseWarehouseType to satisfy [pflag.Value] interface

type GetWorkspaceWarehouseConfigResponse

// GetWorkspaceWarehouseConfigResponse is the workspace-level configuration
// applied to all SQL warehouses in the workspace.
type GetWorkspaceWarehouseConfigResponse struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration JSON serialized
	// size must be less than <= 512K
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use sql_configuration_parameters
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy GetWorkspaceWarehouseConfigResponseSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (GetWorkspaceWarehouseConfigResponse) MarshalJSON added in v0.23.0

func (s GetWorkspaceWarehouseConfigResponse) MarshalJSON() ([]byte, error)

func (*GetWorkspaceWarehouseConfigResponse) UnmarshalJSON added in v0.23.0

func (s *GetWorkspaceWarehouseConfigResponse) UnmarshalJSON(b []byte) error

type GetWorkspaceWarehouseConfigResponseSecurityPolicy

// GetWorkspaceWarehouseConfigResponseSecurityPolicy is the security policy
// applied to warehouses in the workspace.
type GetWorkspaceWarehouseConfigResponseSecurityPolicy string

Security policy for warehouses

// Data access control security policy.
const GetWorkspaceWarehouseConfigResponseSecurityPolicyDataAccessControl GetWorkspaceWarehouseConfigResponseSecurityPolicy = `DATA_ACCESS_CONTROL`
// No security policy.
const GetWorkspaceWarehouseConfigResponseSecurityPolicyNone GetWorkspaceWarehouseConfigResponseSecurityPolicy = `NONE`
// Credential-passthrough security policy.
const GetWorkspaceWarehouseConfigResponseSecurityPolicyPassthrough GetWorkspaceWarehouseConfigResponseSecurityPolicy = `PASSTHROUGH`

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) String

String representation for fmt.Print

func (*GetWorkspaceWarehouseConfigResponseSecurityPolicy) Type

Type always returns GetWorkspaceWarehouseConfigResponseSecurityPolicy to satisfy [pflag.Value] interface

type LegacyAlert added in v0.44.0

// LegacyAlert describes an alert as returned by the legacy Alerts API.
type LegacyAlert struct {
	// Timestamp when the alert was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Alert ID.
	Id string `json:"id,omitempty"`
	// Timestamp when the alert was last triggered.
	LastTriggeredAt string `json:"last_triggered_at,omitempty"`
	// Name of the alert.
	Name string `json:"name,omitempty"`
	// Alert configuration options.
	Options *AlertOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`

	// The query associated with this alert.
	Query *AlertQuery `json:"query,omitempty"`
	// Number of seconds after being triggered before the alert rearms itself
	// and can be triggered again. If `null`, alert will never be triggered
	// again.
	Rearm int `json:"rearm,omitempty"`
	// State of the alert. Possible values are: `unknown` (yet to be evaluated),
	// `triggered` (evaluated and fulfilled trigger conditions), or `ok`
	// (evaluated and did not fulfill trigger conditions).
	State LegacyAlertState `json:"state,omitempty"`
	// Timestamp when the alert was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// User associated with the alert (presumably its creator/owner — confirm
	// against the Alerts Legacy API docs).
	User *User `json:"user,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (LegacyAlert) MarshalJSON added in v0.44.0

func (s LegacyAlert) MarshalJSON() ([]byte, error)

func (*LegacyAlert) UnmarshalJSON added in v0.44.0

func (s *LegacyAlert) UnmarshalJSON(b []byte) error

type LegacyAlertState added in v0.44.0

// LegacyAlertState is the state of an alert: `unknown` (yet to be evaluated),
// `triggered` (evaluated and fulfilled trigger conditions), or `ok`
// (evaluated and did not fulfill trigger conditions).
type LegacyAlertState string

State of the alert. Possible values are: `unknown` (yet to be evaluated), `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated and did not fulfill trigger conditions).

// ok: evaluated and did not fulfill trigger conditions.
const LegacyAlertStateOk LegacyAlertState = `ok`
// triggered: evaluated and fulfilled trigger conditions.
const LegacyAlertStateTriggered LegacyAlertState = `triggered`
// unknown: yet to be evaluated.
const LegacyAlertStateUnknown LegacyAlertState = `unknown`

func (*LegacyAlertState) Set added in v0.44.0

func (f *LegacyAlertState) Set(v string) error

Set raw string value and validate it against allowed values

func (*LegacyAlertState) String added in v0.44.0

func (f *LegacyAlertState) String() string

String representation for fmt.Print

func (*LegacyAlertState) Type added in v0.44.0

func (f *LegacyAlertState) Type() string

Type always returns LegacyAlertState to satisfy [pflag.Value] interface

type LegacyQuery added in v0.44.0

// LegacyQuery describes a query as returned by the legacy Queries API.
type LegacyQuery struct {
	// Describes whether the authenticated user is allowed to edit the
	// definition of this query.
	CanEdit bool `json:"can_edit,omitempty"`
	// The timestamp when this query was created.
	CreatedAt string `json:"created_at,omitempty"`
	// Data source ID maps to the ID of the data source used by the resource and
	// is distinct from the warehouse ID. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Query ID.
	Id string `json:"id,omitempty"`
	// Indicates whether the query is trashed. Trashed queries can't be used in
	// dashboards, or appear in search results. If this boolean is `true`, the
	// `options` property for this query includes a `moved_to_trash_at`
	// timestamp. Trashed queries are permanently deleted after 30 days.
	IsArchived bool `json:"is_archived,omitempty"`
	// Whether the query is a draft. Draft queries only appear in list views for
	// their owners. Visualizations from draft queries cannot appear on
	// dashboards.
	IsDraft bool `json:"is_draft,omitempty"`
	// Whether this query object appears in the current user's favorites list.
	// This flag determines whether the star icon for favorites is selected.
	IsFavorite bool `json:"is_favorite,omitempty"`
	// Text parameter types are not safe from SQL injection for all types of
	// data source. Set this Boolean parameter to `true` if a query either does
	// not use any text type parameters or uses a data source type where text
	// type parameters are handled safely.
	IsSafe bool `json:"is_safe,omitempty"`

	// The user who last saved changes to this query (see last_modified_by_id).
	LastModifiedBy *User `json:"last_modified_by,omitempty"`
	// The ID of the user who last saved changes to this query.
	LastModifiedById int `json:"last_modified_by_id,omitempty"`
	// If there is a cached result for this query and user, this field includes
	// the query result ID. If this query uses parameters, this field is always
	// null.
	LatestQueryDataId string `json:"latest_query_data_id,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`

	// Additional query options (includes `moved_to_trash_at` for trashed
	// queries; see is_archived).
	Options *QueryOptions `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query *
	// `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query
	PermissionTier PermissionLevel `json:"permission_tier,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// A SHA-256 hash of the query text along with the authenticated user ID.
	QueryHash string `json:"query_hash,omitempty"`
	// Sets the **Run as** role for the object. Must be set to one of `"viewer"`
	// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
	// owner" behavior)
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`

	// Tags attached to this query.
	Tags []string `json:"tags,omitempty"`
	// The timestamp at which this query was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// The user who owns the query (see user_id).
	User *User `json:"user,omitempty"`
	// The ID of the user who owns the query.
	UserId int `json:"user_id,omitempty"`

	// Visualizations defined on this query.
	Visualizations []LegacyVisualization `json:"visualizations,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

func (LegacyQuery) MarshalJSON added in v0.44.0

func (s LegacyQuery) MarshalJSON() ([]byte, error)

func (*LegacyQuery) UnmarshalJSON added in v0.44.0

func (s *LegacyQuery) UnmarshalJSON(b []byte) error

type LegacyVisualization added in v0.44.0

// LegacyVisualization describes a visualization attached to a query in the
// legacy API. The visualization description API changes frequently and is
// unsupported; Databricks does not recommend constructing ad-hoc
// visualizations entirely in JSON.
type LegacyVisualization struct {
	// Timestamp when the visualization was created.
	CreatedAt string `json:"created_at,omitempty"`
	// A short description of this visualization. This is not displayed in the
	// UI.
	Description string `json:"description,omitempty"`
	// The UUID for this visualization.
	Id string `json:"id,omitempty"`
	// The name of the visualization that appears on dashboards and the query
	// screen.
	Name string `json:"name,omitempty"`
	// The options object varies widely from one visualization type to the next
	// and is unsupported. Databricks does not recommend modifying visualization
	// settings in JSON.
	Options any `json:"options,omitempty"`

	// The query this visualization belongs to.
	Query *LegacyQuery `json:"query,omitempty"`
	// The type of visualization: chart, table, pivot table, and so on.
	Type string `json:"type,omitempty"`

	// Timestamp when the visualization was last updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// ForceSendFields lists field names (Go struct names) to serialize even
	// when set to the zero value; the list itself is never serialized
	// (`json:"-"`).
	ForceSendFields []string `json:"-"`
}

The visualization description API changes frequently and is unsupported. You can duplicate a visualization by copying description objects received _from the API_ and then using them to create a new one with a POST request to the same endpoint. Databricks does not recommend constructing ad-hoc visualizations entirely in JSON.

func (LegacyVisualization) MarshalJSON added in v0.44.0

func (s LegacyVisualization) MarshalJSON() ([]byte, error)

func (*LegacyVisualization) UnmarshalJSON added in v0.44.0

func (s *LegacyVisualization) UnmarshalJSON(b []byte) error

type LifecycleState added in v0.44.0

// LifecycleState is the workspace state of an object, used for tracking
// trashed status (see e.g. ListAlertsResponseAlert.LifecycleState).
type LifecycleState string
// The object is active in the workspace.
const LifecycleStateActive LifecycleState = `ACTIVE`
// The object has been moved to the trash.
const LifecycleStateTrashed LifecycleState = `TRASHED`

func (*LifecycleState) Set added in v0.44.0

func (f *LifecycleState) Set(v string) error

Set raw string value and validate it against allowed values

func (*LifecycleState) String added in v0.44.0

func (f *LifecycleState) String() string

String representation for fmt.Print

func (*LifecycleState) Type added in v0.44.0

func (f *LifecycleState) Type() string

Type always returns LifecycleState to satisfy [pflag.Value] interface

type ListAlertsRequest added in v0.44.0

type ListAlertsRequest struct {
	// Number of alerts to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Opaque token to fetch the next page of results, as returned in
	// ListAlertsResponse.NextPageToken.
	PageToken string `json:"-" url:"page_token,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

List alerts

func (ListAlertsRequest) MarshalJSON added in v0.44.0

func (s ListAlertsRequest) MarshalJSON() ([]byte, error)

func (*ListAlertsRequest) UnmarshalJSON added in v0.44.0

func (s *ListAlertsRequest) UnmarshalJSON(b []byte) error

type ListAlertsResponse added in v0.44.0

type ListAlertsResponse struct {
	// Token to pass as ListAlertsRequest.PageToken to retrieve the next page;
	// empty when no further pages remain.
	NextPageToken string `json:"next_page_token,omitempty"`
	// One page of alerts.
	Results []ListAlertsResponseAlert `json:"results,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListAlertsResponse) MarshalJSON added in v0.44.0

func (s ListAlertsResponse) MarshalJSON() ([]byte, error)

func (*ListAlertsResponse) UnmarshalJSON added in v0.44.0

func (s *ListAlertsResponse) UnmarshalJSON(b []byte) error

type ListAlertsResponseAlert added in v0.44.0

type ListAlertsResponseAlert struct {
	// Trigger conditions of the alert.
	Condition *AlertCondition `json:"condition,omitempty"`
	// The timestamp indicating when the alert was created.
	CreateTime string `json:"create_time,omitempty"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This can include
	// email subject entries and Slack notification headers, for example. See
	// [here] for custom templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// The display name of the alert.
	DisplayName string `json:"display_name,omitempty"`
	// UUID identifying the alert.
	Id string `json:"id,omitempty"`
	// The workspace state of the alert. Used for tracking trashed status.
	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
	// Whether to notify alert subscribers when alert returns back to normal.
	NotifyOnOk bool `json:"notify_on_ok,omitempty"`
	// The owner's username. This field is set to "Unavailable" if the user has
	// been deleted.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// UUID of the query attached to the alert.
	QueryId string `json:"query_id,omitempty"`
	// Number of seconds an alert must wait after being triggered to rearm
	// itself. After rearming, it can be triggered again. If 0 or not specified,
	// the alert will not be triggered again.
	SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`
	// Current state of the alert's trigger status. This field is set to UNKNOWN
	// if the alert has not yet been evaluated or ran into an error during the
	// last evaluation.
	State AlertState `json:"state,omitempty"`
	// Timestamp when the alert was last triggered, if the alert has been
	// triggered before.
	TriggerTime string `json:"trigger_time,omitempty"`
	// The timestamp indicating when the alert was updated.
	UpdateTime string `json:"update_time,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListAlertsResponseAlert) MarshalJSON added in v0.44.0

func (s ListAlertsResponseAlert) MarshalJSON() ([]byte, error)

func (*ListAlertsResponseAlert) UnmarshalJSON added in v0.44.0

func (s *ListAlertsResponseAlert) UnmarshalJSON(b []byte) error

type ListDashboardsRequest

type ListDashboardsRequest struct {
	// Name of dashboard attribute to order by.
	Order ListOrder `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of dashboards to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term.
	Q string `json:"-" url:"q,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

Get dashboard objects

func (ListDashboardsRequest) MarshalJSON added in v0.23.0

func (s ListDashboardsRequest) MarshalJSON() ([]byte, error)

func (*ListDashboardsRequest) UnmarshalJSON added in v0.23.0

func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error

type ListOrder

type ListOrder string

// ListOrderCreatedAt orders results by creation timestamp.
const ListOrderCreatedAt ListOrder = `created_at`

// ListOrderName orders results by name.
const ListOrderName ListOrder = `name`

func (*ListOrder) Set

func (f *ListOrder) Set(v string) error

Set raw string value and validate it against allowed values

func (*ListOrder) String

func (f *ListOrder) String() string

String representation for fmt.Print

func (*ListOrder) Type

func (f *ListOrder) Type() string

Type always returns ListOrder to satisfy [pflag.Value] interface

type ListQueriesLegacyRequest added in v0.44.0

type ListQueriesLegacyRequest struct {
	// Name of query attribute to order by. Default sort order is ascending.
	// Append a dash (`-`) to order descending instead.
	//
	// - `name`: The name of the query.
	//
	// - `created_at`: The timestamp the query was created.
	//
	// - `runtime`: The time it took to run this query. This is blank for
	// parameterized queries. A blank value is treated as the highest value for
	// sorting.
	//
	// - `executed_at`: The timestamp when the query was last run.
	//
	// - `created_by`: The user name of the user that created the query.
	Order string `json:"-" url:"order,omitempty"`
	// Page number to retrieve.
	Page int `json:"-" url:"page,omitempty"`
	// Number of queries to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Full text search term
	Q string `json:"-" url:"q,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

Get a list of queries

func (ListQueriesLegacyRequest) MarshalJSON added in v0.44.0

func (s ListQueriesLegacyRequest) MarshalJSON() ([]byte, error)

func (*ListQueriesLegacyRequest) UnmarshalJSON added in v0.44.0

func (s *ListQueriesLegacyRequest) UnmarshalJSON(b []byte) error

type ListQueriesRequest

type ListQueriesRequest struct {
	// Number of queries to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Opaque token to fetch the next page of results, as returned in
	// ListQueryObjectsResponse.NextPageToken.
	PageToken string `json:"-" url:"page_token,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

List queries

func (ListQueriesRequest) MarshalJSON added in v0.23.0

func (s ListQueriesRequest) MarshalJSON() ([]byte, error)

func (*ListQueriesRequest) UnmarshalJSON added in v0.23.0

func (s *ListQueriesRequest) UnmarshalJSON(b []byte) error

type ListQueriesResponse

type ListQueriesResponse struct {
	// Whether there is another page of results.
	HasNextPage bool `json:"has_next_page,omitempty"`
	// A token that can be used to get the next page of results.
	NextPageToken string `json:"next_page_token,omitempty"`
	// One page of query history entries.
	Res []QueryInfo `json:"res,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListQueriesResponse) MarshalJSON added in v0.23.0

func (s ListQueriesResponse) MarshalJSON() ([]byte, error)

func (*ListQueriesResponse) UnmarshalJSON added in v0.23.0

func (s *ListQueriesResponse) UnmarshalJSON(b []byte) error

type ListQueryHistoryRequest

type ListQueryHistoryRequest struct {
	// A filter to limit query history results. This field is optional.
	FilterBy *QueryFilter `json:"-" url:"filter_by,omitempty"`
	// Whether to include the query metrics with each query. Only use this for a
	// small subset of queries (max_results). Defaults to false.
	IncludeMetrics bool `json:"-" url:"include_metrics,omitempty"`
	// Limit the number of results returned in one page. Must be less than 1000
	// and the default is 100.
	MaxResults int `json:"-" url:"max_results,omitempty"`
	// A token that can be used to get the next page of results. The token can
	// contain characters that need to be encoded before using it in a URL. For
	// example, the character '+' needs to be replaced by %2B. This field is
	// optional.
	PageToken string `json:"-" url:"page_token,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

List Queries

func (ListQueryHistoryRequest) MarshalJSON added in v0.23.0

func (s ListQueryHistoryRequest) MarshalJSON() ([]byte, error)

func (*ListQueryHistoryRequest) UnmarshalJSON added in v0.23.0

func (s *ListQueryHistoryRequest) UnmarshalJSON(b []byte) error

type ListQueryObjectsResponse added in v0.44.0

type ListQueryObjectsResponse struct {
	// Token to pass as ListQueriesRequest.PageToken to retrieve the next page;
	// empty when no further pages remain.
	NextPageToken string `json:"next_page_token,omitempty"`
	// One page of queries.
	Results []ListQueryObjectsResponseQuery `json:"results,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListQueryObjectsResponse) MarshalJSON added in v0.44.0

func (s ListQueryObjectsResponse) MarshalJSON() ([]byte, error)

func (*ListQueryObjectsResponse) UnmarshalJSON added in v0.44.0

func (s *ListQueryObjectsResponse) UnmarshalJSON(b []byte) error

type ListQueryObjectsResponseQuery added in v0.44.0

type ListQueryObjectsResponseQuery struct {
	// Whether to apply a 1000 row limit to the query result.
	ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
	// Name of the catalog where this query will be executed.
	Catalog string `json:"catalog,omitempty"`
	// Timestamp when this query was created.
	CreateTime string `json:"create_time,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Display name of the query that appears in list views, widget headings,
	// and on the query page.
	DisplayName string `json:"display_name,omitempty"`
	// UUID identifying the query.
	Id string `json:"id,omitempty"`
	// Username of the user who last saved changes to this query.
	LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
	// Indicates whether the query is trashed.
	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
	// Username of the user that owns the query.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// List of query parameter definitions.
	Parameters []QueryParameter `json:"parameters,omitempty"`
	// Text of the query to be run.
	QueryText string `json:"query_text,omitempty"`
	// Sets the "Run as" role for the object.
	RunAsMode RunAsMode `json:"run_as_mode,omitempty"`
	// Name of the schema where this query will be executed.
	Schema string `json:"schema,omitempty"`
	// Tags attached to the query.
	Tags []string `json:"tags,omitempty"`
	// Timestamp when this query was last updated.
	UpdateTime string `json:"update_time,omitempty"`
	// ID of the SQL warehouse attached to the query.
	WarehouseId string `json:"warehouse_id,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListQueryObjectsResponseQuery) MarshalJSON added in v0.44.0

func (s ListQueryObjectsResponseQuery) MarshalJSON() ([]byte, error)

func (*ListQueryObjectsResponseQuery) UnmarshalJSON added in v0.44.0

func (s *ListQueryObjectsResponseQuery) UnmarshalJSON(b []byte) error

type ListResponse

type ListResponse struct {
	// The total number of dashboards.
	Count int `json:"count,omitempty"`
	// The current page being displayed.
	Page int `json:"page,omitempty"`
	// The number of dashboards per page.
	PageSize int `json:"page_size,omitempty"`
	// List of dashboards returned.
	Results []Dashboard `json:"results,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListResponse) MarshalJSON added in v0.23.0

func (s ListResponse) MarshalJSON() ([]byte, error)

func (*ListResponse) UnmarshalJSON added in v0.23.0

func (s *ListResponse) UnmarshalJSON(b []byte) error

type ListVisualizationsForQueryRequest added in v0.44.0

type ListVisualizationsForQueryRequest struct {
	// UUID of the query whose visualizations are listed; sent in the URL path,
	// not as a query parameter or body field.
	Id string `json:"-" url:"-"`
	// Number of visualizations to return per page.
	PageSize int `json:"-" url:"page_size,omitempty"`
	// Opaque token to fetch the next page of results, as returned in
	// ListVisualizationsForQueryResponse.NextPageToken.
	PageToken string `json:"-" url:"page_token,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

List visualizations on a query

func (ListVisualizationsForQueryRequest) MarshalJSON added in v0.44.0

func (s ListVisualizationsForQueryRequest) MarshalJSON() ([]byte, error)

func (*ListVisualizationsForQueryRequest) UnmarshalJSON added in v0.44.0

func (s *ListVisualizationsForQueryRequest) UnmarshalJSON(b []byte) error

type ListVisualizationsForQueryResponse added in v0.44.0

type ListVisualizationsForQueryResponse struct {
	// Token to pass as ListVisualizationsForQueryRequest.PageToken to retrieve
	// the next page; empty when no further pages remain.
	NextPageToken string `json:"next_page_token,omitempty"`
	// One page of visualizations.
	Results []Visualization `json:"results,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (ListVisualizationsForQueryResponse) MarshalJSON added in v0.44.0

func (s ListVisualizationsForQueryResponse) MarshalJSON() ([]byte, error)

func (*ListVisualizationsForQueryResponse) UnmarshalJSON added in v0.44.0

func (s *ListVisualizationsForQueryResponse) UnmarshalJSON(b []byte) error

type ListWarehousesRequest

type ListWarehousesRequest struct {
	// Service Principal which will be used to fetch the list of warehouses. If
	// not specified, the user from the session header is used.
	RunAsUserId int `json:"-" url:"run_as_user_id,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

List warehouses

func (ListWarehousesRequest) MarshalJSON added in v0.23.0

func (s ListWarehousesRequest) MarshalJSON() ([]byte, error)

func (*ListWarehousesRequest) UnmarshalJSON added in v0.23.0

func (s *ListWarehousesRequest) UnmarshalJSON(b []byte) error

type ListWarehousesResponse

type ListWarehousesResponse struct {
	// A list of warehouses and their configurations.
	Warehouses []EndpointInfo `json:"warehouses,omitempty"`
}

type MultiValuesOptions added in v0.32.0

type MultiValuesOptions struct {
	// Character that prefixes each selected parameter value.
	Prefix string `json:"prefix,omitempty"`
	// Character that separates each selected parameter value. Defaults to a
	// comma.
	Separator string `json:"separator,omitempty"`
	// Character that suffixes each selected parameter value.
	Suffix string `json:"suffix,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (MultiValuesOptions) MarshalJSON added in v0.32.0

func (s MultiValuesOptions) MarshalJSON() ([]byte, error)

func (*MultiValuesOptions) UnmarshalJSON added in v0.32.0

func (s *MultiValuesOptions) UnmarshalJSON(b []byte) error

type NumericValue added in v0.44.0

type NumericValue struct {
	// The numeric value itself.
	Value float64 `json:"value,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (NumericValue) MarshalJSON added in v0.44.0

func (s NumericValue) MarshalJSON() ([]byte, error)

func (*NumericValue) UnmarshalJSON added in v0.44.0

func (s *NumericValue) UnmarshalJSON(b []byte) error

type ObjectType

type ObjectType string

A singular noun object type.

const ObjectTypeAlert ObjectType = `alert`
const ObjectTypeDashboard ObjectType = `dashboard`
const ObjectTypeDataSource ObjectType = `data_source`
const ObjectTypeQuery ObjectType = `query`

func (*ObjectType) Set

func (f *ObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectType) String

func (f *ObjectType) String() string

String representation for fmt.Print

func (*ObjectType) Type

func (f *ObjectType) Type() string

Type always returns ObjectType to satisfy [pflag.Value] interface

type ObjectTypePlural

type ObjectTypePlural string

Always a plural of the object type.

const ObjectTypePluralAlerts ObjectTypePlural = `alerts`
const ObjectTypePluralDashboards ObjectTypePlural = `dashboards`
const ObjectTypePluralDataSources ObjectTypePlural = `data_sources`
const ObjectTypePluralQueries ObjectTypePlural = `queries`

func (*ObjectTypePlural) Set

func (f *ObjectTypePlural) Set(v string) error

Set raw string value and validate it against allowed values

func (*ObjectTypePlural) String

func (f *ObjectTypePlural) String() string

String representation for fmt.Print

func (*ObjectTypePlural) Type

func (f *ObjectTypePlural) Type() string

Type always returns ObjectTypePlural to satisfy [pflag.Value] interface

type OdbcParams

type OdbcParams struct {
	// Hostname to use in the ODBC connection string.
	Hostname string `json:"hostname,omitempty"`
	// HTTP path component of the ODBC endpoint.
	Path string `json:"path,omitempty"`
	// Port to connect to.
	Port int `json:"port,omitempty"`
	// Connection protocol (presumably e.g. "https" — confirm against warehouse docs).
	Protocol string `json:"protocol,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (OdbcParams) MarshalJSON added in v0.23.0

func (s OdbcParams) MarshalJSON() ([]byte, error)

func (*OdbcParams) UnmarshalJSON added in v0.23.0

func (s *OdbcParams) UnmarshalJSON(b []byte) error

type OwnableObjectType

type OwnableObjectType string

The singular form of the type of object which can be owned.

const OwnableObjectTypeAlert OwnableObjectType = `alert`
const OwnableObjectTypeDashboard OwnableObjectType = `dashboard`
const OwnableObjectTypeQuery OwnableObjectType = `query`

func (*OwnableObjectType) Set

func (f *OwnableObjectType) Set(v string) error

Set raw string value and validate it against allowed values

func (*OwnableObjectType) String

func (f *OwnableObjectType) String() string

String representation for fmt.Print

func (*OwnableObjectType) Type

func (f *OwnableObjectType) Type() string

Type always returns OwnableObjectType to satisfy [pflag.Value] interface

type Parameter

type Parameter struct {
	// List of valid parameter values, newline delimited. Only applies for
	// dropdown list parameters.
	EnumOptions string `json:"enumOptions,omitempty"`
	// If specified, allows multiple values to be selected for this parameter.
	// Only applies to dropdown list and query-based dropdown list parameters.
	MultiValuesOptions *MultiValuesOptions `json:"multiValuesOptions,omitempty"`
	// The literal parameter marker that appears between double curly braces in
	// the query text.
	Name string `json:"name,omitempty"`
	// The UUID of the query that provides the parameter values. Only applies
	// for query-based dropdown list parameters.
	QueryId string `json:"queryId,omitempty"`
	// The text displayed in a parameter picking widget.
	Title string `json:"title,omitempty"`
	// Parameters can have several different types.
	Type ParameterType `json:"type,omitempty"`
	// The default value for this parameter.
	Value any `json:"value,omitempty"`
	// ForceSendFields is a list of field names to include in the JSON body
	// even when set to their zero value; it is consumed by MarshalJSON and is
	// never serialized itself.
	ForceSendFields []string `json:"-"`
}

func (Parameter) MarshalJSON added in v0.23.0

func (s Parameter) MarshalJSON() ([]byte, error)

func (*Parameter) UnmarshalJSON added in v0.23.0

func (s *Parameter) UnmarshalJSON(b []byte) error

type ParameterType

type ParameterType string

Parameters can have several different types.

const ParameterTypeDatetime ParameterType = `datetime`
const ParameterTypeEnum ParameterType = `enum`
const ParameterTypeNumber ParameterType = `number`
const ParameterTypeQuery ParameterType = `query`
const ParameterTypeText ParameterType = `text`

func (*ParameterType) Set

func (f *ParameterType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ParameterType) String

func (f *ParameterType) String() string

String representation for fmt.Print

func (*ParameterType) Type

func (f *ParameterType) Type() string

Type always returns ParameterType to satisfy [pflag.Value] interface

type PermissionLevel

type PermissionLevel string

* `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query

const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT`

Can edit the query

const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE`

Can manage the query

const PermissionLevelCanRun PermissionLevel = `CAN_RUN`

Can run the query

const PermissionLevelCanView PermissionLevel = `CAN_VIEW`

Can view the query

func (*PermissionLevel) Set

func (f *PermissionLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*PermissionLevel) String

func (f *PermissionLevel) String() string

String representation for fmt.Print

func (*PermissionLevel) Type

func (f *PermissionLevel) Type() string

Type always returns PermissionLevel to satisfy [pflag.Value] interface

type PlansState

type PlansState string

Possible Reasons for which we have not saved plans in the database

const PlansStateEmpty PlansState = `EMPTY`
const PlansStateExists PlansState = `EXISTS`
const PlansStateIgnoredLargePlansSize PlansState = `IGNORED_LARGE_PLANS_SIZE`
const PlansStateIgnoredSmallDuration PlansState = `IGNORED_SMALL_DURATION`
const PlansStateIgnoredSparkPlanType PlansState = `IGNORED_SPARK_PLAN_TYPE`
const PlansStateUnknown PlansState = `UNKNOWN`

func (*PlansState) Set

func (f *PlansState) Set(v string) error

Set raw string value and validate it against allowed values

func (*PlansState) String

func (f *PlansState) String() string

String representation for fmt.Print

func (*PlansState) Type

func (f *PlansState) Type() string

Type always returns PlansState to satisfy [pflag.Value] interface

type QueriesAPI

type QueriesAPI struct {
	// contains filtered or unexported fields
}

The queries API can be used to perform CRUD operations on queries. A query is a Databricks SQL object that includes the target SQL warehouse, query text, name, description, tags, and parameters. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

func NewQueries

func NewQueries(client *client.DatabricksClient) *QueriesAPI

func (*QueriesAPI) Create

func (a *QueriesAPI) Create(ctx context.Context, request CreateQueryRequest) (*Query, error)
Example (Alerts)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SELECT 1",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SHOW TABLES",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) Delete

func (a *QueriesAPI) Delete(ctx context.Context, request TrashQueryRequest) error

func (*QueriesAPI) DeleteById added in v0.44.0

func (a *QueriesAPI) DeleteById(ctx context.Context, id string) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and cannot be used for alerts. You can restore a trashed query through the UI. A trashed query is permanently deleted after 30 days.

func (*QueriesAPI) Get

func (a *QueriesAPI) Get(ctx context.Context, request GetQueryRequest) (*Query, error)
Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SHOW TABLES",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

byId, err := w.Queries.GetById(ctx, query.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

func (*QueriesAPI) GetByDisplayName added in v0.44.0

func (a *QueriesAPI) GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error)

GetByDisplayName calls QueriesAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap and returns a single ListQueryObjectsResponseQuery.

Returns an error if there's more than one ListQueryObjectsResponseQuery with the same .DisplayName.

Note: All ListQueryObjectsResponseQuery instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) GetById added in v0.44.0

func (a *QueriesAPI) GetById(ctx context.Context, id string) (*Query, error)

Get a query.

Gets a query.

func (*QueriesAPI) List added in v0.24.0

func (a *QueriesAPI) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery]

List queries.

Gets a list of queries accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListAll

func (a *QueriesAPI) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error)

List queries.

Gets a list of queries accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListQueryObjectsResponseQueryDisplayNameToIdMap added in v0.44.0

func (a *QueriesAPI) ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error)

ListQueryObjectsResponseQueryDisplayNameToIdMap calls QueriesAPI.ListAll and creates a map of results with ListQueryObjectsResponseQuery.DisplayName as key and ListQueryObjectsResponseQuery.Id as value.

Returns an error if there's more than one ListQueryObjectsResponseQuery with the same .DisplayName.

Note: All ListQueryObjectsResponseQuery instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListVisualizations added in v0.44.0

func (a *QueriesAPI) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization]

List visualizations on a query.

Gets a list of visualizations on a query.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListVisualizationsAll added in v0.44.0

func (a *QueriesAPI) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error)

List visualizations on a query.

Gets a list of visualizations on a query.

This method is generated by Databricks SDK Code Generator.

func (*QueriesAPI) ListVisualizationsById added in v0.44.0

func (a *QueriesAPI) ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error)

List visualizations on a query.

Gets a list of visualizations on a query.

func (*QueriesAPI) Update

func (a *QueriesAPI) Update(ctx context.Context, request UpdateQueryRequest) (*Query, error)
Example (Queries)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

srcs, err := w.DataSources.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", srcs)

query, err := w.Queries.Create(ctx, sql.CreateQueryRequest{
	Query: &sql.CreateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		WarehouseId: srcs[0].WarehouseId,
		Description: "test query from Go SDK",
		QueryText:   "SHOW TABLES",
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", query)

updated, err := w.Queries.Update(ctx, sql.UpdateQueryRequest{
	Id: query.Id,
	Query: &sql.UpdateQueryRequestQuery{
		DisplayName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		Description: "UPDATED: test query from Go SDK",
		QueryText:   "SELECT 2+2",
	},
	UpdateMask: "display_name,description,query_text",
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", updated)

// cleanup

err = w.Queries.DeleteById(ctx, query.Id)
if err != nil {
	panic(err)
}
Output:

type QueriesInterface added in v0.29.0

type QueriesInterface interface {

	// Create a query.
	//
	// Creates a query.
	Create(ctx context.Context, request CreateQueryRequest) (*Query, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and cannot be used for alerts. You can restore a
	// trashed query through the UI. A trashed query is permanently deleted after 30
	// days.
	Delete(ctx context.Context, request TrashQueryRequest) error

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and cannot be used for alerts. You can restore a
	// trashed query through the UI. A trashed query is permanently deleted after 30
	// days.
	DeleteById(ctx context.Context, id string) error

	// Get a query.
	//
	// Gets a query.
	Get(ctx context.Context, request GetQueryRequest) (*Query, error)

	// Get a query.
	//
	// Gets a query.
	GetById(ctx context.Context, id string) (*Query, error)

	// List queries.
	//
	// Gets a list of queries accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery]

	// List queries.
	//
	// Gets a list of queries accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error)

	// ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value.
	//
	// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName.
	//
	// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error)

	// GetByDisplayName calls [QueriesAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery].
	//
	// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName.
	//
	// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error)

	// List visualizations on a query.
	//
	// Gets a list of visualizations on a query.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization]

	// List visualizations on a query.
	//
	// Gets a list of visualizations on a query.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error)

	// List visualizations on a query.
	//
	// Gets a list of visualizations on a query.
	ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error)

	// Update a query.
	//
	// Updates a query.
	Update(ctx context.Context, request UpdateQueryRequest) (*Query, error)
}

type QueriesLegacyAPI added in v0.44.0

type QueriesLegacyAPI struct {
	// contains filtered or unexported fields
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. Learn more

func NewQueriesLegacy added in v0.44.0

func NewQueriesLegacy(client *client.DatabricksClient) *QueriesLegacyAPI

func (*QueriesLegacyAPI) Create added in v0.44.0

func (a *QueriesLegacyAPI) Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error)

func (*QueriesLegacyAPI) Delete added in v0.44.0

func (a *QueriesLegacyAPI) Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error

func (*QueriesLegacyAPI) DeleteByQueryId added in v0.44.0

func (a *QueriesLegacyAPI) DeleteByQueryId(ctx context.Context, queryId string) error

Delete a query.

Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/delete instead. Learn more

func (*QueriesLegacyAPI) Get added in v0.44.0

func (a *QueriesLegacyAPI) Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error)

func (*QueriesLegacyAPI) GetByName added in v0.44.0

func (a *QueriesLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyQuery, error)

GetByName calls QueriesLegacyAPI.LegacyQueryNameToIdMap and returns a single LegacyQuery.

Returns an error if there's more than one LegacyQuery with the same .Name.

Note: All LegacyQuery instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*QueriesLegacyAPI) GetByQueryId added in v0.44.0

func (a *QueriesLegacyAPI) GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error)

Get a query definition.

Retrieve a query object definition along with contextual permissions information about the currently authenticated user.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/get instead. Learn more

func (*QueriesLegacyAPI) LegacyQueryNameToIdMap added in v0.44.0

func (a *QueriesLegacyAPI) LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error)

LegacyQueryNameToIdMap calls QueriesLegacyAPI.ListAll and creates a map of results with LegacyQuery.Name as key and LegacyQuery.Id as value.

Returns an error if there's more than one LegacyQuery with the same .Name.

Note: All LegacyQuery instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*QueriesLegacyAPI) List added in v0.44.0

Get a list of queries.

Gets a list of queries. Optionally, this list can be filtered by a search term.

**Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/list instead. Learn more

This method is generated by Databricks SDK Code Generator.

func (*QueriesLegacyAPI) ListAll added in v0.44.0

Get a list of queries.

Gets a list of queries. Optionally, this list can be filtered by a search term.

**Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/list instead. Learn more

This method is generated by Databricks SDK Code Generator.

func (*QueriesLegacyAPI) Restore added in v0.44.0

func (a *QueriesLegacyAPI) Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error

func (*QueriesLegacyAPI) Update added in v0.44.0

func (a *QueriesLegacyAPI) Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error)

type QueriesLegacyInterface added in v0.44.0

// QueriesLegacyInterface provides CRUD operations on legacy query
// definitions: the target SQL warehouse, query text, name, description,
// tags, parameters, and visualizations.
//
// **Note**: A new version of the Databricks SQL API is now available;
// prefer the :method:queries equivalents referenced on each method. [Learn more]
//
// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
type QueriesLegacyInterface interface {

	// Create a new query definition.
	//
	// Creates a new query definition. Queries created with this endpoint belong to
	// the authenticated user making the request.
	//
	// The `data_source_id` field specifies the ID of the SQL warehouse to run this
	// query against. You can use the Data Sources API to see a complete list of
	// available SQL warehouses. Or you can copy the `data_source_id` from an
	// existing query.
	//
	// **Note**: You cannot add a visualization until you create the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and they cannot be used for alerts. The trash is
	// deleted after 30 days.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and they cannot be used for alerts. The trash is
	// deleted after 30 days.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	DeleteByQueryId(ctx context.Context, queryId string) error

	// Get a query definition.
	//
	// Retrieve a query object definition along with contextual permissions
	// information about the currently authenticated user.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error)

	// Get a query definition.
	//
	// Retrieve a query object definition along with contextual permissions
	// information about the currently authenticated user.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error)

	// Get a list of queries.
	//
	// Gets a list of queries. Optionally, this list can be filtered by a search
	// term.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery]

	// Get a list of queries.
	//
	// Gets a list of queries. Optionally, this list can be filtered by a search
	// term.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result in
	// throttling, service degradation, or a temporary ban.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error)

	// LegacyQueryNameToIdMap calls [QueriesLegacyAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value.
	//
	// Returns an error if there's more than one [LegacyQuery] with the same .Name.
	//
	// Note: All [LegacyQuery] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error)

	// GetByName calls [QueriesLegacyAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery].
	//
	// Returns an error if there's more than one [LegacyQuery] with the same .Name.
	//
	// Note: All [LegacyQuery] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*LegacyQuery, error)

	// Restore a query.
	//
	// Restore a query that has been moved to the trash. A restored query appears in
	// list views and searches. You can use restored queries for alerts.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// see the latest version. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error

	// Change a query definition.
	//
	// Modify this query definition.
	//
	// **Note**: You cannot undo this operation.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queries/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error)
}

type QueriesLegacyService added in v0.44.0

// QueriesLegacyService is the low-level service contract for the legacy
// queries API: CRUD operations on query definitions (target SQL warehouse,
// query text, name, description, tags, parameters, and visualizations).
//
// **Note**: A new version of the Databricks SQL API is now available;
// prefer the :method:queries equivalents referenced on each method. [Learn more]
//
// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
type QueriesLegacyService interface {

	// Create a new query definition.
	//
	// Creates a new query definition. Queries created with this endpoint belong
	// to the authenticated user making the request.
	//
	// The `data_source_id` field specifies the ID of the SQL warehouse to run
	// this query against. You can use the Data Sources API to see a complete
	// list of available SQL warehouses. Or you can copy the `data_source_id`
	// from an existing query.
	//
	// **Note**: You cannot add a visualization until you create the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queries/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and they cannot be used for alerts. The trash is
	// deleted after 30 days.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queries/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error

	// Get a query definition.
	//
	// Retrieve a query object definition along with contextual permissions
	// information about the currently authenticated user.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queries/get instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error)

	// Get a list of queries.
	//
	// Gets a list of queries. Optionally, this list can be filtered by a search
	// term.
	//
	// **Warning**: Calling this API concurrently 10 or more times could result
	// in throttling, service degradation, or a temporary ban.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queries/list instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	//
	// Use ListAll() to get all LegacyQuery instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error)

	// Restore a query.
	//
	// Restore a query that has been moved to the trash. A restored query
	// appears in list views and searches. You can use restored queries for
	// alerts.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please see the latest version. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error

	// Change a query definition.
	//
	// Modify this query definition.
	//
	// **Note**: You cannot undo this operation.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queries/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error)
}

These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. Learn more

type QueriesService

// QueriesService is the low-level service contract for the queries API,
// which performs CRUD operations on queries. A query is a Databricks SQL
// object that includes the target SQL warehouse, query text, name,
// description, tags, and parameters.
type QueriesService interface {

	// Create a query.
	//
	// Creates a query.
	Create(ctx context.Context, request CreateQueryRequest) (*Query, error)

	// Delete a query.
	//
	// Moves a query to the trash. Trashed queries immediately disappear from
	// searches and list views, and cannot be used for alerts. You can restore a
	// trashed query through the UI. A trashed query is permanently deleted
	// after 30 days.
	Delete(ctx context.Context, request TrashQueryRequest) error

	// Get a query.
	//
	// Gets a query.
	Get(ctx context.Context, request GetQueryRequest) (*Query, error)

	// List queries.
	//
	// Gets a list of queries accessible to the user, ordered by creation time.
	// **Warning:** Calling this API concurrently 10 or more times could result
	// in throttling, service degradation, or a temporary ban.
	//
	// Use ListAll() to get all ListQueryObjectsResponseQuery instances, which will iterate over every result page.
	List(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error)

	// List visualizations on a query.
	//
	// Gets a list of visualizations on a query.
	//
	// Use ListVisualizationsAll() to get all Visualization instances, which will iterate over every result page.
	ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error)

	// Update a query.
	//
	// Updates a query.
	Update(ctx context.Context, request UpdateQueryRequest) (*Query, error)
}

The queries API can be used to perform CRUD operations on queries. A query is a Databricks SQL object that includes the target SQL warehouse, query text, name, description, tags, and parameters. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create.

type Query

// Query is a Databricks SQL query object: the target SQL warehouse, query
// text, name, description, tags, and parameters, plus lifecycle metadata.
type Query struct {
	// Whether to apply a 1000 row limit to the query result.
	ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
	// Name of the catalog where this query will be executed.
	Catalog string `json:"catalog,omitempty"`
	// Timestamp when this query was created.
	CreateTime string `json:"create_time,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Display name of the query that appears in list views, widget headings,
	// and on the query page.
	DisplayName string `json:"display_name,omitempty"`
	// UUID identifying the query.
	Id string `json:"id,omitempty"`
	// Username of the user who last saved changes to this query.
	LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
	// Indicates whether the query is trashed.
	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
	// Username of the user that owns the query.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// List of query parameter definitions.
	Parameters []QueryParameter `json:"parameters,omitempty"`
	// Workspace path of the workspace folder containing the object.
	ParentPath string `json:"parent_path,omitempty"`
	// Text of the query to be run.
	QueryText string `json:"query_text,omitempty"`
	// Sets the "Run as" role for the object.
	RunAsMode RunAsMode `json:"run_as_mode,omitempty"`
	// Name of the schema where this query will be executed.
	Schema string `json:"schema,omitempty"`
	// Tags attached to this query.
	Tags []string `json:"tags,omitempty"`
	// Timestamp when this query was last updated.
	UpdateTime string `json:"update_time,omitempty"`
	// ID of the SQL warehouse attached to the query.
	WarehouseId string `json:"warehouse_id,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (Query) MarshalJSON added in v0.23.0

func (s Query) MarshalJSON() ([]byte, error)

func (*Query) UnmarshalJSON added in v0.23.0

func (s *Query) UnmarshalJSON(b []byte) error

type QueryBackedValue added in v0.44.0

// QueryBackedValue describes a query-based dropdown parameter value: the
// options are supplied by another query's results.
type QueryBackedValue struct {
	// If specified, allows multiple values to be selected for this parameter.
	MultiValuesOptions *MultiValuesOptions `json:"multi_values_options,omitempty"`
	// UUID of the query that provides the parameter values.
	QueryId string `json:"query_id,omitempty"`
	// List of selected query parameter values.
	Values []string `json:"values,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryBackedValue) MarshalJSON added in v0.44.0

func (s QueryBackedValue) MarshalJSON() ([]byte, error)

func (*QueryBackedValue) UnmarshalJSON added in v0.44.0

func (s *QueryBackedValue) UnmarshalJSON(b []byte) error

type QueryEditContent added in v0.3.0

// QueryEditContent is the request payload for updating a legacy query
// definition (QueriesLegacy Update).
type QueryEditContent struct {
	// Data source ID maps to the ID of the data source used by the resource and
	// is distinct from the warehouse ID. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list of parameter definitions. A parameter
	// is an object with `title`, `name`, `type`, and `value` properties. The
	// `value` field here is the default value. It can be overridden at runtime.
	Options any `json:"options,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// ID of the query to edit. Excluded from both the JSON body and the query
	// string (json:"-" url:"-").
	QueryId string `json:"-" url:"-"`
	// Sets the **Run as** role for the object. Must be set to one of `"viewer"`
	// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
	// owner" behavior)
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`
	// Tags attached to this query.
	Tags []string `json:"tags,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryEditContent) MarshalJSON added in v0.23.0

func (s QueryEditContent) MarshalJSON() ([]byte, error)

func (*QueryEditContent) UnmarshalJSON added in v0.23.0

func (s *QueryEditContent) UnmarshalJSON(b []byte) error

type QueryFilter

// QueryFilter restricts a query listing by time range, statement IDs,
// statuses, user IDs, and warehouse IDs.
type QueryFilter struct {
	// A range filter for query submitted time. The time range must be <= 30
	// days.
	QueryStartTimeRange *TimeRange `json:"query_start_time_range,omitempty" url:"query_start_time_range,omitempty"`
	// A list of statement IDs.
	StatementIds []string `json:"statement_ids,omitempty" url:"statement_ids,omitempty"`
	// A list of query statuses to match.
	Statuses []QueryStatus `json:"statuses,omitempty" url:"statuses,omitempty"`
	// A list of user IDs who ran the queries.
	UserIds []int64 `json:"user_ids,omitempty" url:"user_ids,omitempty"`
	// A list of warehouse IDs.
	WarehouseIds []string `json:"warehouse_ids,omitempty" url:"warehouse_ids,omitempty"`
}

type QueryHistoryAPI

// QueryHistoryAPI is the client-side handle for the query history service,
// which stores and retrieves the list of queries run against SQL endpoints
// and serverless compute. Construct it with NewQueryHistory.
type QueryHistoryAPI struct {
	// contains filtered or unexported fields
}

A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.

func NewQueryHistory

func NewQueryHistory(client *client.DatabricksClient) *QueryHistoryAPI

func (*QueryHistoryAPI) List added in v0.24.0

func (a *QueryHistoryAPI) List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)

type QueryHistoryInterface added in v0.29.0

// QueryHistoryInterface lists the history of queries run through SQL
// warehouses and serverless compute.
type QueryHistoryInterface interface {

	// List Queries.
	//
	// List the history of queries through SQL warehouses, and serverless compute.
	//
	// You can filter by user ID, warehouse ID, status, and time range. Most
	// recently started queries are returned first (up to max_results in request).
	// The pagination token returned in response can be used to list subsequent
	// query statuses.
	List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)
}

type QueryHistoryService

// QueryHistoryService is the low-level service contract for the query
// history API: a service responsible for storing and retrieving the list of
// queries run against SQL endpoints and serverless compute.
type QueryHistoryService interface {

	// List Queries.
	//
	// List the history of queries through SQL warehouses, and serverless
	// compute.
	//
	// You can filter by user ID, warehouse ID, status, and time range. Most
	// recently started queries are returned first (up to max_results in
	// request). The pagination token returned in response can be used to list
	// subsequent query statuses.
	List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)
}

A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.

type QueryInfo

// QueryInfo describes one query execution returned by the query history
// listing: identity, timing, status, and (optionally) execution metrics.
type QueryInfo struct {
	// SQL Warehouse channel information at the time of query execution
	ChannelUsed *ChannelInfo `json:"channel_used,omitempty"`
	// Total execution time of the statement (excluding result fetch time).
	Duration int64 `json:"duration,omitempty"`
	// Alias for `warehouse_id`.
	EndpointId string `json:"endpoint_id,omitempty"`
	// Message describing why the query could not complete.
	ErrorMessage string `json:"error_message,omitempty"`
	// The ID of the user whose credentials were used to run the query.
	ExecutedAsUserId int64 `json:"executed_as_user_id,omitempty"`
	// The email address or username of the user whose credentials were used to
	// run the query.
	ExecutedAsUserName string `json:"executed_as_user_name,omitempty"`
	// The time execution of the query ended.
	ExecutionEndTimeMs int64 `json:"execution_end_time_ms,omitempty"`
	// Whether more updates for the query are expected.
	IsFinal bool `json:"is_final,omitempty"`
	// A key that can be used to look up query details.
	LookupKey string `json:"lookup_key,omitempty"`
	// Metrics about query execution.
	Metrics *QueryMetrics `json:"metrics,omitempty"`
	// Whether plans exist for the execution, or the reason why they are missing
	PlansState PlansState `json:"plans_state,omitempty"`
	// The time the query ended.
	QueryEndTimeMs int64 `json:"query_end_time_ms,omitempty"`
	// The query ID.
	QueryId string `json:"query_id,omitempty"`
	// The time the query started.
	QueryStartTimeMs int64 `json:"query_start_time_ms,omitempty"`
	// The text of the query.
	QueryText string `json:"query_text,omitempty"`
	// The number of results returned by the query.
	RowsProduced int64 `json:"rows_produced,omitempty"`
	// URL to the Spark UI query plan.
	SparkUiUrl string `json:"spark_ui_url,omitempty"`
	// Type of statement for this query
	StatementType QueryStatementType `json:"statement_type,omitempty"`
	// Query status with one of the following values:
	//
	// - `QUEUED`: Query has been received and queued. - `RUNNING`: Query has
	// started. - `CANCELED`: Query has been cancelled by the user. - `FAILED`:
	// Query has failed. - `FINISHED`: Query has completed.
	Status QueryStatus `json:"status,omitempty"`
	// The ID of the user who ran the query.
	UserId int64 `json:"user_id,omitempty"`
	// The email address or username of the user who ran the query.
	UserName string `json:"user_name,omitempty"`
	// Warehouse ID.
	WarehouseId string `json:"warehouse_id,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryInfo) MarshalJSON added in v0.23.0

func (s QueryInfo) MarshalJSON() ([]byte, error)

func (*QueryInfo) UnmarshalJSON added in v0.23.0

func (s *QueryInfo) UnmarshalJSON(b []byte) error

type QueryList

// QueryList is one page of legacy query listing results, with pagination
// metadata.
type QueryList struct {
	// The total number of queries.
	Count int `json:"count,omitempty"`
	// The page number that is currently displayed.
	Page int `json:"page,omitempty"`
	// The number of queries per page.
	PageSize int `json:"page_size,omitempty"`
	// List of queries returned.
	Results []LegacyQuery `json:"results,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryList) MarshalJSON added in v0.23.0

func (s QueryList) MarshalJSON() ([]byte, error)

func (*QueryList) UnmarshalJSON added in v0.23.0

func (s *QueryList) UnmarshalJSON(b []byte) error

type QueryMetrics

// QueryMetrics encapsulates a set of measurements for a single query.
// Metrics come from the driver and are stored in the history service
// database.
type QueryMetrics struct {
	// Time spent loading metadata and optimizing the query, in milliseconds.
	CompilationTimeMs int64 `json:"compilation_time_ms,omitempty"`
	// Time spent executing the query, in milliseconds.
	ExecutionTimeMs int64 `json:"execution_time_ms,omitempty"`
	// Total amount of data sent over the network between executor nodes during
	// shuffle, in bytes.
	NetworkSentBytes int64 `json:"network_sent_bytes,omitempty"`
	// Timestamp of when the query was enqueued waiting while the warehouse was
	// at max load. This field is optional and will not appear if the query
	// skipped the overloading queue.
	OverloadingQueueStartTimestamp int64 `json:"overloading_queue_start_timestamp,omitempty"`
	// Total execution time for all individual Photon query engine tasks in the
	// query, in milliseconds.
	PhotonTotalTimeMs int64 `json:"photon_total_time_ms,omitempty"`
	// Timestamp of when the query was enqueued waiting for a cluster to be
	// provisioned for the warehouse. This field is optional and will not appear
	// if the query skipped the provisioning queue.
	ProvisioningQueueStartTimestamp int64 `json:"provisioning_queue_start_timestamp,omitempty"`
	// Total number of bytes in all tables not read due to pruning
	PrunedBytes int64 `json:"pruned_bytes,omitempty"`
	// Total number of files from all tables not read due to pruning
	PrunedFilesCount int64 `json:"pruned_files_count,omitempty"`
	// Timestamp of when the underlying compute started compilation of the
	// query.
	QueryCompilationStartTimestamp int64 `json:"query_compilation_start_timestamp,omitempty"`
	// Total size of data read by the query, in bytes.
	ReadBytes int64 `json:"read_bytes,omitempty"`
	// Size of persistent data read from the cache, in bytes.
	ReadCacheBytes int64 `json:"read_cache_bytes,omitempty"`
	// Number of files read after pruning
	ReadFilesCount int64 `json:"read_files_count,omitempty"`
	// Number of partitions read after pruning.
	ReadPartitionsCount int64 `json:"read_partitions_count,omitempty"`
	// Size of persistent data read from cloud object storage on your cloud
	// tenant, in bytes.
	ReadRemoteBytes int64 `json:"read_remote_bytes,omitempty"`
	// Time spent fetching the query results after the execution finished, in
	// milliseconds.
	ResultFetchTimeMs int64 `json:"result_fetch_time_ms,omitempty"`
	// `true` if the query result was fetched from cache, `false` otherwise.
	ResultFromCache bool `json:"result_from_cache,omitempty"`
	// Total number of rows returned by the query.
	RowsProducedCount int64 `json:"rows_produced_count,omitempty"`
	// Total number of rows read by the query.
	RowsReadCount int64 `json:"rows_read_count,omitempty"`
	// Size of data temporarily written to disk while executing the query, in
	// bytes.
	SpillToDiskBytes int64 `json:"spill_to_disk_bytes,omitempty"`
	// Sum of execution time for all of the query’s tasks, in milliseconds.
	TaskTotalTimeMs int64 `json:"task_total_time_ms,omitempty"`
	// Total execution time of the query from the client’s point of view, in
	// milliseconds.
	TotalTimeMs int64 `json:"total_time_ms,omitempty"`
	// Size of persistent data written to cloud object storage in your cloud
	// tenant, in bytes.
	WriteRemoteBytes int64 `json:"write_remote_bytes,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

A query metric that encapsulates a set of measurements for a single query. Metrics come from the driver and are stored in the history service database.

func (QueryMetrics) MarshalJSON added in v0.23.0

func (s QueryMetrics) MarshalJSON() ([]byte, error)

func (*QueryMetrics) UnmarshalJSON added in v0.23.0

func (s *QueryMetrics) UnmarshalJSON(b []byte) error

type QueryOptions

// QueryOptions holds execution options for a legacy query: target catalog
// and schema, parameter definitions, and trash metadata.
type QueryOptions struct {
	// The name of the catalog to execute this query in.
	Catalog string `json:"catalog,omitempty"`
	// The timestamp when this query was moved to trash. Only present when the
	// `is_archived` property is `true`. Trashed items are deleted after thirty
	// days.
	MovedToTrashAt string `json:"moved_to_trash_at,omitempty"`
	// List of query parameter definitions.
	Parameters []Parameter `json:"parameters,omitempty"`
	// The name of the schema to execute this query in.
	Schema string `json:"schema,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryOptions) MarshalJSON added in v0.23.0

func (s QueryOptions) MarshalJSON() ([]byte, error)

func (*QueryOptions) UnmarshalJSON added in v0.23.0

func (s *QueryOptions) UnmarshalJSON(b []byte) error

type QueryParameter added in v0.44.0

// QueryParameter defines a single query parameter. Exactly one of the
// typed value fields (date range, date, enum, numeric, query-backed, text)
// is expected to carry the parameter's value.
type QueryParameter struct {
	// Date-range query parameter value. Can only specify one of
	// `dynamic_date_range_value` or `date_range_value`.
	DateRangeValue *DateRangeValue `json:"date_range_value,omitempty"`
	// Date query parameter value. Can only specify one of `dynamic_date_value`
	// or `date_value`.
	DateValue *DateValue `json:"date_value,omitempty"`
	// Dropdown query parameter value.
	EnumValue *EnumValue `json:"enum_value,omitempty"`
	// Literal parameter marker that appears between double curly braces in the
	// query text.
	Name string `json:"name,omitempty"`
	// Numeric query parameter value.
	NumericValue *NumericValue `json:"numeric_value,omitempty"`
	// Query-based dropdown query parameter value.
	QueryBackedValue *QueryBackedValue `json:"query_backed_value,omitempty"`
	// Text query parameter value.
	TextValue *TextValue `json:"text_value,omitempty"`
	// Text displayed in the user-facing parameter widget in the UI.
	Title string `json:"title,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryParameter) MarshalJSON added in v0.44.0

func (s QueryParameter) MarshalJSON() ([]byte, error)

func (*QueryParameter) UnmarshalJSON added in v0.44.0

func (s *QueryParameter) UnmarshalJSON(b []byte) error

type QueryPostContent

// QueryPostContent is the request payload for creating a legacy query
// definition (QueriesLegacy Create).
type QueryPostContent struct {
	// Data source ID maps to the ID of the data source used by the resource and
	// is distinct from the warehouse ID. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
	DataSourceId string `json:"data_source_id,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// The title of this query that appears in list views, widget headings, and
	// on the query page.
	Name string `json:"name,omitempty"`
	// Exclusively used for storing a list of parameter definitions. A parameter
	// is an object with `title`, `name`, `type`, and `value` properties. The
	// `value` field here is the default value. It can be overridden at runtime.
	Options any `json:"options,omitempty"`
	// The identifier of the workspace folder containing the object.
	Parent string `json:"parent,omitempty"`
	// The text of the query to be run.
	Query string `json:"query,omitempty"`
	// Sets the **Run as** role for the object. Must be set to one of `"viewer"`
	// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
	// owner" behavior)
	RunAsRole RunAsRole `json:"run_as_role,omitempty"`
	// Tags attached to this query.
	Tags []string `json:"tags,omitempty"`
	// Field names whose zero values should still be serialized to JSON.
	ForceSendFields []string `json:"-"`
}

func (QueryPostContent) MarshalJSON added in v0.23.0

func (s QueryPostContent) MarshalJSON() ([]byte, error)

func (*QueryPostContent) UnmarshalJSON added in v0.23.0

func (s *QueryPostContent) UnmarshalJSON(b []byte) error

type QueryStatementType

// QueryStatementType classifies the kind of SQL statement a query contains.
type QueryStatementType string

// Allowed QueryStatementType values.
const (
	QueryStatementTypeAlter    QueryStatementType = `ALTER`
	QueryStatementTypeAnalyze  QueryStatementType = `ANALYZE`
	QueryStatementTypeCopy     QueryStatementType = `COPY`
	QueryStatementTypeCreate   QueryStatementType = `CREATE`
	QueryStatementTypeDelete   QueryStatementType = `DELETE`
	QueryStatementTypeDescribe QueryStatementType = `DESCRIBE`
	QueryStatementTypeDrop     QueryStatementType = `DROP`
	QueryStatementTypeExplain  QueryStatementType = `EXPLAIN`
	QueryStatementTypeGrant    QueryStatementType = `GRANT`
	QueryStatementTypeInsert   QueryStatementType = `INSERT`
	QueryStatementTypeMerge    QueryStatementType = `MERGE`
	QueryStatementTypeOptimize QueryStatementType = `OPTIMIZE`
	QueryStatementTypeOther    QueryStatementType = `OTHER`
	QueryStatementTypeRefresh  QueryStatementType = `REFRESH`
	QueryStatementTypeReplace  QueryStatementType = `REPLACE`
	QueryStatementTypeRevoke   QueryStatementType = `REVOKE`
	QueryStatementTypeSelect   QueryStatementType = `SELECT`
	QueryStatementTypeSet      QueryStatementType = `SET`
	QueryStatementTypeShow     QueryStatementType = `SHOW`
	QueryStatementTypeTruncate QueryStatementType = `TRUNCATE`
	QueryStatementTypeUpdate   QueryStatementType = `UPDATE`
	QueryStatementTypeUse      QueryStatementType = `USE`
)

func (*QueryStatementType) Set

func (f *QueryStatementType) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatementType) String

func (f *QueryStatementType) String() string

String representation for fmt.Print

func (*QueryStatementType) Type

func (f *QueryStatementType) Type() string

Type always returns QueryStatementType to satisfy [pflag.Value] interface

type QueryStatus

// QueryStatus represents a query's execution status. These statuses are also
// used by OperationStatus in the runtime.
type QueryStatus string

// Allowed QueryStatus values.
const (
	QueryStatusCanceled  QueryStatus = `CANCELED`
	QueryStatusCompiled  QueryStatus = `COMPILED`
	QueryStatusCompiling QueryStatus = `COMPILING`
	QueryStatusFailed    QueryStatus = `FAILED`
	QueryStatusFinished  QueryStatus = `FINISHED`
	QueryStatusQueued    QueryStatus = `QUEUED`
	QueryStatusRunning   QueryStatus = `RUNNING`
	QueryStatusStarted   QueryStatus = `STARTED`
)

func (*QueryStatus) Set

func (f *QueryStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*QueryStatus) String

func (f *QueryStatus) String() string

String representation for fmt.Print

func (*QueryStatus) Type

func (f *QueryStatus) Type() string

Type always returns QueryStatus to satisfy [pflag.Value] interface

type QueryVisualizationsAPI added in v0.19.0

type QueryVisualizationsAPI struct {
	// contains filtered or unexported fields
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries in the Databricks Workspace. Data structures can change over time.

func NewQueryVisualizations added in v0.19.0

func NewQueryVisualizations(client *client.DatabricksClient) *QueryVisualizationsAPI

func (*QueryVisualizationsAPI) Create added in v0.19.0

func (a *QueryVisualizationsAPI) Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error)

func (*QueryVisualizationsAPI) Delete added in v0.19.0

func (a *QueryVisualizationsAPI) Delete(ctx context.Context, request DeleteVisualizationRequest) error

func (*QueryVisualizationsAPI) DeleteById added in v0.19.0

func (a *QueryVisualizationsAPI) DeleteById(ctx context.Context, id string) error

Remove a visualization.

Removes a visualization.

func (*QueryVisualizationsAPI) Update added in v0.19.0

func (a *QueryVisualizationsAPI) Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error)

type QueryVisualizationsInterface added in v0.29.0

// QueryVisualizationsInterface is the high-level interface for adding,
// removing, and updating visualizations on existing queries in the
// Databricks Workspace.
type QueryVisualizationsInterface interface {

	// Add a visualization to a query.
	//
	// Adds a visualization to a query.
	Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error)

	// Remove a visualization.
	//
	// Removes a visualization.
	Delete(ctx context.Context, request DeleteVisualizationRequest) error

	// Remove a visualization.
	//
	// Removes a visualization.
	DeleteById(ctx context.Context, id string) error

	// Update a visualization.
	//
	// Updates a visualization.
	Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error)
}

type QueryVisualizationsLegacyAPI added in v0.44.0

type QueryVisualizationsLegacyAPI struct {
	// contains filtered or unexported fields
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace. Data structures may change over time.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. Learn more

func NewQueryVisualizationsLegacy added in v0.44.0

func NewQueryVisualizationsLegacy(client *client.DatabricksClient) *QueryVisualizationsLegacyAPI

func (*QueryVisualizationsLegacyAPI) Create added in v0.44.0

func (a *QueryVisualizationsLegacyAPI) Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error)

func (*QueryVisualizationsLegacyAPI) Delete added in v0.44.0

func (a *QueryVisualizationsLegacyAPI) Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error

func (*QueryVisualizationsLegacyAPI) DeleteById added in v0.44.0

func (a *QueryVisualizationsLegacyAPI) DeleteById(ctx context.Context, id string) error

Remove visualization.

Removes a visualization from the query.

**Note**: A new version of the Databricks SQL API is now available. Please use :method:queryvisualizations/delete instead. Learn more

func (*QueryVisualizationsLegacyAPI) Update added in v0.44.0

func (a *QueryVisualizationsLegacyAPI) Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error)

type QueryVisualizationsLegacyInterface added in v0.44.0

// QueryVisualizationsLegacyInterface is the high-level interface for the
// legacy query-visualization operations. A newer version of the Databricks
// SQL API is available; prefer QueryVisualizationsInterface.
type QueryVisualizationsLegacyInterface interface {

	// Add visualization to a query.
	//
	// Creates visualization in the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queryvisualizations/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error)

	// Remove visualization.
	//
	// Removes a visualization from the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queryvisualizations/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error

	// Remove visualization.
	//
	// Removes a visualization from the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queryvisualizations/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	DeleteById(ctx context.Context, id string) error

	// Edit existing visualization.
	//
	// Updates visualization in the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available. Please
	// use :method:queryvisualizations/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error)
}

type QueryVisualizationsLegacyService added in v0.44.0

// QueryVisualizationsLegacyService defines the legacy service operations for
// creating, removing, and updating visualizations on queries. A newer
// version of the Databricks SQL API is available; prefer the non-legacy
// equivalents.
type QueryVisualizationsLegacyService interface {

	// Add visualization to a query.
	//
	// Creates visualization in the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queryvisualizations/create instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error)

	// Remove visualization.
	//
	// Removes a visualization from the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queryvisualizations/delete instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error

	// Edit existing visualization.
	//
	// Updates visualization in the query.
	//
	// **Note**: A new version of the Databricks SQL API is now available.
	// Please use :method:queryvisualizations/update instead. [Learn more]
	//
	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
	Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error)
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace. Data structures may change over time.

**Note**: A new version of the Databricks SQL API is now available. Please see the latest version. Learn more

type QueryVisualizationsService added in v0.19.0

// QueryVisualizationsService defines the service operations for adding,
// removing, and updating visualizations on existing queries.
type QueryVisualizationsService interface {

	// Add a visualization to a query.
	//
	// Adds a visualization to a query.
	Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error)

	// Remove a visualization.
	//
	// Removes a visualization.
	Delete(ctx context.Context, request DeleteVisualizationRequest) error

	// Update a visualization.
	//
	// Updates a visualization.
	Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error)
}

This is an evolving API that facilitates the addition and removal of visualizations from existing queries in the Databricks Workspace. Data structures can change over time.

type RepeatedEndpointConfPairs

// RepeatedEndpointConfPairs carries lists of endpoint configuration
// key/value pairs.
type RepeatedEndpointConfPairs struct {
	// Deprecated: Use ConfigurationPairs instead.
	ConfigPair []EndpointConfPair `json:"config_pair,omitempty"`

	// Configuration pairs to apply; preferred over the deprecated ConfigPair.
	ConfigurationPairs []EndpointConfPair `json:"configuration_pairs,omitempty"`
}

type RestoreDashboardRequest

// RestoreDashboardRequest identifies a dashboard to restore from the trash.
type RestoreDashboardRequest struct {
	// DashboardId of the dashboard to restore; excluded from both the JSON
	// body and the URL query string (carried in the request path).
	DashboardId string `json:"-" url:"-"`
}

Restore a dashboard

type RestoreQueriesLegacyRequest added in v0.44.0

// RestoreQueriesLegacyRequest identifies a query to restore from the trash.
type RestoreQueriesLegacyRequest struct {
	// QueryId of the query to restore; excluded from both the JSON body and
	// the URL query string (carried in the request path).
	QueryId string `json:"-" url:"-"`
}

Restore a query

type RestoreResponse added in v0.34.0

// RestoreResponse is the empty response body returned by restore operations.
type RestoreResponse struct {
}

type ResultData added in v0.3.0

// ResultData holds one chunk of a statement's result set, either inline as
// a JSON array or as external links, depending on the disposition.
type ResultData struct {
	// The number of bytes in the result chunk. This field is not available when
	// using `INLINE` disposition.
	ByteCount int64 `json:"byte_count,omitempty"`
	// The position within the sequence of result set chunks.
	ChunkIndex int `json:"chunk_index,omitempty"`
	// The `JSON_ARRAY` format is an array of arrays of values, where each
	// non-null value is formatted as a string. Null values are encoded as JSON
	// `null`.
	DataArray [][]string `json:"data_array,omitempty"`

	// Links for downloading result data when using the `EXTERNAL_LINKS`
	// disposition.
	ExternalLinks []ExternalLink `json:"external_links,omitempty"`
	// When fetching, provides the `chunk_index` for the _next_ chunk. If
	// absent, indicates there are no more chunks. The next chunk can be fetched
	// with a :method:statementexecution/getStatementResultChunkN request.
	NextChunkIndex int `json:"next_chunk_index,omitempty"`
	// When fetching, provides a link to fetch the _next_ chunk. If absent,
	// indicates there are no more chunks. This link is an absolute `path` to be
	// joined with your `$DATABRICKS_HOST`, and should be treated as an opaque
	// link. This is an alternative to using `next_chunk_index`.
	NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
	// The number of rows within the result chunk.
	RowCount int64 `json:"row_count,omitempty"`
	// The starting row offset within the result set.
	RowOffset int64 `json:"row_offset,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (ResultData) MarshalJSON added in v0.23.0

func (s ResultData) MarshalJSON() ([]byte, error)

func (*ResultData) UnmarshalJSON added in v0.23.0

func (s *ResultData) UnmarshalJSON(b []byte) error

type ResultManifest added in v0.3.0

// ResultManifest provides schema and metadata for the result set.
type ResultManifest struct {
	// Array of result set chunk metadata.
	Chunks []BaseChunkInfo `json:"chunks,omitempty"`

	// Format of the result data (e.g. `JSON_ARRAY`, `ARROW_STREAM`, `CSV`).
	Format Format `json:"format,omitempty"`
	// The schema is an ordered list of column descriptions.
	Schema *ResultSchema `json:"schema,omitempty"`
	// The total number of bytes in the result set. This field is not available
	// when using `INLINE` disposition.
	TotalByteCount int64 `json:"total_byte_count,omitempty"`
	// The total number of chunks that the result set has been divided into.
	TotalChunkCount int `json:"total_chunk_count,omitempty"`
	// The total number of rows in the result set.
	TotalRowCount int64 `json:"total_row_count,omitempty"`
	// Indicates whether the result is truncated due to `row_limit` or
	// `byte_limit`.
	Truncated bool `json:"truncated,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

The result manifest provides schema and metadata for the result set.

func (ResultManifest) MarshalJSON added in v0.23.0

func (s ResultManifest) MarshalJSON() ([]byte, error)

func (*ResultManifest) UnmarshalJSON added in v0.23.0

func (s *ResultManifest) UnmarshalJSON(b []byte) error

type ResultSchema added in v0.3.0

// ResultSchema is an ordered list of column descriptions for a result set.
type ResultSchema struct {
	// Number of columns in the result set.
	ColumnCount int `json:"column_count,omitempty"`

	// Ordered column descriptions.
	Columns []ColumnInfo `json:"columns,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

The schema is an ordered list of column descriptions.

func (ResultSchema) MarshalJSON added in v0.23.0

func (s ResultSchema) MarshalJSON() ([]byte, error)

func (*ResultSchema) UnmarshalJSON added in v0.23.0

func (s *ResultSchema) UnmarshalJSON(b []byte) error

type RunAsMode added in v0.44.0

// RunAsMode selects whether an object runs with owner or viewer privileges.
type RunAsMode string

// Allowed RunAsMode values.
const (
	RunAsModeOwner  RunAsMode = `OWNER`
	RunAsModeViewer RunAsMode = `VIEWER`
)

func (*RunAsMode) Set added in v0.44.0

func (f *RunAsMode) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunAsMode) String added in v0.44.0

func (f *RunAsMode) String() string

String representation for fmt.Print

func (*RunAsMode) Type added in v0.44.0

func (f *RunAsMode) Type() string

Type always returns RunAsMode to satisfy [pflag.Value] interface

type RunAsRole added in v0.19.0

// RunAsRole sets the **Run as** role for the object. Must be set to one of
// `"viewer"` (signifying "run as viewer" behavior) or `"owner"` (signifying
// "run as owner" behavior).
type RunAsRole string

// Allowed RunAsRole values.
const (
	RunAsRoleOwner  RunAsRole = `owner`
	RunAsRoleViewer RunAsRole = `viewer`
)

func (*RunAsRole) Set added in v0.19.0

func (f *RunAsRole) Set(v string) error

Set raw string value and validate it against allowed values

func (*RunAsRole) String added in v0.19.0

func (f *RunAsRole) String() string

String representation for fmt.Print

func (*RunAsRole) Type added in v0.19.0

func (f *RunAsRole) Type() string

Type always returns RunAsRole to satisfy [pflag.Value] interface

type ServiceError added in v0.3.0

// ServiceError describes an error condition reported by the service.
type ServiceError struct {
	// Machine-readable error code; see ServiceErrorCode for allowed values.
	ErrorCode ServiceErrorCode `json:"error_code,omitempty"`
	// A brief summary of the error condition.
	Message string `json:"message,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (ServiceError) MarshalJSON added in v0.23.0

func (s ServiceError) MarshalJSON() ([]byte, error)

func (*ServiceError) UnmarshalJSON added in v0.23.0

func (s *ServiceError) UnmarshalJSON(b []byte) error

type ServiceErrorCode added in v0.3.0

// ServiceErrorCode is a machine-readable code categorizing a ServiceError.
type ServiceErrorCode string

// Allowed ServiceErrorCode values.
const (
	ServiceErrorCodeAborted                         ServiceErrorCode = `ABORTED`
	ServiceErrorCodeAlreadyExists                   ServiceErrorCode = `ALREADY_EXISTS`
	ServiceErrorCodeBadRequest                      ServiceErrorCode = `BAD_REQUEST`
	ServiceErrorCodeCancelled                       ServiceErrorCode = `CANCELLED`
	ServiceErrorCodeDeadlineExceeded                ServiceErrorCode = `DEADLINE_EXCEEDED`
	ServiceErrorCodeInternalError                   ServiceErrorCode = `INTERNAL_ERROR`
	ServiceErrorCodeIoError                         ServiceErrorCode = `IO_ERROR`
	ServiceErrorCodeNotFound                        ServiceErrorCode = `NOT_FOUND`
	ServiceErrorCodeResourceExhausted               ServiceErrorCode = `RESOURCE_EXHAUSTED`
	ServiceErrorCodeServiceUnderMaintenance         ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE`
	ServiceErrorCodeTemporarilyUnavailable          ServiceErrorCode = `TEMPORARILY_UNAVAILABLE`
	ServiceErrorCodeUnauthenticated                 ServiceErrorCode = `UNAUTHENTICATED`
	ServiceErrorCodeUnknown                         ServiceErrorCode = `UNKNOWN`
	ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE`
)

func (*ServiceErrorCode) Set added in v0.3.0

func (f *ServiceErrorCode) Set(v string) error

Set raw string value and validate it against allowed values

func (*ServiceErrorCode) String added in v0.3.0

func (f *ServiceErrorCode) String() string

String representation for fmt.Print

func (*ServiceErrorCode) Type added in v0.3.0

func (f *ServiceErrorCode) Type() string

Type always returns ServiceErrorCode to satisfy [pflag.Value] interface

type SetRequest

// SetRequest is the request to set an object's access control list (ACL).
type SetRequest struct {
	// The complete ACL to apply to the object.
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// Object ID. The ACL for the object with this UUID is overwritten by this
	// request's POST content.
	ObjectId string `json:"-" url:"-"`
	// The type of object permission to set.
	ObjectType ObjectTypePlural `json:"-" url:"-"`
}

Set object ACL

type SetResponse

// SetResponse is the response returned after setting an object's ACL.
type SetResponse struct {
	// The ACL now in effect for the object.
	AccessControlList []AccessControl `json:"access_control_list,omitempty"`
	// An object's type and UUID, separated by a forward slash (/) character.
	ObjectId string `json:"object_id,omitempty"`
	// A singular noun object type.
	ObjectType ObjectType `json:"object_type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (SetResponse) MarshalJSON added in v0.23.0

func (s SetResponse) MarshalJSON() ([]byte, error)

func (*SetResponse) UnmarshalJSON added in v0.23.0

func (s *SetResponse) UnmarshalJSON(b []byte) error

type SetWorkspaceWarehouseConfigRequest

// SetWorkspaceWarehouseConfigRequest sets the workspace-level configuration
// shared by all SQL warehouses in a workspace.
type SetWorkspaceWarehouseConfigRequest struct {
	// Optional: Channel selection details
	Channel *Channel `json:"channel,omitempty"`
	// Deprecated: Use SqlConfigurationParameters instead.
	ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
	// Spark confs for external hive metastore configuration JSON serialized
	// size must be less than <= 512K
	DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
	// List of Warehouse Types allowed in this workspace (limits allowed value
	// of the type field in CreateWarehouse and EditWarehouse). Note: Some types
	// cannot be disabled, they don't need to be specified in
	// SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
	// warehouses to be converted to another type. Used by frontend to save
	// specific type availability in the warehouse create and edit form UI.
	EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
	// Deprecated: Use SqlConfigurationParameters instead.
	GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
	// GCP only: Google Service Account used to pass to cluster to access Google
	// Cloud Storage
	GoogleServiceAccount string `json:"google_service_account,omitempty"`
	// AWS Only: Instance profile used to pass IAM role to the cluster
	InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
	// Security policy for warehouses
	SecurityPolicy SetWorkspaceWarehouseConfigRequestSecurityPolicy `json:"security_policy,omitempty"`
	// SQL configuration parameters
	SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; consumed by this type's custom MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (SetWorkspaceWarehouseConfigRequest) MarshalJSON added in v0.23.0

func (s SetWorkspaceWarehouseConfigRequest) MarshalJSON() ([]byte, error)

func (*SetWorkspaceWarehouseConfigRequest) UnmarshalJSON added in v0.23.0

func (s *SetWorkspaceWarehouseConfigRequest) UnmarshalJSON(b []byte) error

type SetWorkspaceWarehouseConfigRequestSecurityPolicy

// SetWorkspaceWarehouseConfigRequestSecurityPolicy is the security policy
// for warehouses.
type SetWorkspaceWarehouseConfigRequestSecurityPolicy string

// Allowed SetWorkspaceWarehouseConfigRequestSecurityPolicy values.
const (
	SetWorkspaceWarehouseConfigRequestSecurityPolicyDataAccessControl SetWorkspaceWarehouseConfigRequestSecurityPolicy = `DATA_ACCESS_CONTROL`
	SetWorkspaceWarehouseConfigRequestSecurityPolicyNone              SetWorkspaceWarehouseConfigRequestSecurityPolicy = `NONE`
	SetWorkspaceWarehouseConfigRequestSecurityPolicyPassthrough       SetWorkspaceWarehouseConfigRequestSecurityPolicy = `PASSTHROUGH`
)

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Set

Set raw string value and validate it against allowed values

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) String

String representation for fmt.Print

func (*SetWorkspaceWarehouseConfigRequestSecurityPolicy) Type

Type always returns SetWorkspaceWarehouseConfigRequestSecurityPolicy to satisfy [pflag.Value] interface

type SetWorkspaceWarehouseConfigResponse added in v0.34.0

// SetWorkspaceWarehouseConfigResponse is the empty response body returned
// after setting the workspace warehouse configuration.
type SetWorkspaceWarehouseConfigResponse struct {
}

type SpotInstancePolicy

// SpotInstancePolicy configures whether the warehouse should use spot
// instances.
type SpotInstancePolicy string

// Allowed SpotInstancePolicy values.
const (
	SpotInstancePolicyCostOptimized          SpotInstancePolicy = `COST_OPTIMIZED`
	SpotInstancePolicyPolicyUnspecified      SpotInstancePolicy = `POLICY_UNSPECIFIED`
	SpotInstancePolicyReliabilityOptimized   SpotInstancePolicy = `RELIABILITY_OPTIMIZED`
)

func (*SpotInstancePolicy) Set

func (f *SpotInstancePolicy) Set(v string) error

Set raw string value and validate it against allowed values

func (*SpotInstancePolicy) String

func (f *SpotInstancePolicy) String() string

String representation for fmt.Print

func (*SpotInstancePolicy) Type

func (f *SpotInstancePolicy) Type() string

Type always returns SpotInstancePolicy to satisfy [pflag.Value] interface

type StartRequest

// StartRequest identifies a SQL warehouse to start.
type StartRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Start a warehouse

type StartWarehouseResponse added in v0.34.0

// StartWarehouseResponse is the empty response body returned after starting
// a warehouse.
type StartWarehouseResponse struct {
}

type State

// State is the state of the warehouse.
type State string

// Allowed State values.
const (
	StateDeleted  State = `DELETED`
	StateDeleting State = `DELETING`
	StateRunning  State = `RUNNING`
	StateStarting State = `STARTING`
	StateStopped  State = `STOPPED`
	StateStopping State = `STOPPING`
)

func (*State) Set

func (f *State) Set(v string) error

Set raw string value and validate it against allowed values

func (*State) String

func (f *State) String() string

String representation for fmt.Print

func (*State) Type

func (f *State) Type() string

Type always returns State to satisfy [pflag.Value] interface

type StatementExecutionAPI added in v0.3.0

// StatementExecutionAPI provides access to the Databricks SQL Statement
// Execution API, which executes SQL statements on a SQL warehouse and
// fetches the results; see the accompanying overview for usage details.
type StatementExecutionAPI struct {
	// contains filtered or unexported fields
}

The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result.

**Getting started**

We suggest beginning with the Databricks SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format. If no other parameters are specified, the server will wait for up to 10s before returning a response. If the statement has completed within this timespan, the response will include the result data as a JSON array and metadata. Otherwise, if no result is available after the 10s timeout expired, the response will provide the statement ID that can be used to poll for results by using a :method:statementexecution/getStatement request.

You can specify whether the call should behave synchronously, asynchronously or start synchronously with a fallback to asynchronous execution. This is controlled with the `wait_timeout` and `on_wait_timeout` settings. If `wait_timeout` is set between 5-50 seconds (default: 10s), the call waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID. The `on_wait_timeout` setting specifies what should happen when the timeout is reached while the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement.

In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns directly with a statement ID. The status of the statement execution can be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can be used to fetch status and results in the same way as in the asynchronous mode.

Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest contains schema information and provides metadata for each chunk in the result. Result chunks can be retrieved by index with :method:statementexecution/getStatementResultChunkN which may be called in any order and in parallel. For sequential fetching, each chunk, apart from the last, also contains a `next_chunk_index` and `next_chunk_internal_link` that point to the next chunk.

A statement can be canceled with :method:statementexecution/cancelExecution.

**Fetching result data: format and disposition**

To specify the format of the result data, use the `format` field, which can be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

There are two ways to receive statement results, controlled by the `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`:

- `INLINE`: In this mode, the result data is directly included in the response. It's best suited for smaller results. This mode can only be used with the `JSON_ARRAY` format.

- `EXTERNAL_LINKS`: In this mode, the response provides links that can be used to download the result data in chunks separately. This approach is ideal for larger results and offers higher throughput. This mode can be used with all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`.

By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`.

**Limits and limitations**

Note: The byte limit for INLINE disposition is based on internal storage metrics and will not exactly match the byte count of the actual payload.

- Statements with `disposition=INLINE` are limited to 25 MiB and will fail when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. Result sets larger than this limit will be truncated. Truncation is indicated by the `truncated` field in the result manifest. - The maximum query text size is 16 MiB. - Cancelation might silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, an outstanding statement might have already completed execution when the cancel request arrives. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - To guarantee that the statement is kept alive, you must poll at least once every 15 minutes. - The results are only available for one hour after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL Execution API to cancel it.

func NewStatementExecution added in v0.3.0

func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionAPI

func (*StatementExecutionAPI) CancelExecution added in v0.3.0

func (a *StatementExecutionAPI) CancelExecution(ctx context.Context, request CancelExecutionRequest) error

func (*StatementExecutionAPI) ExecuteAndWait added in v0.10.0

[EXPERIMENTAL] Execute a query and wait for results to be available

func (*StatementExecutionAPI) ExecuteStatement added in v0.3.0

func (a *StatementExecutionAPI) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error)

func (*StatementExecutionAPI) GetStatement added in v0.3.0

func (a *StatementExecutionAPI) GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error)

func (*StatementExecutionAPI) GetStatementByStatementId added in v0.3.0

func (a *StatementExecutionAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error)

Get status, manifest, and result first chunk.

This request can be used to poll for the statement's status. When the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response.

**NOTE** This call currently might take up to 5 seconds to get the latest status and result.

func (*StatementExecutionAPI) GetStatementResultChunkN added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)

func (*StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex added in v0.3.0

func (a *StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error)

Get result chunk by index.

After the statement execution has `SUCCEEDED`, this request can be used to fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is typically fetched with :method:statementexecution/executeStatement or :method:statementexecution/getStatement, this request can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.

type StatementExecutionInterface added in v0.29.0

// StatementExecutionInterface is the high-level interface for the Statement
// Execution API: executing SQL statements, polling their status, fetching
// result chunks by index, and canceling execution.
type StatementExecutionInterface interface {

	// Cancel statement execution.
	//
	// Requests that an executing statement be canceled. Callers must poll for
	// status to see the terminal state.
	CancelExecution(ctx context.Context, request CancelExecutionRequest) error

	// Execute a SQL statement.
	ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error)

	// Get status, manifest, and result first chunk.
	//
	// This request can be used to poll for the statement's status. When the
	// `status.state` field is `SUCCEEDED` it will also return the result manifest
	// and the first chunk of the result data. When the statement is in the terminal
	// states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state
	// set. After at least 12 hours in terminal state, the statement is removed from
	// the warehouse and further calls will receive an HTTP 404 response.
	//
	// **NOTE** This call currently might take up to 5 seconds to get the latest
	// status and result.
	GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error)

	// Get status, manifest, and result first chunk.
	//
	// This request can be used to poll for the statement's status. When the
	// `status.state` field is `SUCCEEDED` it will also return the result manifest
	// and the first chunk of the result data. When the statement is in the terminal
	// states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state
	// set. After at least 12 hours in terminal state, the statement is removed from
	// the warehouse and further calls will receive an HTTP 404 response.
	//
	// **NOTE** This call currently might take up to 5 seconds to get the latest
	// status and result.
	GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error)

	// Get result chunk by index.
	//
	// After the statement execution has `SUCCEEDED`, this request can be used to
	// fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is
	// typically fetched with :method:statementexecution/executeStatement or
	// :method:statementexecution/getStatement, this request can be used to fetch
	// subsequent chunks. The response structure is identical to the nested `result`
	// element described in the :method:statementexecution/getStatement request, and
	// similarly includes the `next_chunk_index` and `next_chunk_internal_link`
	// fields for simple iteration through the result set.
	GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)

	// Get result chunk by index.
	//
	// After the statement execution has `SUCCEEDED`, this request can be used to
	// fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is
	// typically fetched with :method:statementexecution/executeStatement or
	// :method:statementexecution/getStatement, this request can be used to fetch
	// subsequent chunks. The response structure is identical to the nested `result`
	// element described in the :method:statementexecution/getStatement request, and
	// similarly includes the `next_chunk_index` and `next_chunk_internal_link`
	// fields for simple iteration through the result set.
	GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error)
	// contains filtered or unexported methods
}

type StatementExecutionService added in v0.3.0

// StatementExecutionService defines the operations of the Statement Execution
// API: executing SQL statements on a SQL warehouse, polling their status,
// fetching result chunks, and canceling execution.
type StatementExecutionService interface {

	// Cancel statement execution.
	//
	// Requests that an executing statement be canceled. Callers must poll for
	// status to see the terminal state.
	CancelExecution(ctx context.Context, request CancelExecutionRequest) error

	// Execute a SQL statement.
	ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error)

	// Get status, manifest, and result first chunk.
	//
	// This request can be used to poll for the statement's status. When the
	// `status.state` field is `SUCCEEDED` it will also return the result
	// manifest and the first chunk of the result data. When the statement is in
	// the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200
	// with the state set. After at least 12 hours in terminal state, the
	// statement is removed from the warehouse and further calls will receive an
	// HTTP 404 response.
	//
	// **NOTE** This call currently might take up to 5 seconds to get the latest
	// status and result.
	GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error)

	// Get result chunk by index.
	//
	// After the statement execution has `SUCCEEDED`, this request can be used
	// to fetch any chunk by index. Whereas the first chunk with `chunk_index=0`
	// is typically fetched with :method:statementexecution/executeStatement or
	// :method:statementexecution/getStatement, this request can be used to
	// fetch subsequent chunks. The response structure is identical to the
	// nested `result` element described in the
	// :method:statementexecution/getStatement request, and similarly includes
	// the `next_chunk_index` and `next_chunk_internal_link` fields for simple
	// iteration through the result set.
	GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error)
}

The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result.

**Getting started**

We suggest beginning with the Databricks SQL Statement Execution API tutorial.

**Overview of statement execution and result fetching**

Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format. If no other parameters are specified, the server will wait for up to 10s before returning a response. If the statement has completed within this timespan, the response will include the result data as a JSON array and metadata. Otherwise, if no result is available after the 10s timeout expired, the response will provide the statement ID that can be used to poll for results by using a :method:statementexecution/getStatement request.

You can specify whether the call should behave synchronously, asynchronously or start synchronously with a fallback to asynchronous execution. This is controlled with the `wait_timeout` and `on_wait_timeout` settings. If `wait_timeout` is set between 5-50 seconds (default: 10s), the call waits for results up to the specified timeout; when set to `0s`, the call is asynchronous and responds immediately with a statement ID. The `on_wait_timeout` setting specifies what should happen when the timeout is reached while the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement.

In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns directly with a statement ID. The status of the statement execution can be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can be used to fetch status and results in the same way as in the asynchronous mode.

Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest contains schema information and provides metadata for each chunk in the result. Result chunks can be retrieved by index with :method:statementexecution/getStatementResultChunkN which may be called in any order and in parallel. For sequential fetching, each chunk, apart from the last, also contains a `next_chunk_index` and `next_chunk_internal_link` that point to the next chunk.

A statement can be canceled with :method:statementexecution/cancelExecution.

**Fetching result data: format and disposition**

To specify the format of the result data, use the `format` field, which can be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` (Apache Arrow Columnar), or `CSV`.

There are two ways to receive statement results, controlled by the `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`:

- `INLINE`: In this mode, the result data is directly included in the response. It's best suited for smaller results. This mode can only be used with the `JSON_ARRAY` format.

- `EXTERNAL_LINKS`: In this mode, the response provides links that can be used to download the result data in chunks separately. This approach is ideal for larger results and offers higher throughput. This mode can be used with all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`.

By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`.

**Limits and limitations**

Note: The byte limit for INLINE disposition is based on internal storage metrics and will not exactly match the byte count of the actual payload.

- Statements with `disposition=INLINE` are limited to 25 MiB and will fail when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. Result sets larger than this limit will be truncated. Truncation is indicated by the `truncated` field in the result manifest. - The maximum query text size is 16 MiB. - Cancelation might silently fail. A successful response from a cancel request indicates that the cancel request was successfully received and sent to the processing engine. However, an outstanding statement might have already completed execution when the cancel request arrives. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - To guarantee that the statement is kept alive, you must poll at least once every 15 minutes. - The results are only available for one hour after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL Execution API to cancel it.

type StatementParameterListItem added in v0.18.0

// StatementParameterListItem describes one named parameter marker to be
// substituted into a SQL statement before execution.
type StatementParameterListItem struct {
	// The name of a parameter marker to be substituted in the statement.
	Name string `json:"name"`
	// The data type, given as a string. For example: `INT`, `STRING`,
	// `DECIMAL(10,2)`. If no type is given the type is assumed to be `STRING`.
	// Complex types, such as `ARRAY`, `MAP`, and `STRUCT` are not supported.
	// For valid types, refer to the section [Data types] of the SQL language
	// reference.
	//
	// [Data types]: https://docs.databricks.com/sql/language-manual/functions/cast.html
	Type string `json:"type,omitempty"`
	// The value to substitute, represented as a string. If omitted, the value
	// is interpreted as NULL.
	Value string `json:"value,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (StatementParameterListItem) MarshalJSON added in v0.23.0

func (s StatementParameterListItem) MarshalJSON() ([]byte, error)

func (*StatementParameterListItem) UnmarshalJSON added in v0.23.0

func (s *StatementParameterListItem) UnmarshalJSON(b []byte) error

type StatementResponse added in v0.44.0

// StatementResponse is the response to statement-execution calls, carrying the
// statement ID, execution status, and — when results are available — the
// result manifest and first chunk of result data.
type StatementResponse struct {
	// The result manifest provides schema and metadata for the result set.
	Manifest *ResultManifest `json:"manifest,omitempty"`

	// First chunk of result data, when available.
	Result *ResultData `json:"result,omitempty"`
	// The statement ID is returned upon successfully submitting a SQL
	// statement, and is a required reference for all subsequent calls.
	StatementId string `json:"statement_id,omitempty"`
	// The status response includes execution state and if relevant, error
	// information.
	Status *StatementStatus `json:"status,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (StatementResponse) MarshalJSON added in v0.44.0

func (s StatementResponse) MarshalJSON() ([]byte, error)

func (*StatementResponse) UnmarshalJSON added in v0.44.0

func (s *StatementResponse) UnmarshalJSON(b []byte) error

type StatementState added in v0.3.0

type StatementState string

Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution failed; reason for failure described in accompanying error message - `CANCELED`: user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful, and statement closed; result no longer available for fetch

const StatementStateCanceled StatementState = `CANCELED`

user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`

const StatementStateClosed StatementState = `CLOSED`

execution successful, and statement closed; result no longer available for fetch

const StatementStateFailed StatementState = `FAILED`

execution failed; reason for failure described in accompanying error message

const StatementStatePending StatementState = `PENDING`

waiting for warehouse

const StatementStateRunning StatementState = `RUNNING`

running

const StatementStateSucceeded StatementState = `SUCCEEDED`

execution was successful, result data available for fetch

func (*StatementState) Set added in v0.3.0

func (f *StatementState) Set(v string) error

Set raw string value and validate it against allowed values

func (*StatementState) String added in v0.3.0

func (f *StatementState) String() string

String representation for fmt.Print

func (*StatementState) Type added in v0.3.0

func (f *StatementState) Type() string

Type always returns StatementState to satisfy [pflag.Value] interface

type StatementStatus added in v0.3.0

// StatementStatus carries the execution state of a statement and, if relevant,
// error information.
type StatementStatus struct {
	// Error information, populated when the statement has failed.
	Error *ServiceError `json:"error,omitempty"`
	// Statement execution state: - `PENDING`: waiting for warehouse -
	// `RUNNING`: running - `SUCCEEDED`: execution was successful, result data
	// available for fetch - `FAILED`: execution failed; reason for failure
	// described in accompanying error message - `CANCELED`: user canceled; can
	// come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`
	// - `CLOSED`: execution successful, and statement closed; result no longer
	// available for fetch
	State StatementState `json:"state,omitempty"`
}

The status response includes execution state and if relevant, error information.

type Status

type Status string

Health status of the warehouse.

const StatusDegraded Status = `DEGRADED`
const StatusFailed Status = `FAILED`
const StatusHealthy Status = `HEALTHY`
const StatusStatusUnspecified Status = `STATUS_UNSPECIFIED`

func (*Status) Set

func (f *Status) Set(v string) error

Set raw string value and validate it against allowed values

func (*Status) String

func (f *Status) String() string

String representation for fmt.Print

func (*Status) Type

func (f *Status) Type() string

Type always returns Status to satisfy [pflag.Value] interface

type StopRequest

// StopRequest identifies the SQL warehouse to stop.
type StopRequest struct {
	// Required. Id of the SQL warehouse.
	Id string `json:"-" url:"-"`
}

Stop a warehouse

type StopWarehouseResponse added in v0.34.0

// StopWarehouseResponse is the empty response body returned when stopping a
// warehouse.
type StopWarehouseResponse struct {
}

type Success

// Success wraps a SuccessMessage returned by the API.
type Success struct {
	Message SuccessMessage `json:"message,omitempty"`
}

type SuccessMessage

type SuccessMessage string
const SuccessMessageSuccess SuccessMessage = `Success`

func (*SuccessMessage) Set

func (f *SuccessMessage) Set(v string) error

Set raw string value and validate it against allowed values

func (*SuccessMessage) String

func (f *SuccessMessage) String() string

String representation for fmt.Print

func (*SuccessMessage) Type

func (f *SuccessMessage) Type() string

Type always returns SuccessMessage to satisfy [pflag.Value] interface

type TerminationReason

// TerminationReason explains why a cluster was terminated: a status code, the
// termination type, and any additional parameters.
type TerminationReason struct {
	// status code indicating why the cluster was terminated
	Code TerminationReasonCode `json:"code,omitempty"`
	// list of parameters that provide additional information about why the
	// cluster was terminated
	Parameters map[string]string `json:"parameters,omitempty"`
	// type of the termination
	Type TerminationReasonType `json:"type,omitempty"`
}

type TerminationReasonCode

type TerminationReasonCode string

status code indicating why the cluster was terminated

const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED`
const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE`
const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE`
const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`
const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`
const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`
const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED`
const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE`
const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE`
const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE`
const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE`
const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`
const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION`
const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING`
const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING`
const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`
const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE`
const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE`
const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT`
const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`
const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE`
const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE`
const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT`
const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN`
const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST`
const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE`
const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE`
const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE`
const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY`
const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE`
const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE`
const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE`
const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY`
const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED`
const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED`
const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE`
const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE`
const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED`
const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY`
const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE`
const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE`
const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE`
const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR`
const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT`
const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE`
const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE`
const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED`
const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE`
const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`
const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY`
const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT`
const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE`
const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE`
const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE`
const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE`
const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED`
const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED`
const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR`
const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION`
const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE`
const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES`
const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD`
const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR`
const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE`
const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION`
const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE`
const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE`
const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE`
const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE`
const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED`
const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE`
const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN`
const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE`
const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE`
const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST`
const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE`
const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR`
const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR`

func (*TerminationReasonCode) Set

func (f *TerminationReasonCode) Set(v string) error

Set raw string value and validate it against allowed values

func (*TerminationReasonCode) String

func (f *TerminationReasonCode) String() string

String representation for fmt.Print

func (*TerminationReasonCode) Type

func (f *TerminationReasonCode) Type() string

Type always returns TerminationReasonCode to satisfy [pflag.Value] interface

type TerminationReasonType

type TerminationReasonType string

type of the termination

const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR`
const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE`
const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT`
const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS`

func (*TerminationReasonType) Set

func (f *TerminationReasonType) Set(v string) error

Set raw string value and validate it against allowed values

func (*TerminationReasonType) String

func (f *TerminationReasonType) String() string

String representation for fmt.Print

func (*TerminationReasonType) Type

func (f *TerminationReasonType) Type() string

Type always returns TerminationReasonType to satisfy [pflag.Value] interface

type TextValue added in v0.44.0

// TextValue holds a single string value.
type TextValue struct {
	Value string `json:"value,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (TextValue) MarshalJSON added in v0.44.0

func (s TextValue) MarshalJSON() ([]byte, error)

func (*TextValue) UnmarshalJSON added in v0.44.0

func (s *TextValue) UnmarshalJSON(b []byte) error

type TimeRange

// TimeRange is a start/end interval expressed in milliseconds.
type TimeRange struct {
	// The end time in milliseconds.
	EndTimeMs int64 `json:"end_time_ms,omitempty" url:"end_time_ms,omitempty"`
	// The start time in milliseconds.
	StartTimeMs int64 `json:"start_time_ms,omitempty" url:"start_time_ms,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (TimeRange) MarshalJSON added in v0.23.0

func (s TimeRange) MarshalJSON() ([]byte, error)

func (*TimeRange) UnmarshalJSON added in v0.23.0

func (s *TimeRange) UnmarshalJSON(b []byte) error

type TransferOwnershipObjectId

// TransferOwnershipObjectId names the new owner for an ownership transfer.
type TransferOwnershipObjectId struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (TransferOwnershipObjectId) MarshalJSON added in v0.23.0

func (s TransferOwnershipObjectId) MarshalJSON() ([]byte, error)

func (*TransferOwnershipObjectId) UnmarshalJSON added in v0.23.0

func (s *TransferOwnershipObjectId) UnmarshalJSON(b []byte) error

type TransferOwnershipRequest

// TransferOwnershipRequest transfers ownership of an object to a new owner.
// ObjectId and ObjectType are routing fields (not serialized in body or URL
// query, per their `json:"-" url:"-"` tags).
type TransferOwnershipRequest struct {
	// Email address for the new owner, who must exist in the workspace.
	NewOwner string `json:"new_owner,omitempty"`
	// The ID of the object on which to change ownership.
	ObjectId TransferOwnershipObjectId `json:"-" url:"-"`
	// The type of object on which to change ownership.
	ObjectType OwnableObjectType `json:"-" url:"-"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

Transfer object ownership

func (TransferOwnershipRequest) MarshalJSON added in v0.23.0

func (s TransferOwnershipRequest) MarshalJSON() ([]byte, error)

func (*TransferOwnershipRequest) UnmarshalJSON added in v0.23.0

func (s *TransferOwnershipRequest) UnmarshalJSON(b []byte) error

type TrashAlertRequest added in v0.44.0

// TrashAlertRequest identifies the alert to delete.
type TrashAlertRequest struct {
	// ID of the alert to delete.
	Id string `json:"-" url:"-"`
}

Delete an alert

type TrashQueryRequest added in v0.44.0

// TrashQueryRequest identifies the query to delete.
type TrashQueryRequest struct {
	// ID of the query to delete.
	Id string `json:"-" url:"-"`
}

Delete a query

type UpdateAlertRequest added in v0.44.0

// UpdateAlertRequest is a PATCH request that updates an existing alert; only
// the fields named in UpdateMask are applied.
type UpdateAlertRequest struct {
	// New values for the alert fields selected by UpdateMask.
	Alert *UpdateAlertRequestAlert `json:"alert,omitempty"`

	// ID of the alert to update.
	Id string `json:"-" url:"-"`
	// Field mask is required to be passed into the PATCH request. Field mask
	// specifies which fields of the setting payload will be updated. The field
	// mask needs to be supplied as single string. To specify multiple fields in
	// the field mask, use comma as the separator (no space).
	UpdateMask string `json:"update_mask"`
}

type UpdateAlertRequestAlert added in v0.44.0

// UpdateAlertRequestAlert holds the writable alert fields for an update;
// fields take effect only when selected by the enclosing request's UpdateMask.
type UpdateAlertRequestAlert struct {
	// Trigger conditions of the alert.
	Condition *AlertCondition `json:"condition,omitempty"`
	// Custom body of alert notification, if it exists. See [here] for custom
	// templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomBody string `json:"custom_body,omitempty"`
	// Custom subject of alert notification, if it exists. This can include
	// email subject entries and Slack notification headers, for example. See
	// [here] for custom templating instructions.
	//
	// [here]: https://docs.databricks.com/sql/user/alerts/index.html
	CustomSubject string `json:"custom_subject,omitempty"`
	// The display name of the alert.
	DisplayName string `json:"display_name,omitempty"`
	// Whether to notify alert subscribers when alert returns back to normal.
	NotifyOnOk bool `json:"notify_on_ok,omitempty"`
	// The owner's username. This field is set to "Unavailable" if the user has
	// been deleted.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// UUID of the query attached to the alert.
	QueryId string `json:"query_id,omitempty"`
	// Number of seconds an alert must wait after being triggered to rearm
	// itself. After rearming, it can be triggered again. If 0 or not specified,
	// the alert will not be triggered again.
	SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold the
	// zero value; see MarshalJSON.
	ForceSendFields []string `json:"-"`
}

func (UpdateAlertRequestAlert) MarshalJSON added in v0.44.0

func (s UpdateAlertRequestAlert) MarshalJSON() ([]byte, error)

func (*UpdateAlertRequestAlert) UnmarshalJSON added in v0.44.0

func (s *UpdateAlertRequestAlert) UnmarshalJSON(b []byte) error

type UpdateQueryRequest added in v0.44.0

// UpdateQueryRequest is a PATCH request that updates an existing query; only
// the fields named in UpdateMask are applied.
type UpdateQueryRequest struct {
	// ID of the query to update.
	Id string `json:"-" url:"-"`

	// New values for the query fields selected by UpdateMask.
	Query *UpdateQueryRequestQuery `json:"query,omitempty"`
	// Field mask is required to be passed into the PATCH request. Field mask
	// specifies which fields of the setting payload will be updated. The field
	// mask needs to be supplied as single string. To specify multiple fields in
	// the field mask, use comma as the separator (no space).
	UpdateMask string `json:"update_mask"`
}

type UpdateQueryRequestQuery added in v0.44.0

type UpdateQueryRequestQuery struct {
	// Whether to apply a 1000 row limit to the query result.
	ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
	// Name of the catalog where this query will be executed.
	Catalog string `json:"catalog,omitempty"`
	// General description that conveys additional information about this query
	// such as usage notes.
	Description string `json:"description,omitempty"`
	// Display name of the query that appears in list views, widget headings,
	// and on the query page.
	DisplayName string `json:"display_name,omitempty"`
	// Username of the user that owns the query.
	OwnerUserName string `json:"owner_user_name,omitempty"`
	// List of query parameter definitions.
	Parameters []QueryParameter `json:"parameters,omitempty"`
	// Text of the query to be run.
	QueryText string `json:"query_text,omitempty"`
	// Sets the "Run as" role for the object.
	RunAsMode RunAsMode `json:"run_as_mode,omitempty"`
	// Name of the schema where this query will be executed.
	Schema string `json:"schema,omitempty"`
	// Tags associated with this query.
	Tags []string `json:"tags,omitempty"`
	// ID of the SQL warehouse attached to the query.
	WarehouseId string `json:"warehouse_id,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (UpdateQueryRequestQuery) MarshalJSON added in v0.44.0

func (s UpdateQueryRequestQuery) MarshalJSON() ([]byte, error)

func (*UpdateQueryRequestQuery) UnmarshalJSON added in v0.44.0

func (s *UpdateQueryRequestQuery) UnmarshalJSON(b []byte) error

type UpdateResponse added in v0.34.0

// UpdateResponse is an intentionally empty response body.
type UpdateResponse struct {
}

type UpdateVisualizationRequest added in v0.44.0

type UpdateVisualizationRequest struct {
	// UUID of the visualization to update. Excluded from the serialized
	// request body and query string (json and url tags are "-").
	Id string `json:"-" url:"-"`
	// Field mask is required to be passed into the PATCH request. Field mask
	// specifies which fields of the setting payload will be updated. The field
	// mask needs to be supplied as single string. To specify multiple fields in
	// the field mask, use comma as the separator (no space).
	UpdateMask string `json:"update_mask"`

	// New values for the visualization. Only the fields named in UpdateMask
	// are updated.
	Visualization *UpdateVisualizationRequestVisualization `json:"visualization,omitempty"`
}

type UpdateVisualizationRequestVisualization added in v0.44.0

type UpdateVisualizationRequestVisualization struct {
	// The display name of the visualization.
	DisplayName string `json:"display_name,omitempty"`
	// The visualization options vary widely from one visualization type to
	// the next and are unsupported. Databricks does not recommend modifying
	// visualization options directly.
	SerializedOptions string `json:"serialized_options,omitempty"`
	// The visualization query plan varies widely from one visualization type to
	// the next and is unsupported. Databricks does not recommend modifying the
	// visualization query plan directly.
	SerializedQueryPlan string `json:"serialized_query_plan,omitempty"`
	// The type of visualization: counter, table, funnel, and so on.
	Type string `json:"type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (UpdateVisualizationRequestVisualization) MarshalJSON added in v0.44.0

func (s UpdateVisualizationRequestVisualization) MarshalJSON() ([]byte, error)

func (*UpdateVisualizationRequestVisualization) UnmarshalJSON added in v0.44.0

func (s *UpdateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error

type User

type User struct {
	// Email address of the user.
	Email string `json:"email,omitempty"`
	// Numeric ID of the user.
	Id int `json:"id,omitempty"`
	// Name of the user.
	Name string `json:"name,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (User) MarshalJSON added in v0.23.0

func (s User) MarshalJSON() ([]byte, error)

func (*User) UnmarshalJSON added in v0.23.0

func (s *User) UnmarshalJSON(b []byte) error

type Visualization

type Visualization struct {
	// The timestamp indicating when the visualization was created.
	CreateTime string `json:"create_time,omitempty"`
	// The display name of the visualization.
	DisplayName string `json:"display_name,omitempty"`
	// UUID identifying the visualization.
	Id string `json:"id,omitempty"`
	// UUID of the query that the visualization is attached to.
	QueryId string `json:"query_id,omitempty"`
	// The visualization options vary widely from one visualization type to
	// the next and are unsupported. Databricks does not recommend modifying
	// visualization options directly.
	SerializedOptions string `json:"serialized_options,omitempty"`
	// The visualization query plan varies widely from one visualization type to
	// the next and is unsupported. Databricks does not recommend modifying the
	// visualization query plan directly.
	SerializedQueryPlan string `json:"serialized_query_plan,omitempty"`
	// The type of visualization: counter, table, funnel, and so on.
	Type string `json:"type,omitempty"`
	// The timestamp indicating when the visualization was updated.
	UpdateTime string `json:"update_time,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (Visualization) MarshalJSON added in v0.23.0

func (s Visualization) MarshalJSON() ([]byte, error)

func (*Visualization) UnmarshalJSON added in v0.23.0

func (s *Visualization) UnmarshalJSON(b []byte) error

type WaitGetWarehouseRunning added in v0.10.0

// WaitGetWarehouseRunning is a wrapper that calls
// WarehousesAPI.WaitGetWarehouseRunning and waits to reach RUNNING state.
type WaitGetWarehouseRunning[R any] struct {
	// Response from the call that initiated the wait.
	Response *R
	// Id of the warehouse being polled.
	Id       string `json:"id"`
	// Poll is the polling function used by Get/GetWithTimeout; the callback
	// is invoked on each status update (see OnProgress).
	Poll     func(time.Duration, func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)
	// contains filtered or unexported fields
}

WaitGetWarehouseRunning is a wrapper that calls WarehousesAPI.WaitGetWarehouseRunning and waits to reach RUNNING state.

func (*WaitGetWarehouseRunning[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseRunning[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseRunning[R]

OnProgress invokes a callback every time it polls for the status update.

type WaitGetWarehouseStopped added in v0.10.0

// WaitGetWarehouseStopped is a wrapper that calls
// WarehousesAPI.WaitGetWarehouseStopped and waits to reach STOPPED state.
type WaitGetWarehouseStopped[R any] struct {
	// Response from the call that initiated the wait.
	Response *R
	// Id of the warehouse being polled.
	Id       string `json:"id"`
	// Poll is the polling function used by Get/GetWithTimeout; the callback
	// is invoked on each status update (see OnProgress).
	Poll     func(time.Duration, func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)
	// contains filtered or unexported fields
}

WaitGetWarehouseStopped is a wrapper that calls WarehousesAPI.WaitGetWarehouseStopped and waits to reach STOPPED state.

func (*WaitGetWarehouseStopped[R]) Get added in v0.10.0

Get the GetWarehouseResponse with the default timeout of 20 minutes.

func (*WaitGetWarehouseStopped[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) GetWithTimeout(timeout time.Duration) (*GetWarehouseResponse, error)

Get the GetWarehouseResponse with custom timeout.

func (*WaitGetWarehouseStopped[R]) OnProgress added in v0.10.0

func (w *WaitGetWarehouseStopped[R]) OnProgress(callback func(*GetWarehouseResponse)) *WaitGetWarehouseStopped[R]

OnProgress invokes a callback every time it polls for the status update.

type WarehouseAccessControlRequest added in v0.15.0

type WarehouseAccessControlRequest struct {
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`
	// application ID of a service principal
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehouseAccessControlRequest) MarshalJSON added in v0.23.0

func (s WarehouseAccessControlRequest) MarshalJSON() ([]byte, error)

func (*WarehouseAccessControlRequest) UnmarshalJSON added in v0.23.0

func (s *WarehouseAccessControlRequest) UnmarshalJSON(b []byte) error

type WarehouseAccessControlResponse added in v0.15.0

type WarehouseAccessControlResponse struct {
	// All permissions.
	AllPermissions []WarehousePermission `json:"all_permissions,omitempty"`
	// Display name of the user or service principal.
	DisplayName string `json:"display_name,omitempty"`
	// name of the group
	GroupName string `json:"group_name,omitempty"`
	// Name of the service principal.
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	// name of the user
	UserName string `json:"user_name,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehouseAccessControlResponse) MarshalJSON added in v0.23.0

func (s WarehouseAccessControlResponse) MarshalJSON() ([]byte, error)

func (*WarehouseAccessControlResponse) UnmarshalJSON added in v0.23.0

func (s *WarehouseAccessControlResponse) UnmarshalJSON(b []byte) error

type WarehousePermission added in v0.15.0

type WarehousePermission struct {
	// Whether this permission is inherited from a parent object rather than
	// granted directly on the warehouse.
	Inherited bool `json:"inherited,omitempty"`
	// Identifiers of the objects the permission is inherited from —
	// presumably ancestor object paths; confirm against the REST API docs.
	InheritedFromObject []string `json:"inherited_from_object,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehousePermission) MarshalJSON added in v0.23.0

func (s WarehousePermission) MarshalJSON() ([]byte, error)

func (*WarehousePermission) UnmarshalJSON added in v0.23.0

func (s *WarehousePermission) UnmarshalJSON(b []byte) error

type WarehousePermissionLevel added in v0.15.0

// WarehousePermissionLevel is the permission level a principal can have on a
// SQL warehouse.
type WarehousePermissionLevel string

Permission level

// Allowed values for WarehousePermissionLevel.
const WarehousePermissionLevelCanManage WarehousePermissionLevel = `CAN_MANAGE`
const WarehousePermissionLevelCanMonitor WarehousePermissionLevel = `CAN_MONITOR`
const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE`
const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER`

func (*WarehousePermissionLevel) Set added in v0.15.0

Set raw string value and validate it against allowed values

func (*WarehousePermissionLevel) String added in v0.15.0

func (f *WarehousePermissionLevel) String() string

String representation for fmt.Print

func (*WarehousePermissionLevel) Type added in v0.15.0

func (f *WarehousePermissionLevel) Type() string

Type always returns WarehousePermissionLevel to satisfy [pflag.Value] interface

type WarehousePermissions added in v0.15.0

type WarehousePermissions struct {
	// Access-control entries attached to the object.
	AccessControlList []WarehouseAccessControlResponse `json:"access_control_list,omitempty"`
	// ID of the object the permissions apply to.
	ObjectId string `json:"object_id,omitempty"`
	// Type of the object the permissions apply to.
	ObjectType string `json:"object_type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehousePermissions) MarshalJSON added in v0.23.0

func (s WarehousePermissions) MarshalJSON() ([]byte, error)

func (*WarehousePermissions) UnmarshalJSON added in v0.23.0

func (s *WarehousePermissions) UnmarshalJSON(b []byte) error

type WarehousePermissionsDescription added in v0.15.0

type WarehousePermissionsDescription struct {
	// Human-readable description of the permission level.
	Description string `json:"description,omitempty"`
	// Permission level
	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehousePermissionsDescription) MarshalJSON added in v0.23.0

func (s WarehousePermissionsDescription) MarshalJSON() ([]byte, error)

func (*WarehousePermissionsDescription) UnmarshalJSON added in v0.23.0

func (s *WarehousePermissionsDescription) UnmarshalJSON(b []byte) error

type WarehousePermissionsRequest added in v0.15.0

// WarehousePermissionsRequest is the request body used to set or update
// permissions on a SQL warehouse.
type WarehousePermissionsRequest struct {
	// Access-control entries to apply to the warehouse.
	AccessControlList []WarehouseAccessControlRequest `json:"access_control_list,omitempty"`
	// The SQL warehouse for which to get or manage permissions.
	WarehouseId string `json:"-" url:"-"`
}

type WarehouseTypePair

type WarehouseTypePair struct {
	// If set to false, the specific warehouse type will not be allowed as a
	// value for warehouse_type in CreateWarehouse and EditWarehouse
	Enabled bool `json:"enabled,omitempty"`
	// Warehouse type: `PRO` or `CLASSIC`.
	WarehouseType WarehouseTypePairWarehouseType `json:"warehouse_type,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold a
	// zero value; it is never serialized itself (json tag "-").
	ForceSendFields []string `json:"-"`
}

func (WarehouseTypePair) MarshalJSON added in v0.23.0

func (s WarehouseTypePair) MarshalJSON() ([]byte, error)

func (*WarehouseTypePair) UnmarshalJSON added in v0.23.0

func (s *WarehouseTypePair) UnmarshalJSON(b []byte) error

type WarehouseTypePairWarehouseType added in v0.9.0

// WarehouseTypePairWarehouseType is the warehouse type: `PRO` or `CLASSIC`.
type WarehouseTypePairWarehouseType string

Warehouse type: `PRO` or `CLASSIC`.

// Allowed values for WarehouseTypePairWarehouseType.
const WarehouseTypePairWarehouseTypeClassic WarehouseTypePairWarehouseType = `CLASSIC`
const WarehouseTypePairWarehouseTypePro WarehouseTypePairWarehouseType = `PRO`
const WarehouseTypePairWarehouseTypeTypeUnspecified WarehouseTypePairWarehouseType = `TYPE_UNSPECIFIED`

func (*WarehouseTypePairWarehouseType) Set added in v0.9.0

Set raw string value and validate it against allowed values

func (*WarehouseTypePairWarehouseType) String added in v0.9.0

String representation for fmt.Print

func (*WarehouseTypePairWarehouseType) Type added in v0.9.0

Type always returns WarehouseTypePairWarehouseType to satisfy [pflag.Value] interface

type WarehousesAPI

// WarehousesAPI provides access to the SQL warehouses API. A SQL warehouse is
// a compute resource that lets you run SQL commands on data objects within
// Databricks SQL.
type WarehousesAPI struct {
	// contains filtered or unexported fields
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

func NewWarehouses

func NewWarehouses(client *client.DatabricksClient) *WarehousesAPI

func (*WarehousesAPI) Create

Create a warehouse.

Creates a new SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
	Tags: &sql.EndpointTags{
		CustomTags: []sql.EndpointTagPair{sql.EndpointTagPair{
			Key:   "Owner",
			Value: "eng-dev-ecosystem-team_at_databricks.com",
		}},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) CreateAndWait deprecated

func (a *WarehousesAPI) CreateAndWait(ctx context.Context, createWarehouseRequest CreateWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Create and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Create.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Delete

func (a *WarehousesAPI) Delete(ctx context.Context, request DeleteWarehouseRequest) error

func (*WarehousesAPI) DeleteById

func (a *WarehousesAPI) DeleteById(ctx context.Context, id string) error

Delete a warehouse.

Deletes a SQL warehouse.

func (*WarehousesAPI) Edit

func (a *WarehousesAPI) Edit(ctx context.Context, editWarehouseRequest EditWarehouseRequest) (*WaitGetWarehouseRunning[struct{}], error)

Update a warehouse.

Updates the configuration for a SQL warehouse.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
	Tags: &sql.EndpointTags{
		CustomTags: []sql.EndpointTagPair{sql.EndpointTagPair{
			Key:   "Owner",
			Value: "eng-dev-ecosystem-team_at_databricks.com",
		}},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = w.Warehouses.Edit(ctx, sql.EditWarehouseRequest{
	Id:             created.Id,
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
})
if err != nil {
	panic(err)
}

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) EditAndWait deprecated

func (a *WarehousesAPI) EditAndWait(ctx context.Context, editWarehouseRequest EditWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Edit and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Edit.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) EndpointInfoNameToIdMap

func (a *WarehousesAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error)

EndpointInfoNameToIdMap calls WarehousesAPI.ListAll and creates a map of results with EndpointInfo.Name as key and EndpointInfo.Id as value.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) Get

func (a *WarehousesAPI) Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error)
Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

created, err := w.Warehouses.CreateAndWait(ctx, sql.CreateWarehouseRequest{
	Name:           fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	ClusterSize:    "2X-Small",
	MaxNumClusters: 1,
	AutoStopMins:   10,
	Tags: &sql.EndpointTags{
		CustomTags: []sql.EndpointTagPair{sql.EndpointTagPair{
			Key:   "Owner",
			Value: "eng-dev-ecosystem-team_at_databricks.com",
		}},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

wh, err := w.Warehouses.GetById(ctx, created.Id)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", wh)

// cleanup

err = w.Warehouses.DeleteById(ctx, created.Id)
if err != nil {
	panic(err)
}
Output:

func (*WarehousesAPI) GetById

Get warehouse info.

Gets the information for a single SQL warehouse.

func (*WarehousesAPI) GetByName

func (a *WarehousesAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error)

GetByName calls WarehousesAPI.EndpointInfoNameToIdMap and returns a single EndpointInfo.

Returns an error if there's more than one EndpointInfo with the same .Name.

Note: All EndpointInfo instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) GetPermissionLevels added in v0.19.0

func (a *WarehousesAPI) GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error)

func (*WarehousesAPI) GetPermissionLevelsByWarehouseId added in v0.19.0

func (a *WarehousesAPI) GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error)

Get SQL warehouse permission levels.

Gets the permission levels that a user can have on an object.

func (*WarehousesAPI) GetPermissions added in v0.19.0

func (a *WarehousesAPI) GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error)

func (*WarehousesAPI) GetPermissionsByWarehouseId added in v0.19.0

func (a *WarehousesAPI) GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error)

Get SQL warehouse permissions.

Gets the permissions of a SQL warehouse. SQL warehouses can inherit permissions from their root object.

func (*WarehousesAPI) GetWorkspaceWarehouseConfig

func (a *WarehousesAPI) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

func (*WarehousesAPI) List added in v0.24.0

List warehouses.

Lists all SQL warehouses that a user has manager permissions on.

This method is generated by Databricks SDK Code Generator.

func (*WarehousesAPI) ListAll

func (a *WarehousesAPI) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error)

List warehouses.

Lists all SQL warehouses that a user has manager permissions on.

This method is generated by Databricks SDK Code Generator.

Example (SqlWarehouses)
ctx := context.Background()
w, err := databricks.NewWorkspaceClient()
if err != nil {
	panic(err)
}

all, err := w.Warehouses.ListAll(ctx, sql.ListWarehousesRequest{})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)
Output:

func (*WarehousesAPI) SetPermissions added in v0.19.0

func (a *WarehousesAPI) SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

func (*WarehousesAPI) SetWorkspaceWarehouseConfig

func (a *WarehousesAPI) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

func (*WarehousesAPI) Start

func (a *WarehousesAPI) Start(ctx context.Context, startRequest StartRequest) (*WaitGetWarehouseRunning[struct{}], error)

Start a warehouse.

Starts a SQL warehouse.

func (*WarehousesAPI) StartAndWait deprecated

func (a *WarehousesAPI) StartAndWait(ctx context.Context, startRequest StartRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Start and waits to reach RUNNING state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Start.Get() or WarehousesAPI.WaitGetWarehouseRunning

func (*WarehousesAPI) Stop

func (a *WarehousesAPI) Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetWarehouseStopped[struct{}], error)

Stop a warehouse.

Stops a SQL warehouse.

func (*WarehousesAPI) StopAndWait deprecated

func (a *WarehousesAPI) StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

Calls WarehousesAPI.Stop and waits to reach STOPPED state

You can override the default timeout of 20 minutes by adding the retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.

Deprecated: use WarehousesAPI.Stop.Get() or WarehousesAPI.WaitGetWarehouseStopped

func (*WarehousesAPI) UpdatePermissions added in v0.19.0

func (a *WarehousesAPI) UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

func (*WarehousesAPI) WaitGetWarehouseRunning added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseRunning(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseRunning repeatedly calls WarehousesAPI.Get and waits to reach RUNNING state

func (*WarehousesAPI) WaitGetWarehouseStopped added in v0.10.0

func (a *WarehousesAPI) WaitGetWarehouseStopped(ctx context.Context, id string,
	timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

WaitGetWarehouseStopped repeatedly calls WarehousesAPI.Get and waits to reach STOPPED state

type WarehousesInterface added in v0.29.0

type WarehousesInterface interface {

	// WaitGetWarehouseRunning repeatedly calls [WarehousesAPI.Get] and waits to reach RUNNING state
	WaitGetWarehouseRunning(ctx context.Context, id string,
		timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

	// WaitGetWarehouseStopped repeatedly calls [WarehousesAPI.Get] and waits to reach STOPPED state
	WaitGetWarehouseStopped(ctx context.Context, id string,
		timeout time.Duration, callback func(*GetWarehouseResponse)) (*GetWarehouseResponse, error)

	// Create a warehouse.
	//
	// Creates a new SQL warehouse.
	Create(ctx context.Context, createWarehouseRequest CreateWarehouseRequest) (*WaitGetWarehouseRunning[CreateWarehouseResponse], error)

	// Calls [WarehousesAPIInterface.Create] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [WarehousesAPIInterface.Create].Get() or [WarehousesAPIInterface.WaitGetWarehouseRunning]
	CreateAndWait(ctx context.Context, createWarehouseRequest CreateWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

	// Delete a warehouse.
	//
	// Deletes a SQL warehouse.
	Delete(ctx context.Context, request DeleteWarehouseRequest) error

	// Delete a warehouse.
	//
	// Deletes a SQL warehouse.
	DeleteById(ctx context.Context, id string) error

	// Update a warehouse.
	//
	// Updates the configuration for a SQL warehouse.
	Edit(ctx context.Context, editWarehouseRequest EditWarehouseRequest) (*WaitGetWarehouseRunning[struct{}], error)

	// Calls [WarehousesAPIInterface.Edit] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [WarehousesAPIInterface.Edit].Get() or [WarehousesAPIInterface.WaitGetWarehouseRunning]
	EditAndWait(ctx context.Context, editWarehouseRequest EditWarehouseRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

	// Get warehouse info.
	//
	// Gets the information for a single SQL warehouse.
	Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error)

	// Get warehouse info.
	//
	// Gets the information for a single SQL warehouse.
	GetById(ctx context.Context, id string) (*GetWarehouseResponse, error)

	// Get SQL warehouse permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error)

	// Get SQL warehouse permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error)

	// Get SQL warehouse permissions.
	//
	// Gets the permissions of a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error)

	// Get SQL warehouse permissions.
	//
	// Gets the permissions of a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error)

	// Get the workspace configuration.
	//
	// Gets the workspace level configuration that is shared by all SQL warehouses
	// in a workspace.
	GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

	// List warehouses.
	//
	// Lists all SQL warehouses that a user has manager permissions on.
	//
	// This method is generated by Databricks SDK Code Generator.
	List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo]

	// List warehouses.
	//
	// Lists all SQL warehouses that a user has manager permissions on.
	//
	// This method is generated by Databricks SDK Code Generator.
	ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error)

	// EndpointInfoNameToIdMap calls [WarehousesAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value.
	//
	// Returns an error if there's more than one [EndpointInfo] with the same .Name.
	//
	// Note: All [EndpointInfo] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error)

	// GetByName calls [WarehousesAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo].
	//
	// Returns an error if there's more than one [EndpointInfo] with the same .Name.
	//
	// Note: All [EndpointInfo] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByName(ctx context.Context, name string) (*EndpointInfo, error)

	// Set SQL warehouse permissions.
	//
	// Sets permissions on an object, replacing existing permissions if they exist.
	// Deletes all direct permissions if none are specified. Objects can inherit
	// permissions from their root object.
	SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

	// Set the workspace configuration.
	//
	// Sets the workspace level configuration that is shared by all SQL warehouses
	// in a workspace.
	SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

	// Start a warehouse.
	//
	// Starts a SQL warehouse.
	Start(ctx context.Context, startRequest StartRequest) (*WaitGetWarehouseRunning[struct{}], error)

	// Calls [WarehousesAPIInterface.Start] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [WarehousesAPIInterface.Start].Get() or [WarehousesAPIInterface.WaitGetWarehouseRunning]
	StartAndWait(ctx context.Context, startRequest StartRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

	// Stop a warehouse.
	//
	// Stops a SQL warehouse.
	Stop(ctx context.Context, stopRequest StopRequest) (*WaitGetWarehouseStopped[struct{}], error)

	// Calls [WarehousesAPIInterface.Stop] and waits to reach STOPPED state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[GetWarehouseResponse](60*time.Minute) functional option.
	//
	// Deprecated: use [WarehousesAPIInterface.Stop].Get() or [WarehousesAPIInterface.WaitGetWarehouseStopped]
	StopAndWait(ctx context.Context, stopRequest StopRequest, options ...retries.Option[GetWarehouseResponse]) (*GetWarehouseResponse, error)

	// Update SQL warehouse permissions.
	//
	// Updates the permissions on a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)
}

type WarehousesService

// WarehousesService defines the low-level SQL warehouse operations. A SQL
// warehouse is a compute resource that lets you run SQL commands on data
// objects within Databricks SQL.
type WarehousesService interface {

	// Create a warehouse.
	//
	// Creates a new SQL warehouse.
	Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error)

	// Delete a warehouse.
	//
	// Deletes a SQL warehouse.
	Delete(ctx context.Context, request DeleteWarehouseRequest) error

	// Update a warehouse.
	//
	// Updates the configuration for a SQL warehouse.
	Edit(ctx context.Context, request EditWarehouseRequest) error

	// Get warehouse info.
	//
	// Gets the information for a single SQL warehouse.
	Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error)

	// Get SQL warehouse permission levels.
	//
	// Gets the permission levels that a user can have on an object.
	GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error)

	// Get SQL warehouse permissions.
	//
	// Gets the permissions of a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error)

	// Get the workspace configuration.
	//
	// Gets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error)

	// List warehouses.
	//
	// Lists all SQL warehouses that a user has manager permissions on.
	//
	// Use ListAll() to get all EndpointInfo instances
	List(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error)

	// Set SQL warehouse permissions.
	//
	// Sets permissions on an object, replacing existing permissions if they
	// exist. Deletes all direct permissions if none are specified. Objects can
	// inherit permissions from their root object.
	SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)

	// Set the workspace configuration.
	//
	// Sets the workspace level configuration that is shared by all SQL
	// warehouses in a workspace.
	SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error

	// Start a warehouse.
	//
	// Starts a SQL warehouse.
	Start(ctx context.Context, request StartRequest) error

	// Stop a warehouse.
	//
	// Stops a SQL warehouse.
	Stop(ctx context.Context, request StopRequest) error

	// Update SQL warehouse permissions.
	//
	// Updates the permissions on a SQL warehouse. SQL warehouses can inherit
	// permissions from their root object.
	UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error)
}

A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL. Compute resources are infrastructure resources that provide processing capabilities in the cloud.

type Widget

// Widget is a single widget placed on a legacy dashboard, pairing an
// optional visualization with display/layout options.
type Widget struct {
	// The unique ID for this widget.
	Id string `json:"id,omitempty"`

	// Display and layout options for this widget (title, position, etc.).
	Options *WidgetOptions `json:"options,omitempty"`
	// The visualization description API changes frequently and is unsupported.
	// You can duplicate a visualization by copying description objects received
	// _from the API_ and then using them to create a new one with a POST
	// request to the same endpoint. Databricks does not recommend constructing
	// ad-hoc visualizations entirely in JSON.
	Visualization *LegacyVisualization `json:"visualization,omitempty"`
	// Unused field.
	Width int `json:"width,omitempty"`

	// Field names (e.g. "Width") to serialize even when they hold their
	// zero value; excluded from the JSON payload itself via the "-" tag.
	// NOTE(review): presumably consumed by the type's custom MarshalJSON —
	// confirm against the SDK marshaller.
	ForceSendFields []string `json:"-"`
}

func (Widget) MarshalJSON added in v0.23.0

func (s Widget) MarshalJSON() ([]byte, error)

func (*Widget) UnmarshalJSON added in v0.23.0

func (s *Widget) UnmarshalJSON(b []byte) error

type WidgetOptions

// WidgetOptions carries the display and layout settings of a dashboard
// widget: title, description, visibility, position, and parameter wiring.
type WidgetOptions struct {
	// Timestamp when this object was created
	CreatedAt string `json:"created_at,omitempty"`
	// Custom description of the widget
	Description string `json:"description,omitempty"`
	// Whether this widget is hidden on the dashboard.
	IsHidden bool `json:"isHidden,omitempty"`
	// How parameters used by the visualization in this widget relate to other
	// widgets on the dashboard. Databricks does not recommend modifying this
	// definition in JSON.
	// NOTE(review): typed `any` because the API imposes no fixed schema here.
	ParameterMappings any `json:"parameterMappings,omitempty"`
	// Coordinates of this widget on a dashboard. This portion of the API
	// changes frequently and is unsupported.
	Position *WidgetPosition `json:"position,omitempty"`
	// Custom title of the widget
	Title string `json:"title,omitempty"`
	// Timestamp of the last time this object was updated.
	UpdatedAt string `json:"updated_at,omitempty"`

	// Field names to serialize even when they hold their zero value;
	// excluded from the JSON payload itself via the "-" tag.
	// NOTE(review): presumably consumed by the type's custom MarshalJSON —
	// confirm against the SDK marshaller.
	ForceSendFields []string `json:"-"`
}

func (WidgetOptions) MarshalJSON added in v0.23.0

func (s WidgetOptions) MarshalJSON() ([]byte, error)

func (*WidgetOptions) UnmarshalJSON added in v0.23.0

func (s *WidgetOptions) UnmarshalJSON(b []byte) error

type WidgetPosition added in v0.19.0

// WidgetPosition describes where a widget sits on the dashboard grid and
// how many grid cells it spans. This portion of the API changes frequently
// and is unsupported.
type WidgetPosition struct {
	// reserved for internal use
	AutoHeight bool `json:"autoHeight,omitempty"`
	// column in the dashboard grid. Values start with 0
	Col int `json:"col,omitempty"`
	// row in the dashboard grid. Values start with 0
	Row int `json:"row,omitempty"`
	// width of the widget measured in dashboard grid cells
	SizeX int `json:"sizeX,omitempty"`
	// height of the widget measured in dashboard grid cells
	SizeY int `json:"sizeY,omitempty"`

	// Field names (e.g. "Col", "Row") to serialize even when they hold
	// their zero value — needed here because 0 is a meaningful coordinate;
	// excluded from the JSON payload itself via the "-" tag.
	ForceSendFields []string `json:"-"`
}

Coordinates of this widget on a dashboard. This portion of the API changes frequently and is unsupported.

func (WidgetPosition) MarshalJSON added in v0.23.0

func (s WidgetPosition) MarshalJSON() ([]byte, error)

func (*WidgetPosition) UnmarshalJSON added in v0.23.0

func (s *WidgetPosition) UnmarshalJSON(b []byte) error

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL