Documentation ¶
Index ¶
- Constants
- func DataSourceCluster() common.Resource
- func DataSourceClusterZones() common.Resource
- func DataSourceClusters() common.Resource
- func DataSourceNodeType() common.Resource
- func DataSourceSparkVersion() common.Resource
- func FixInstancePoolChangeIfAny(d *schema.ResourceData, cluster any) error
- func LatestSparkVersionOrDefault(ctx context.Context, w *databricks.WorkspaceClient, ...) string
- func ModifyRequestOnInstancePool(cluster any) error
- func ResourceCluster() common.Resource
- func ResourceLibrary() common.Resource
- func SetForceSendFieldsForCluster(cluster any, d *schema.ResourceData) error
- func SparkConfDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool
- func StartClusterAndGetInfo(ctx context.Context, w *databricks.WorkspaceClient, clusterID string) (*compute.ClusterDetails, error)
- func ZoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool
- type AbfssStorageInfo
- type AutoScale
- type Availability
- type AwsAttributes
- type AzureAttributes
- type AzureDiskVolumeType
- type Cluster
- type ClusterEvent
- type ClusterEventType
- type ClusterID
- type ClusterInfo
- type ClusterList
- type ClusterSize
- type ClusterSpec
- type ClusterState
- type ClustersAPI
- func (a ClustersAPI) Context() context.Context
- func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
- func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
- func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
- func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
- func (a ClustersAPI) GetSmallestNodeType(request compute.NodeTypeRequest) string
- func (a ClustersAPI) List() ([]ClusterInfo, error)
- func (a ClustersAPI) ListZones() (ZonesInfo, error)
- func (a ClustersAPI) PermanentDelete(clusterID string) error
- func (a ClustersAPI) Pin(clusterID string) error
- func (a ClustersAPI) Resize(resizeRequest ResizeRequest) (info ClusterInfo, err error)
- func (a ClustersAPI) Start(clusterID string) error
- func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
- func (a ClustersAPI) Terminate(clusterID string) error
- func (a ClustersAPI) Unpin(clusterID string) error
- func (a ClustersAPI) WorkspaceClient() *databricks.WorkspaceClient
- type DbfsStorageInfo
- type DockerBasicAuth
- type DockerImage
- type EbsVolumeType
- type EventDetails
- type EventsRequest
- type EventsResponse
- type GcpAttributes
- type GcsStorageInfo
- type InitScriptStorageInfo
- type LibraryResource
- type LibraryWithAlias
- type LocalFileInfo
- type LogSyncStatus
- type MountInfo
- type NetworkFileSystemInfo
- type ResizeCause
- type ResizeRequest
- type S3StorageInfo
- type SortOrder
- type SparkNode
- type SparkNodeAwsAttributes
- type StorageInfo
- type TerminationReason
- type WorkloadType
- type WorkloadTypeClients
- type WorkspaceFileInfo
- type ZonesInfo
Constants ¶
const (
	// AwsAvailabilitySpot is spot instance type for clusters
	AwsAvailabilitySpot = "SPOT"
	// AwsAvailabilityOnDemand is OnDemand instance type for clusters
	AwsAvailabilityOnDemand = "ON_DEMAND"
	// AwsAvailabilitySpotWithFallback is Spot instance type for clusters with the option
	// to fall back to on-demand if the instance cannot be acquired
	AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK"
)
const (
	// AzureAvailabilitySpot is spot instance type for clusters
	AzureAvailabilitySpot = "SPOT_AZURE"
	// AzureAvailabilityOnDemand is OnDemand instance type for clusters
	AzureAvailabilityOnDemand = "ON_DEMAND_AZURE"
	// AzureAvailabilitySpotWithFallback is Spot instance type for clusters with the option
	// to fall back to on-demand if the instance cannot be acquired
	AzureAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK_AZURE"
)
https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#--azureavailability
const (
	// GcpAvailabilityPreemptible is Preemptible instance type for clusters
	GcpAvailabilityPreemptible = "PREEMPTIBLE_GCP"
	// GcpAvailabilityOnDemand is OnDemand instance type for clusters
	GcpAvailabilityOnDemand = "ON_DEMAND_GCP"
	// GcpAvailabilityPreemptibleWithFallback is Preemptible instance type for clusters with the option
	// to fall back to on-demand if the instance cannot be acquired
	GcpAvailabilityPreemptibleWithFallback = "PREEMPTIBLE_WITH_FALLBACK_GCP"
)
https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#gcpavailability
const (
	// AzureDiskVolumeTypeStandard is for standard locally redundant storage
	AzureDiskVolumeTypeStandard = "STANDARD_LRS"
	// AzureDiskVolumeTypePremium is for premium locally redundant storage
	AzureDiskVolumeTypePremium = "PREMIUM_LRS"
)
const (
	// EbsVolumeTypeGeneralPurposeSsd is general purpose SSD (starts at 32 GB)
	EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD"
	// EbsVolumeTypeThroughputOptimizedHdd is throughput optimized HDD (starts at 500 GB)
	EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD"
)
const (
	// ClusterStatePending Indicates that a cluster is in the process of being created.
	ClusterStatePending = "PENDING"
	// ClusterStateRunning Indicates that a cluster has been started and is ready for use.
	ClusterStateRunning = "RUNNING"
	// ClusterStateRestarting Indicates that a cluster is in the process of restarting.
	ClusterStateRestarting = "RESTARTING"
	// ClusterStateResizing Indicates that a cluster is in the process of adding or removing nodes.
	ClusterStateResizing = "RESIZING"
	// ClusterStateTerminating Indicates that a cluster is in the process of being destroyed.
	ClusterStateTerminating = "TERMINATING"
	// ClusterStateTerminated Indicates that a cluster has been successfully destroyed.
	ClusterStateTerminated = "TERMINATED"
	// ClusterStateError This state is not used anymore. It was used to indicate a cluster
	// that failed to be created. Terminating and Terminated are used instead.
	ClusterStateError = "ERROR"
	// ClusterStateUnknown Indicates that a cluster is in an unknown state. A cluster should never be in this state.
	ClusterStateUnknown = "UNKNOWN"
)
const DbfsDeprecationWarning = "For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'."
const DefaultProvisionTimeout = 30 * time.Minute
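As a sketch of how these constants fit together, the availability values plug into the cloud attribute structs defined below. The field values here are illustrative, not recommendations, and the snippet assumes this package is imported as clusters:

// Request spot instances with an on-demand fallback on AWS.
// FirstOnDemand keeps the first node (the driver) on-demand.
attrs := clusters.AwsAttributes{
	Availability:  clusters.AwsAvailabilitySpotWithFallback,
	FirstOnDemand: 1,
}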
Variables ¶
This section is empty.
Functions ¶
func DataSourceCluster ¶ added in v1.1.0
func DataSourceCluster() common.Resource
func DataSourceClusterZones ¶
func DataSourceClusterZones() common.Resource
DataSourceClusterZones returns a data source that lists the availability zones reported by the cloud service provider.
func DataSourceClusters ¶
func DataSourceClusters() common.Resource
func DataSourceNodeType ¶
func DataSourceNodeType() common.Resource
DataSourceNodeType returns the smallest matching node type, depending on the cloud
func DataSourceSparkVersion ¶
func DataSourceSparkVersion() common.Resource
DataSourceSparkVersion returns the DBR version matching the specification
func FixInstancePoolChangeIfAny ¶ added in v1.40.0
func FixInstancePoolChangeIfAny(d *schema.ResourceData, cluster any) error
This function is a duplicate of the FixInstancePoolChangeIfAny(d *schema.ResourceData) method in clusters/clusters_api.go, rewritten to use the Go SDK. Long term, the clusters_api.go version will be removed once all the resources using clusters are migrated to the Go SDK. https://github.com/databricks/terraform-provider-databricks/issues/824
func LatestSparkVersionOrDefault ¶ added in v1.49.1
func LatestSparkVersionOrDefault(ctx context.Context, w *databricks.WorkspaceClient, svr compute.SparkVersionRequest) string
LatestSparkVersionOrDefault returns the Spark version matching the definition, or a default in case of error
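A sketch of a typical call, assuming ctx is a context.Context and the workspace client comes from the Databricks Go SDK; the request shown selects the latest LTS runtime:

w, err := databricks.NewWorkspaceClient()
if err != nil {
	log.Fatal(err)
}
version := clusters.LatestSparkVersionOrDefault(ctx, w, compute.SparkVersionRequest{
	Latest:          true,
	LongTermSupport: true,
})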
func ModifyRequestOnInstancePool ¶ added in v1.40.0
func ModifyRequestOnInstancePool(cluster any) error
This function is a duplicate of the ModifyRequestOnInstancePool() method in clusters/clusters_api.go, rewritten to use the Go SDK. Long term, the clusters_api.go version will be removed once all the resources using clusters are migrated to the Go SDK.
func ResourceCluster ¶
func ResourceCluster() common.Resource
func ResourceLibrary ¶
func ResourceLibrary() common.Resource
func SetForceSendFieldsForCluster ¶ added in v1.47.0
func SetForceSendFieldsForCluster(cluster any, d *schema.ResourceData) error
func SparkConfDiffSuppressFunc ¶
func SparkConfDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool
func StartClusterAndGetInfo ¶ added in v1.31.0
func StartClusterAndGetInfo(ctx context.Context, w *databricks.WorkspaceClient, clusterID string) (*compute.ClusterDetails, error)
StartClusterAndGetInfo starts the cluster and returns its details
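For example, assuming ctx and a *databricks.WorkspaceClient named w as above (the cluster ID is illustrative):

details, err := clusters.StartClusterAndGetInfo(ctx, w, "1234-567890-abcde123")
if err != nil {
	log.Fatal(err)
}
log.Printf("cluster %s is %s", details.ClusterId, details.State)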
func ZoneDiffSuppress ¶
func ZoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool
Types ¶
type AbfssStorageInfo ¶ added in v1.7.0
type AbfssStorageInfo struct {
Destination string `json:"destination"`
}
AbfssStorageInfo contains the destination for storing files in ADLS
type AutoScale ¶
type AutoScale struct {
	MinWorkers int32 `json:"min_workers,omitempty"`
	MaxWorkers int32 `json:"max_workers,omitempty"`
}
AutoScale is a struct that describes auto scaling for clusters
type Availability ¶
type Availability string
Availability is a type for describing instance availability (spot or on-demand) on cluster nodes
type AwsAttributes ¶
type AwsAttributes struct {
	FirstOnDemand       int32         `json:"first_on_demand,omitempty"`
	Availability        Availability  `json:"availability,omitempty"`
	ZoneID              string        `json:"zone_id,omitempty"`
	InstanceProfileArn  string        `json:"instance_profile_arn,omitempty"`
	SpotBidPricePercent int32         `json:"spot_bid_price_percent,omitempty"`
	EbsVolumeType       EbsVolumeType `json:"ebs_volume_type,omitempty"`
	EbsVolumeCount      int32         `json:"ebs_volume_count,omitempty"`
	EbsVolumeSize       int32         `json:"ebs_volume_size,omitempty"`
}
AwsAttributes encapsulates the AWS-specific attributes for AWS-based clusters https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterattributes
type AzureAttributes ¶
type AzureAttributes struct {
	FirstOnDemand   int32        `json:"first_on_demand,omitempty"`
	Availability    Availability `json:"availability,omitempty"`
	SpotBidMaxPrice float64      `json:"spot_bid_max_price,omitempty"`
}
AzureAttributes encapsulates the Azure-specific attributes for Azure-based clusters https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/clusters#clusterazureattributes
type AzureDiskVolumeType ¶
type AzureDiskVolumeType string
AzureDiskVolumeType is the disk volume type on Azure VMs
type Cluster ¶
type Cluster struct {
	ClusterID                 string                  `json:"cluster_id,omitempty"`
	ClusterName               string                  `json:"cluster_name,omitempty"`
	SparkVersion              string                  `json:"spark_version"`
	NumWorkers                int32                   `json:"num_workers" tf:"group:size"`
	Autoscale                 *AutoScale              `json:"autoscale,omitempty" tf:"group:size"`
	EnableElasticDisk         bool                    `json:"enable_elastic_disk,omitempty" tf:"computed"`
	EnableLocalDiskEncryption bool                    `json:"enable_local_disk_encryption,omitempty" tf:"computed"`
	NodeTypeID                string                  `json:"node_type_id,omitempty" tf:"group:node_type,computed"`
	DriverNodeTypeID          string                  `json:"driver_node_type_id,omitempty" tf:"group:node_type,computed"`
	InstancePoolID            string                  `json:"instance_pool_id,omitempty" tf:"group:node_type"`
	DriverInstancePoolID      string                  `json:"driver_instance_pool_id,omitempty" tf:"group:node_type,computed"`
	AwsAttributes             *AwsAttributes          `json:"aws_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
	AzureAttributes           *AzureAttributes        `json:"azure_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
	GcpAttributes             *GcpAttributes          `json:"gcp_attributes,omitempty" tf:"conflicts:instance_pool_id,suppress_diff"`
	AutoterminationMinutes    int32                   `json:"autotermination_minutes,omitempty"`
	PolicyID                  string                  `json:"policy_id,omitempty"`
	ApplyPolicyDefaultValues  bool                    `json:"apply_policy_default_values,omitempty"`
	SparkConf                 map[string]string       `json:"spark_conf,omitempty"`
	SparkEnvVars              map[string]string       `json:"spark_env_vars,omitempty"`
	CustomTags                map[string]string       `json:"custom_tags,omitempty"`
	SSHPublicKeys             []string                `json:"ssh_public_keys,omitempty" tf:"max_items:10"`
	InitScripts               []InitScriptStorageInfo `json:"init_scripts,omitempty" tf:"max_items:10"`
	ClusterLogConf            *StorageInfo            `json:"cluster_log_conf,omitempty"`
	DockerImage               *DockerImage            `json:"docker_image,omitempty"`
	DataSecurityMode          string                  `json:"data_security_mode,omitempty" tf:"suppress_diff"`
	SingleUserName            string                  `json:"single_user_name,omitempty"`
	IdempotencyToken          string                  `json:"idempotency_token,omitempty" tf:"force_new"`
	WorkloadType              *WorkloadType           `json:"workload_type,omitempty"`
	RuntimeEngine             string                  `json:"runtime_engine,omitempty"`
	ClusterMounts             []MountInfo             `json:"cluster_mount_infos,omitempty" tf:"alias:cluster_mount_info"`
}
Cluster holds the configuration used when submitting API calls to create or edit a cluster
func (*Cluster) FixInstancePoolChangeIfAny ¶ added in v1.23.0
func (cluster *Cluster) FixInstancePoolChangeIfAny(d *schema.ResourceData)
TODO: Remove this once all the resources using clusters are migrated to the Go SDK. They would then use FixInstancePoolChangeIfAny(d *schema.ResourceData, cluster compute.CreateCluster) defined in resource_cluster.go, which is a duplicate of this method but uses the Go SDK. https://github.com/databricks/terraform-provider-databricks/issues/824
func (*Cluster) ModifyRequestOnInstancePool ¶
func (cluster *Cluster) ModifyRequestOnInstancePool()
ModifyRequestOnInstancePool removes all request fields that must not be submitted when an instance pool is selected. TODO: Remove this once all the resources using clusters are migrated to the Go SDK; they would then use ModifyRequestOnInstancePool(cluster *compute.CreateCluster) defined in resource_cluster.go, which is a duplicate of this method but uses the Go SDK.
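A sketch of the intended use; after the call, fields that conflict with the instance pool (such as the node type) are expected to be cleared from the request (IDs are illustrative):

c := clusters.Cluster{
	InstancePoolID: "pool-0123456789abcdef",
	NodeTypeID:     "i3.xlarge", // conflicts with the pool; expected to be dropped
}
c.ModifyRequestOnInstancePool()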
type ClusterEvent ¶
type ClusterEvent struct {
	ClusterID string           `json:"cluster_id"`
	Timestamp int64            `json:"timestamp"`
	Type      ClusterEventType `json:"type"`
	Details   EventDetails     `json:"details"`
}
ClusterEvent - event information https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventsclusterevent
type ClusterEventType ¶
type ClusterEventType string
ClusterEventType - constants for API
const (
	EvTypeCreating            ClusterEventType = "CREATING"
	EvTypeDidNotExpandDisk    ClusterEventType = "DID_NOT_EXPAND_DISK"
	EvTypeExpandedDisk        ClusterEventType = "EXPANDED_DISK"
	EvTypeFailedToExpandDisk  ClusterEventType = "FAILED_TO_EXPAND_DISK"
	EvTypeInitScriptsStarting ClusterEventType = "INIT_SCRIPTS_STARTING"
	EvTypeInitScriptsFinished ClusterEventType = "INIT_SCRIPTS_FINISHED"
	EvTypeStarting            ClusterEventType = "STARTING"
	EvTypeRestarting          ClusterEventType = "RESTARTING"
	EvTypeTerminating         ClusterEventType = "TERMINATING"
	EvTypeEdited              ClusterEventType = "EDITED"
	EvTypeRunning             ClusterEventType = "RUNNING"
	EvTypeResizing            ClusterEventType = "RESIZING"
	EvTypeUpsizeCompleted     ClusterEventType = "UPSIZE_COMPLETED"
	EvTypeNodesLost           ClusterEventType = "NODES_LOST"
	EvTypeDriverHealthy       ClusterEventType = "DRIVER_HEALTHY"
	EvTypeSparkException      ClusterEventType = "SPARK_EXCEPTION"
	EvTypeDriverNotResponding ClusterEventType = "DRIVER_NOT_RESPONDING"
	EvTypeDbfsDown            ClusterEventType = "DBFS_DOWN"
	EvTypeMetastoreDown       ClusterEventType = "METASTORE_DOWN"
	EvTypeNodeBlacklisted     ClusterEventType = "NODE_BLACKLISTED"
	EvTypePinned              ClusterEventType = "PINNED"
	EvTypeUnpinned            ClusterEventType = "UNPINNED"
)
Constants for Event Types
type ClusterID ¶
type ClusterID struct {
ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
}
ClusterID holds cluster ID
type ClusterInfo ¶
type ClusterInfo struct {
	NumWorkers                int32                   `json:"num_workers,omitempty"`
	AutoScale                 *AutoScale              `json:"autoscale,omitempty"`
	ClusterID                 string                  `json:"cluster_id,omitempty"`
	CreatorUserName           string                  `json:"creator_user_name,omitempty"`
	Driver                    *SparkNode              `json:"driver,omitempty"`
	Executors                 []SparkNode             `json:"executors,omitempty"`
	SparkContextID            int64                   `json:"spark_context_id,omitempty"`
	JdbcPort                  int32                   `json:"jdbc_port,omitempty"`
	ClusterName               string                  `json:"cluster_name,omitempty"`
	SparkVersion              string                  `json:"spark_version"`
	SparkConf                 map[string]string       `json:"spark_conf,omitempty"`
	AwsAttributes             *AwsAttributes          `json:"aws_attributes,omitempty"`
	AzureAttributes           *AzureAttributes        `json:"azure_attributes,omitempty"`
	GcpAttributes             *GcpAttributes          `json:"gcp_attributes,omitempty"`
	NodeTypeID                string                  `json:"node_type_id,omitempty"`
	DriverNodeTypeID          string                  `json:"driver_node_type_id,omitempty"`
	SSHPublicKeys             []string                `json:"ssh_public_keys,omitempty"`
	CustomTags                map[string]string       `json:"custom_tags,omitempty"`
	ClusterLogConf            *StorageInfo            `json:"cluster_log_conf,omitempty"`
	InitScripts               []InitScriptStorageInfo `json:"init_scripts,omitempty"`
	SparkEnvVars              map[string]string       `json:"spark_env_vars,omitempty"`
	AutoterminationMinutes    int32                   `json:"autotermination_minutes,omitempty"`
	EnableElasticDisk         bool                    `json:"enable_elastic_disk,omitempty"`
	EnableLocalDiskEncryption bool                    `json:"enable_local_disk_encryption,omitempty"`
	InstancePoolID            string                  `json:"instance_pool_id,omitempty"`
	DriverInstancePoolID      string                  `json:"driver_instance_pool_id,omitempty" tf:"computed"`
	PolicyID                  string                  `json:"policy_id,omitempty"`
	SingleUserName            string                  `json:"single_user_name,omitempty"`
	ClusterSource             Availability            `json:"cluster_source" tf:"computed"`
	DockerImage               *DockerImage            `json:"docker_image,omitempty"`
	State                     ClusterState            `json:"state"`
	StateMessage              string                  `json:"state_message,omitempty"`
	StartTime                 int64                   `json:"start_time,omitempty"`
	TerminateTime             int64                   `json:"terminate_time,omitempty"`
	LastStateLossTime         int64                   `json:"last_state_loss_time,omitempty"`
	LastActivityTime          int64                   `json:"last_activity_time,omitempty"`
	ClusterMemoryMb           int64                   `json:"cluster_memory_mb,omitempty"`
	ClusterCores              float64                 `json:"cluster_cores,omitempty"`
	DefaultTags               map[string]string       `json:"default_tags"`
	ClusterLogStatus          *LogSyncStatus          `json:"cluster_log_status,omitempty"`
	TerminationReason         *TerminationReason      `json:"termination_reason,omitempty"`
	DataSecurityMode          string                  `json:"data_security_mode,omitempty"`
	RuntimeEngine             string                  `json:"runtime_engine,omitempty"`
}
ClusterInfo contains the information returned when fetching cluster details via the Get request.
func (*ClusterInfo) IsRunningOrResizing ¶
func (ci *ClusterInfo) IsRunningOrResizing() bool
IsRunningOrResizing returns true if the cluster is running or resizing
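For example, as a guard before targeting a cluster with new work (a is a ClustersAPI; the ID is illustrative):

ci, err := a.Get("1234-567890-abcde123")
if err == nil && ci.IsRunningOrResizing() {
	// safe to submit work to the cluster
}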
type ClusterList ¶
type ClusterList struct {
Clusters []ClusterInfo `json:"clusters,omitempty"`
}
ClusterList shows existing clusters
type ClusterSize ¶
type ClusterSize struct {
	NumWorkers int32      `json:"num_workers"`
	AutoScale  *AutoScale `json:"autoscale"`
}
ClusterSize is the structure documented at https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclustersize
type ClusterSpec ¶ added in v1.36.3
type ClusterSpec struct {
	compute.ClusterSpec
	LibraryWithAlias
}
func (ClusterSpec) CustomizeSchema ¶ added in v1.36.3
func (ClusterSpec) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema
func (ClusterSpec) CustomizeSchemaResourceSpecific ¶ added in v1.43.0
func (ClusterSpec) CustomizeSchemaResourceSpecific(s *common.CustomizableSchema) *common.CustomizableSchema
type ClusterState ¶
type ClusterState string
ClusterState is for describing possible cluster states
func (ClusterState) CanReach ¶
func (state ClusterState) CanReach(desired ClusterState) bool
CanReach returns true if the cluster state can reach the desired state
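A sketch; the exact transition table is defined by the implementation, but PENDING reaching RUNNING is the canonical case:

state := clusters.ClusterState(clusters.ClusterStatePending)
if state.CanReach(clusters.ClusterStateRunning) {
	// keep polling: the cluster can still become RUNNING
}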
type ClustersAPI ¶
type ClustersAPI struct {
// contains filtered or unexported fields
}
ClustersAPI wraps the Databricks API client used to perform cluster queries
func NewClustersAPI ¶
func NewClustersAPI(ctx context.Context, m any) ClustersAPI
NewClustersAPI creates ClustersAPI instance from provider meta
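A sketch of the usual call site, assuming m is the provider meta supplied by the Terraform plugin SDK and clusterID is already known:

a := clusters.NewClustersAPI(ctx, m)
ci, err := a.Get(clusterID)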
func (ClustersAPI) Context ¶ added in v1.49.1
func (a ClustersAPI) Context() context.Context
Context is a temporary function, to be used until all resources are migrated to the Go SDK. It returns the context.
func (ClustersAPI) Create ¶
func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error)
Create creates a new Spark cluster and waits until it is running
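A sketch of creating a small autoterminating cluster; all values are illustrative, and Create blocks until the cluster reaches the RUNNING state:

info, err := a.Create(clusters.Cluster{
	ClusterName:            "example",
	SparkVersion:           "13.3.x-scala2.12", // illustrative DBR version
	NodeTypeID:             "i3.xlarge",        // illustrative node type
	NumWorkers:             2,
	AutoterminationMinutes: 60,
})
if err != nil {
	log.Fatal(err)
}
log.Printf("created %s in state %s", info.ClusterID, info.State)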
func (ClustersAPI) Edit ¶
func (a ClustersAPI) Edit(cluster Cluster) (info ClusterInfo, err error)
Edit edits the configuration of a cluster to match the provided attributes and size
func (ClustersAPI) Events ¶
func (a ClustersAPI) Events(eventsRequest EventsRequest) ([]ClusterEvent, error)
Events returns all events for a cluster, given only its cluster ID https://docs.databricks.com/dev-tools/api/latest/clusters.html#events
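A sketch of fetching recent lifecycle events; field values are illustrative, and MaxItems is a client-side cap (note its json:"-" tag, so it is never sent to the API):

events, err := a.Events(clusters.EventsRequest{
	ClusterID: "1234-567890-abcde123",
	Order:     "DESC", // newest first
	EventTypes: []clusters.ClusterEventType{
		clusters.EvTypeStarting,
		clusters.EvTypeTerminating,
	},
	MaxItems: 25,
})
if err != nil {
	log.Fatal(err)
}
for _, e := range events {
	log.Printf("%d %s", e.Timestamp, e.Type)
}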
func (ClustersAPI) Get ¶
func (a ClustersAPI) Get(clusterID string) (ci ClusterInfo, err error)
Get retrieves the information for a cluster given its identifier
func (ClustersAPI) GetOrCreateRunningCluster ¶
func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error)
GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist
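A sketch; the optional variadic overrides the defaults of the cluster that would be created (the name and the override are illustrative):

ci, err := a.GetOrCreateRunningCluster("terraform-tooling", clusters.Cluster{
	NumWorkers: 1,
})
if err != nil {
	log.Fatal(err)
}
log.Println("using cluster", ci.ClusterID)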
func (ClustersAPI) GetSmallestNodeType ¶
func (a ClustersAPI) GetSmallestNodeType(request compute.NodeTypeRequest) string
func (ClustersAPI) List ¶
func (a ClustersAPI) List() ([]ClusterInfo, error)
List returns information about all pinned clusters, currently active clusters, up to 70 of the most recently terminated interactive clusters in the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days
func (ClustersAPI) ListZones ¶
func (a ClustersAPI) ListZones() (ZonesInfo, error)
ListZones returns the zones info sent by the cloud service provider
func (ClustersAPI) PermanentDelete ¶
func (a ClustersAPI) PermanentDelete(clusterID string) error
PermanentDelete permanently deletes a cluster
func (ClustersAPI) Pin ¶
func (a ClustersAPI) Pin(clusterID string) error
Pin ensures that an interactive cluster configuration is retained even after the cluster has been terminated for more than 30 days
func (ClustersAPI) Resize ¶ added in v1.2.1
func (a ClustersAPI) Resize(resizeRequest ResizeRequest) (info ClusterInfo, err error)
Resize can only be used when the cluster is in the RUNNING state
func (ClustersAPI) Start ¶
func (a ClustersAPI) Start(clusterID string) error
Start starts a terminated Spark cluster given its ID and waits until it is running
func (ClustersAPI) StartAndGetInfo ¶
func (a ClustersAPI) StartAndGetInfo(clusterID string) (ClusterInfo, error)
StartAndGetInfo starts the cluster and returns its info
func (ClustersAPI) Terminate ¶
func (a ClustersAPI) Terminate(clusterID string) error
Terminate terminates a Spark cluster given its ID
func (ClustersAPI) Unpin ¶
func (a ClustersAPI) Unpin(clusterID string) error
Unpin allows the cluster to eventually be removed from the list returned by the List API
func (ClustersAPI) WorkspaceClient ¶ added in v1.49.1
func (a ClustersAPI) WorkspaceClient() *databricks.WorkspaceClient
WorkspaceClient is a temporary function, to be used until all resources are migrated to the Go SDK. It creates a workspace client.
type DbfsStorageInfo ¶
type DbfsStorageInfo struct {
Destination string `json:"destination"`
}
DbfsStorageInfo contains the destination string for DBFS
type DockerBasicAuth ¶
type DockerBasicAuth struct {
	Username string `json:"username"`
	Password string `json:"password" tf:"sensitive"`
}
DockerBasicAuth contains the auth information when fetching containers
type DockerImage ¶
type DockerImage struct {
	URL       string           `json:"url"`
	BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"`
}
DockerImage contains the image URL and the auth for Databricks Container Services (DCS)
type EventDetails ¶
type EventDetails struct {
	CurrentNumWorkers   int32              `json:"current_num_workers,omitempty"`
	TargetNumWorkers    int32              `json:"target_num_workers,omitempty"`
	PreviousAttributes  *AwsAttributes     `json:"previous_attributes,omitempty"`
	Attributes          *AwsAttributes     `json:"attributes,omitempty"`
	PreviousClusterSize *ClusterSize       `json:"previous_cluster_size,omitempty"`
	ClusterSize         *ClusterSize       `json:"cluster_size,omitempty"`
	ResizeCause         *ResizeCause       `json:"cause,omitempty"`
	Reason              *TerminationReason `json:"reason,omitempty"`
	User                string             `json:"user"`
}
EventDetails - details about specific events https://docs.databricks.com/dev-tools/api/latest/clusters.html#clustereventseventdetails
type EventsRequest ¶
type EventsRequest struct {
	ClusterID  string             `json:"cluster_id"`
	StartTime  int64              `json:"start_time,omitempty"`
	EndTime    int64              `json:"end_time,omitempty"`
	Order      SortOrder          `json:"order,omitempty"`
	EventTypes []ClusterEventType `json:"event_types,omitempty"`
	Offset     int64              `json:"offset,omitempty"`
	Limit      int64              `json:"limit,omitempty"`
	MaxItems   uint               `json:"-"`
}
EventsRequest - request structure https://docs.databricks.com/dev-tools/api/latest/clusters.html#request-structure
type EventsResponse ¶
type EventsResponse struct {
	Events     []ClusterEvent `json:"events"`
	NextPage   *EventsRequest `json:"next_page"`
	TotalCount int64          `json:"total_count"`
}
EventsResponse - response from the API https://docs.databricks.com/dev-tools/api/latest/clusters.html#response-structure
type GcpAttributes ¶
type GcpAttributes struct {
	UsePreemptibleExecutors bool         `json:"use_preemptible_executors,omitempty"`
	GoogleServiceAccount    string       `json:"google_service_account,omitempty"`
	Availability            Availability `json:"availability,omitempty"`
	BootDiskSize            int32        `json:"boot_disk_size,omitempty"`
	ZoneId                  string       `json:"zone_id,omitempty"`
	LocalSsdCount           int32        `json:"local_ssd_count,omitempty"`
}
GcpAttributes encapsulates GCP-specific attributes https://docs.gcp.databricks.com/dev-tools/api/latest/clusters.html#clustergcpattributes
type GcsStorageInfo ¶
type GcsStorageInfo struct {
Destination string `json:"destination"`
}
GcsStorageInfo contains the destination for storing files in GCS
type InitScriptStorageInfo ¶
type InitScriptStorageInfo struct {
	Dbfs      *DbfsStorageInfo            `json:"dbfs,omitempty" tf:"group:storage"`
	Gcs       *GcsStorageInfo             `json:"gcs,omitempty" tf:"group:storage"`
	S3        *S3StorageInfo              `json:"s3,omitempty" tf:"group:storage"`
	Abfss     *AbfssStorageInfo           `json:"abfss,omitempty" tf:"group:storage"`
	File      *LocalFileInfo              `json:"file,omitempty"`
	Workspace *WorkspaceFileInfo          `json:"workspace,omitempty"`
	Volumes   *compute.VolumesStorageInfo `json:"volumes,omitempty"`
}
InitScriptStorageInfo captures the allowed sources of init scripts.
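Given the DbfsDeprecationWarning above, a sketch of the preferred init script sources (destinations are illustrative):

scripts := []clusters.InitScriptStorageInfo{
	{Workspace: &clusters.WorkspaceFileInfo{Destination: "/Shared/init/setup.sh"}},
	{Volumes: &compute.VolumesStorageInfo{Destination: "/Volumes/main/default/scripts/setup.sh"}},
}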
type LibraryResource ¶ added in v1.39.0
func (LibraryResource) CustomizeSchema ¶ added in v1.39.0
func (LibraryResource) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema
func (LibraryResource) CustomizeSchemaResourceSpecific ¶ added in v1.43.0
func (LibraryResource) CustomizeSchemaResourceSpecific(s *common.CustomizableSchema) *common.CustomizableSchema
type LibraryWithAlias ¶ added in v1.48.0
type LocalFileInfo ¶
type LocalFileInfo struct {
Destination string `json:"destination"`
}
LocalFileInfo represents a local file on disk, e.g. in a customer's container.
type LogSyncStatus ¶
type LogSyncStatus struct {
	LastAttempted int64  `json:"last_attempted,omitempty"`
	LastException string `json:"last_exception,omitempty"`
}
LogSyncStatus encapsulates when the cluster logs were last delivered.
type MountInfo ¶ added in v1.9.1
type MountInfo struct {
	NetworkFileSystemInfo *NetworkFileSystemInfo `json:"network_filesystem_info"`
	RemoteMountDirectory  string                 `json:"remote_mount_dir_path,omitempty"`
	LocalMountDirectory   string                 `json:"local_mount_dir_path"`
}
MountInfo provides configuration to mount a network file system
type NetworkFileSystemInfo ¶ added in v1.9.1
type NetworkFileSystemInfo struct {
	ServerAddress string `json:"server_address"`
	MountOptions  string `json:"mount_options,omitempty"`
}
NetworkFileSystemInfo contains information about the network file system server
type ResizeRequest ¶ added in v1.2.1
type S3StorageInfo ¶
type S3StorageInfo struct {
	// TODO: add instance profile validation check + prefix validation
	Destination      string `json:"destination"`
	Region           string `json:"region,omitempty" tf:"group:location"`
	Endpoint         string `json:"endpoint,omitempty" tf:"group:location"`
	EnableEncryption bool   `json:"enable_encryption,omitempty"`
	EncryptionType   string `json:"encryption_type,omitempty"`
	KmsKey           string `json:"kms_key,omitempty"`
	CannedACL        string `json:"canned_acl,omitempty"`
}
S3StorageInfo contains the destination and options for storing files in S3
type SortOrder ¶
type SortOrder string
SortOrder - constants for API https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlistorder
type SparkNode ¶
type SparkNode struct {
	PrivateIP         string                  `json:"private_ip,omitempty"`
	PublicDNS         string                  `json:"public_dns,omitempty"`
	NodeID            string                  `json:"node_id,omitempty"`
	InstanceID        string                  `json:"instance_id,omitempty"`
	StartTimestamp    int64                   `json:"start_timestamp,omitempty"`
	NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"`
	HostPrivateIP     string                  `json:"host_private_ip,omitempty"`
}
SparkNode encapsulates all the attributes of a node that is part of a Databricks cluster
type SparkNodeAwsAttributes ¶
type SparkNodeAwsAttributes struct {
IsSpot bool `json:"is_spot,omitempty"`
}
SparkNodeAwsAttributes indicates whether the node is a spot instance
type StorageInfo ¶
type StorageInfo struct {
	Dbfs *DbfsStorageInfo `json:"dbfs,omitempty" tf:"group:storage"`
	S3   *S3StorageInfo   `json:"s3,omitempty" tf:"group:storage"`
}
StorageInfo contains either DBFS or S3 storage configuration, depending on which one is relevant.
type TerminationReason ¶
type TerminationReason struct {
	Code       string            `json:"code,omitempty"`
	Type       string            `json:"type,omitempty"`
	Parameters map[string]string `json:"parameters,omitempty"`
}
TerminationReason encapsulates the termination code and potential parameters
type WorkloadType ¶ added in v1.1.0
type WorkloadType struct {
Clients *WorkloadTypeClients `json:"clients"`
}
WorkloadType defines which workloads may run on the cluster
type WorkloadTypeClients ¶ added in v1.1.0
type WorkspaceFileInfo ¶ added in v1.15.0
type WorkspaceFileInfo struct {
Destination string `json:"destination"`
}
WorkspaceFileInfo represents a file in the Databricks workspace.