Documentation ¶
Overview ¶
Package databricks is a Go SDK for the Databricks REST API v2.0.
Index ¶
- Constants
- func ParseResponse(resp *http.Response, out interface{}) error
- func TokenClient(ctx context.Context, token string) *http.Client
- type ACLItem
- type ACLPermission
- type AWSAvailability
- type AddBlockToDBFSStreamInput
- type AddBlockToDBFSStreamOutput
- type AddMemberInput
- type AddMemberOutput
- type Autoscale
- type AwsAttributes
- type CancelJobRunInput
- type CancelJobRunOutput
- type Client
- type CloseDBFSStreamInput
- type CloseDBFSStreamOutput
- type Cluster
- type ClusterEvent
- type ClusterEventType
- type ClusterIDParam
- type ClusterInstance
- type ClusterLibrariesParam
- type ClusterLibraryStatus
- type ClusterLogConf
- type ClusterLogSyncStatus
- type ClusterParams
- type ClusterSize
- type ClusterSource
- type ClusterSpec
- type ClusterState
- type ClusterTag
- type ClustersService
- func (s *ClustersService) CreateCluster(in *CreateClusterInput) (*CreateClusterOutput, error)
- func (s *ClustersService) DeleteCluster(in *DeleteClusterInput) (*DeleteClusterOutput, error)
- func (s *ClustersService) DescribeCluster(in *DescribeClusterInput) (*DescribeClusterOutput, error)
- func (s *ClustersService) EditCluster(in *EditClusterInput) (*EditClusterOutput, error)
- func (s *ClustersService) ListClusterEvents(in *ListClusterEventsInput) (*ListClusterEventsOutput, error)
- func (s *ClustersService) ListClusters(in *ListClustersInput) (*ListClustersOutput, error)
- func (s *ClustersService) ListNodeTypes(in *ListNodeTypesInput) (*ListNodeTypesOutput, error)
- func (s *ClustersService) ListRuntimeVersions(in *ListRuntimeVersionsInput) (*ListRuntimeVersionsOutput, error)
- func (s *ClustersService) ListZones(in *ListZonesInput) (*ListZonesOutput, error)
- func (s *ClustersService) PinCluster(in *PinClusterInput) (*PinClusterOutput, error)
- func (s *ClustersService) ResizeCluster(in *ResizeClusterInput) (*ResizeClusterOutput, error)
- func (s *ClustersService) RestartCluster(in *RestartClusterInput) (*RestartClusterOutput, error)
- func (s *ClustersService) StartCluster(in *StartClusterInput) (*StartClusterOutput, error)
- func (s *ClustersService) TerminateCluster(in *TerminateClusterInput) (*TerminateClusterOutput, error)
- func (s *ClustersService) UnpinCluster(in *UnpinClusterInput) (*UnpinClusterOutput, error)
- type CreateACLInput
- type CreateACLOutput
- type CreateClusterInput
- type CreateClusterOutput
- type CreateDBFSDirectoryInput
- type CreateDBFSDirectoryOutput
- type CreateDBFSStreamInput
- type CreateDBFSStreamOutput
- type CreateDirectoryInput
- type CreateDirectoryOutput
- type CreateGroupInput
- type CreateGroupOutput
- type CreateJobInput
- type CreateJobOutput
- type CreatePoolInput
- type CreatePoolOutput
- type CreateScopeInput
- type CreateScopeOutput
- type CreateSecretInput
- type CreateSecretOutput
- type CreateTokenInput
- type CreateTokenOutput
- type DBC
- type DBFSService
- func (s *DBFSService) AddBlockToDBFSStream(in *AddBlockToDBFSStreamInput) (*AddBlockToDBFSStreamOutput, error)
- func (s *DBFSService) CloseDBFSStream(in *CloseDBFSStreamInput) (*CloseDBFSStreamOutput, error)
- func (s *DBFSService) CreateDBFSDirectory(in *CreateDBFSDirectoryInput) (*CreateDBFSDirectoryOutput, error)
- func (s *DBFSService) CreateDBFSStream(in *CreateDBFSStreamInput) (*CreateDBFSStreamOutput, error)
- func (s *DBFSService) DeleteDBFSFilepath(in *DeleteDBFSFilepathInput) (*DeleteDBFSFilepathOutput, error)
- func (s *DBFSService) DescribeDBFSFilepath(in *DescribeDBFSFilepathInput) (*DescribeDBFSFilepathOutput, error)
- func (s *DBFSService) GetDBFSFilepathStatus(in *GetDBFSFilepathStatusInput) (*GetDBFSFilepathStatusOutput, error)
- func (s *DBFSService) MoveDBFSFilepath(in *MoveDBFSFilepathInput) (*MoveDBFSFilepathOutput, error)
- func (s *DBFSService) ReadDBFSFile(in *ReadDBFSFileInput) (*ReadDBFSFileOutput, error)
- func (s *DBFSService) WriteDBFSFile(in *WriteDBFSFileInput) (*WriteDBFSFileOutput, error)
- type DBFSStorageInfo
- type Decoder
- type DeleteACLInput
- type DeleteACLOutput
- type DeleteClusterInput
- type DeleteClusterOutput
- type DeleteDBFSFilepathInput
- type DeleteDBFSFilepathOutput
- type DeleteGroupInput
- type DeleteGroupOutput
- type DeleteJobInput
- type DeleteJobOutput
- type DeleteJobRunInput
- type DeleteJobRunOutput
- type DeleteNodeInput
- type DeleteNodeOutput
- type DeletePoolInput
- type DeletePoolOutput
- type DeleteScopeInput
- type DeleteScopeOutput
- type DeleteSecretInput
- type DeleteSecretOutput
- type DeregisterProfileInput
- type DeregisterProfileOutput
- type DescribeClusterInput
- type DescribeClusterOutput
- type DescribeDBFSFilepathInput
- type DescribeDBFSFilepathOutput
- type DescribeJobInput
- type DescribeJobOutput
- type DescribeJobRunInput
- type DescribeJobRunOutput
- type DescribeJobRunOutputInput
- type DescribeJobRunOutputOutput
- type DescribeLibraryStatusInput
- type DescribeLibraryStatusOutput
- type DescribeNodeInput
- type DescribeNodeOutput
- type DiskSpec
- type DiskType
- type DockerBasicAuth
- type DockerImage
- type EBSVolumeType
- type EditClusterInput
- type EditClusterOutput
- type EditPoolInput
- type EditPoolOutput
- type EmailNotifications
- type Encoder
- type Error
- type EventDetails
- type ExportFormat
- type ExportJobRunInput
- type ExportJobRunOutput
- type ExportNodeInput
- type ExportNodeOutput
- type GetACLInput
- type GetACLOutput
- type GetDBFSFilepathStatusInput
- type GetDBFSFilepathStatusOutput
- type GetPoolInput
- type GetPoolOutput
- type GroupsService
- func (s *GroupsService) AddMember(in *AddMemberInput) (*AddMemberOutput, error)
- func (s *GroupsService) CreateGroup(in *CreateGroupInput) (*CreateGroupOutput, error)
- func (s *GroupsService) DeleteGroup(in *DeleteGroupInput) (*DeleteGroupOutput, error)
- func (s *GroupsService) ListGroups(in *ListGroupsInput) (*ListGroupsOutput, error)
- func (s *GroupsService) ListMembers(in *ListMembersInput) (*ListMembersOutput, error)
- func (s *GroupsService) ListParents(in *ListParentsInput) (*ListParentsOutput, error)
- func (s *GroupsService) RemoveMember(in *RemoveMemberInput) (*RemoveMemberOutput, error)
- type HandleParam
- type ImportNodeInput
- type ImportNodeOutput
- type InitScriptInfo
- type InstallLibraryInput
- type InstallLibraryOutput
- type InstancePoolAWSAttributes
- type InstancePoolAndStats
- type InstancePoolIDParam
- type InstancePoolMutableParams
- type InstancePoolState
- type InstancePoolStats
- type InstancePoolStatus
- type InstanceProfileARNParam
- type Job
- type JobIDParam
- type JobSettings
- type JobTask
- type JobsAWSAttributes
- type JobsService
- func (s *JobsService) CancelJobRun(in *CancelJobRunInput) (*CancelJobRunOutput, error)
- func (s *JobsService) CreateJob(in *CreateJobInput) (*CreateJobOutput, error)
- func (s *JobsService) DeleteJob(in *DeleteJobInput) (*DeleteJobOutput, error)
- func (s *JobsService) DeleteJobRun(in *DeleteJobRunInput) (*DeleteJobRunOutput, error)
- func (s *JobsService) DescribeJob(in *DescribeJobInput) (*DescribeJobOutput, error)
- func (s *JobsService) DescribeJobRun(in *DescribeJobRunInput) (*DescribeJobRunOutput, error)
- func (s *JobsService) DescribeJobRunOutput(in *DescribeJobRunOutputInput) (*DescribeJobRunOutputOutput, error)
- func (s *JobsService) ExportJobRun(in *ExportJobRunInput) (*ExportJobRunOutput, error)
- func (s *JobsService) ListJobRuns(in *ListJobRunsInput) (*ListJobRunsOutput, error)
- func (s *JobsService) ListJobs(in *ListJobsInput) (*ListJobsOutput, error)
- func (s *JobsService) ResetJob(in *ResetJobInput) (*ResetJobOutput, error)
- func (s *JobsService) RunJob(in *RunJobInput) (*RunJobOutput, error)
- func (s *JobsService) SubmitJobRun(in *SubmitJobRunInput) (*SubmitJobRunOutput, error)
- type Language
- type LibrariesService
- func (s *LibrariesService) DescribeLibraryStatus(in *DescribeLibraryStatusInput) (*DescribeLibraryStatusOutput, error)
- func (s *LibrariesService) InstallLibrary(in *InstallLibraryInput) (*InstallLibraryOutput, error)
- func (s *LibrariesService) ListLibraryStatuses(in *ListLibraryStatusesInput) (*ListLibraryStatusesOutput, error)
- func (s *LibrariesService) UninstallLibrary(in *UninstallLibraryInput) (*UninstallLibraryOutput, error)
- type Library
- type LibraryFullStatus
- type LibraryInstallStatus
- type ListACLsInput
- type ListACLsOutput
- type ListClusterEventsInput
- type ListClusterEventsOutput
- type ListClustersInput
- type ListClustersOutput
- type ListGroupsInput
- type ListGroupsOutput
- type ListJobRunsInput
- type ListJobRunsOutput
- type ListJobsInput
- type ListJobsOutput
- type ListLibraryStatusesInput
- type ListLibraryStatusesOutput
- type ListMembersInput
- type ListMembersOutput
- type ListNodeTypesInput
- type ListNodeTypesOutput
- type ListObjectsInput
- type ListObjectsOutput
- type ListOrder
- type ListParentsInput
- type ListParentsOutput
- type ListPoolsInput
- type ListPoolsOutput
- type ListProfilesInput
- type ListProfilesOutput
- type ListRuntimeVersionsInput
- type ListRuntimeVersionsOutput
- type ListScopesInput
- type ListScopesOutput
- type ListSecretsInput
- type ListSecretsOutput
- type ListTokensInput
- type ListTokensOutput
- type ListZonesInput
- type ListZonesOutput
- type MavenLibrary
- type MoveDBFSFilepathInput
- type MoveDBFSFilepathOutput
- type NewCluster
- type NodeInstanceType
- type NodeType
- type NotebookOutput
- type NotebookTask
- type ObjectInfo
- type ObjectType
- type ParameterPair
- type PathParam
- type PendingInstanceError
- type PinClusterInput
- type PinClusterOutput
- type PoolsService
- func (s *PoolsService) CreatePool(in *CreatePoolInput) (*CreatePoolOutput, error)
- func (s *PoolsService) DeletePool(in *DeletePoolInput) (*DeletePoolOutput, error)
- func (s *PoolsService) EditPool(in *EditPoolInput) (*EditPoolOutput, error)
- func (s *PoolsService) GetPool(in *GetPoolInput) (*GetPoolOutput, error)
- func (s *PoolsService) ListPools(in *ListPoolsInput) (*ListPoolsOutput, error)
- type Principal
- type ProfilesService
- func (s *ProfilesService) DeregisterProfile(in *DeregisterProfileInput) (*DeregisterProfileOutput, error)
- func (s *ProfilesService) ListInstanceProfiles(in *ListProfilesInput) (*ListProfilesOutput, error)
- func (s *ProfilesService) RegisterProfile(in *RegisterProfileInput) (*RegisterProfileOutput, error)
- type PythonPyLibrary
- type RCranLibrary
- type ReadDBFSFileInput
- type ReadDBFSFileOutput
- type RegisterProfileInput
- type RegisterProfileOutput
- type RemoveMemberInput
- type RemoveMemberOutput
- type ResetJobInput
- type ResetJobOutput
- type ResizeCause
- type ResizeClusterInput
- type ResizeClusterOutput
- type RestartClusterInput
- type RestartClusterOutput
- type RevokeTokenInput
- type RevokeTokenOutput
- type Run
- type RunIDParam
- type RunJobInput
- type RunJobOutput
- type RunLifecycleState
- type RunParameters
- type RunResultState
- type RunState
- type S3StorageInfo
- type Schedule
- type Scope
- type SecretMetadata
- type SecretsService
- func (s *SecretsService) CreateACL(in *CreateACLInput) (*CreateACLOutput, error)
- func (s *SecretsService) CreateScope(in *CreateScopeInput) (*CreateScopeOutput, error)
- func (s *SecretsService) CreateSecret(in *CreateSecretInput) (*CreateSecretOutput, error)
- func (s *SecretsService) DeleteACL(in *DeleteACLInput) (*DeleteACLOutput, error)
- func (s *SecretsService) DeleteScope(in *DeleteScopeInput) (*DeleteScopeOutput, error)
- func (s *SecretsService) DeleteSecret(in *DeleteSecretInput) (*DeleteSecretOutput, error)
- func (s *SecretsService) GetACL(in *GetACLInput) (*GetACLOutput, error)
- func (s *SecretsService) ListACLs(in *ListACLsInput) (*ListACLsOutput, error)
- func (s *SecretsService) ListScopes(in *ListScopesInput) (*ListScopesOutput, error)
- func (s *SecretsService) ListSecrets(in *ListSecretsInput) (*ListSecretsOutput, error)
- type SparkConf
- type SparkJarTask
- type SparkNode
- type SparkNodeAwsAttributes
- type SparkPythonTask
- type SparkSubmitTask
- type SparkVersion
- type StartClusterInput
- type StartClusterOutput
- type SubmitJobRunInput
- type SubmitJobRunOutput
- type TerminateClusterInput
- type TerminateClusterOutput
- type TerminationCode
- type TerminationParameter
- type TerminationReason
- type TokenInfo
- type TokensService
- type TriggerType
- type UninstallLibraryInput
- type UninstallLibraryOutput
- type UnpinClusterInput
- type UnpinClusterOutput
- type Validator
- type ViewItem
- type ViewType
- type ViewsToExport
- type WorkspacesService
- func (s *WorkspacesService) CreateDirectory(in *CreateDirectoryInput) (*CreateDirectoryOutput, error)
- func (s *WorkspacesService) DeleteNode(in *DeleteNodeInput) (*DeleteNodeOutput, error)
- func (s *WorkspacesService) DescribeNode(in *DescribeNodeInput) (*DescribeNodeOutput, error)
- func (s *WorkspacesService) ExportNode(in *ExportNodeInput) (*ExportNodeOutput, error)
- func (s *WorkspacesService) ImportNode(in *ImportNodeInput) (*ImportNodeOutput, error)
- func (s *WorkspacesService) ListObjects(in *ListObjectsInput) (*ListObjectsOutput, error)
- type WriteDBFSFileInput
- type WriteDBFSFileOutput
Constants ¶
const BackendTypeDatabricks = "DATABRICKS"
BackendTypeDatabricks is the only currently valid backend type
Variables ¶
This section is empty.
Functions ¶
func ParseResponse ¶
ParseResponse inspects the given *http.Response to determine if it was successful (i.e., the status code is 200-range). If resp.Body is not nil, it is assumed to be JSON, and is unmarshaled onto the provided out.
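For example, ParseResponse can be used directly when issuing a raw request outside the typed services. The response shape below is illustrative only, and the typed ClustersService methods normally do this decoding for you:

func listClustersRaw(httpClient *http.Client) error {
    resp, err := httpClient.Get("https://example.cloud.databricks.com/api/2.0/clusters/list")
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    // Hypothetical stand-in for the list response body.
    var out struct {
        Clusters []databricks.Cluster `json:"clusters"`
    }
    return databricks.ParseResponse(resp, &out)
}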
Types ¶
type ACLItem ¶
type ACLItem struct { Principal string `json:"principal"` Permission ACLPermission `json:"permission"` }
ACLItem represents a permission granted to a principal
type ACLPermission ¶
type ACLPermission string
ACLPermission is a type alias for string, used to define an enumeration of valid values
const (
    // ACLPermissionRead allows reading secret metadata
    ACLPermissionRead ACLPermission = "READ"
    // ACLPermissionWrite allows creating and deleting secrets
    ACLPermissionWrite ACLPermission = "WRITE"
    // ACLPermissionManage allows managing ACLs of other principals within the
    // same scope
    ACLPermissionManage ACLPermission = "MANAGE"
)
type AWSAvailability ¶
type AWSAvailability uint8
const (
    AWSAvailabilitySpot AWSAvailability = iota + 1
    AWSAvailabilityOnDemand
    AWSAvailabilitySpotWithFallback
)
func (AWSAvailability) MarshalText ¶
func (a AWSAvailability) MarshalText() (text []byte, err error)
func (*AWSAvailability) UnmarshalText ¶
func (a *AWSAvailability) UnmarshalText(text []byte) error
type AddBlockToDBFSStreamInput ¶
type AddBlockToDBFSStreamInput struct { HandleParam Data []byte `json:"data"` }
func (AddBlockToDBFSStreamInput) Validate ¶
func (a AddBlockToDBFSStreamInput) Validate() error
type AddBlockToDBFSStreamOutput ¶
type AddBlockToDBFSStreamOutput struct{}
type AddMemberInput ¶
type AddMemberInput struct { Principal // Name of the parent group into which this principal is added. ParentName string `json:"parent_name"` }
AddMemberInput describes the input required for the AddMember operation.
func (AddMemberInput) Validate ¶
func (r AddMemberInput) Validate() error
Validate implements the Validator interface for AddMemberInput.
type AddMemberOutput ¶
type AddMemberOutput struct{}
AddMemberOutput describes the value returned from the AddMember operation.
type AwsAttributes ¶
type AwsAttributes struct { Availability AWSAvailability `json:"availability"` ZoneID string `json:"zone_id"` }
AwsAttributes are attributes set during cluster creation related to Amazon Web Services.
type CancelJobRunInput ¶
type CancelJobRunInput struct {
RunIDParam
}
type CancelJobRunOutput ¶
type CancelJobRunOutput struct{}
type Client ¶
type Client struct {
    BaseURL    *url.URL
    UserAgent  string
    Clusters   *ClustersService
    DBFS       *DBFSService
    Groups     *GroupsService
    Jobs       *JobsService
    Libraries  *LibrariesService
    Pools      *PoolsService
    Profiles   *ProfilesService
    Secrets    *SecretsService
    Tokens     *TokensService
    Workspaces *WorkspacesService
    // contains filtered or unexported fields
}
Client wraps *http.Client and implements the Databricks REST API v2.0.
func NewClient ¶
NewClient returns a *Client that embeds the given *http.Client and is configured to send API requests to the provided baseURL.
func (*Client) Do ¶
Do sends the given request using the receiver's *http.Client, and parses a successful response into the provided out.
func (*Client) NewRequest ¶
NewRequest creates an *http.Request using the given method, and path parsed in the context of the receiver's BaseURL. The given body, if not nil, is JSON-encoded (or URL-encoded if `method == "GET"`) and added as the body of the returned request.
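Putting these together: a typical client wraps an *http.Client from TokenClient and is then used through the typed services. The exact NewClient parameters are not reproduced in this excerpt, so the (httpClient, baseURL) form below is an assumption; the later snippets in this documentation all presume an already configured client *databricks.Client plus standard library imports.

ctx := context.Background()
httpClient := databricks.TokenClient(ctx, os.Getenv("DATABRICKS_TOKEN"))

// Assumed form of the constructor; check the NewClient signature.
client := databricks.NewClient(httpClient, "https://example.cloud.databricks.com")

out, err := client.Clusters.ListClusters(&databricks.ListClustersInput{})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("%+v\n", out)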
type CloseDBFSStreamInput ¶
type CloseDBFSStreamInput struct {
HandleParam
}
type CloseDBFSStreamOutput ¶
type CloseDBFSStreamOutput struct{}
type Cluster ¶
type Cluster struct {
    Workers                int                  `json:"num_workers"`
    Autoscale              *Autoscale           `json:"autoscale,omitempty"`
    ID                     string               `json:"cluster_id"`
    Creator                string               `json:"creator_user_name"`
    Driver                 SparkNode            `json:"driver"`
    Executors              []SparkNode          `json:"executors"`
    SparkContextID         int                  `json:"spark_context_id"`
    JDBCPort               int                  `json:"jdbc_port"`
    Name                   string               `json:"cluster_name"`
    SparkVersion           string               `json:"spark_version"`
    SparkConf              SparkConf            `json:"spark_conf"`
    AwsAttributes          *AwsAttributes       `json:"aws_attributes,omitempty"`
    NodeTypeID             string               `json:"node_type_id"`
    DriverNodeTypeID       string               `json:"driver_node_type_id"`
    SSHPublicKeys          []string             `json:"ssh_public_keys"`
    CustomTags             []ClusterTag         `json:"custom_tags"`
    ClusterLogConf         *ClusterLogConf      `json:"cluster_log_conf,omitempty"`
    InitScripts            []InitScriptInfo     `json:"init_scripts"`
    DockerImage            *DockerImage         `json:"docker_image,omitempty"`
    SparkEnvVars           map[string]string    `json:"spark_env_vars"`
    AutoterminationMinutes *int                 `json:"autotermination_minutes,omitempty"`
    EnableElasticDisk      bool                 `json:"enable_elastic_disk"`
    InstancePoolID         string               `json:"instance_pool_id"`
    ClusterSource          ClusterSource        `json:"cluster_source"`
    State                  ClusterState         `json:"state"`
    StateMessage           string               `json:"state_message"`
    StartTime              time.Time            `json:"start_time"`
    TerminatedTime         *time.Time           `json:"terminated_time,omitempty"`
    LastStateLossTime      *time.Time           `json:"last_state_loss_time,omitempty"`
    LastActivityTime       *time.Time           `json:"last_activity_time,omitempty"`
    ClusterMemoryMB        int                  `json:"cluster_memory_mb"`
    ClusterCores           float64              `json:"cluster_cores"`
    DefaultTags            []ClusterTag         `json:"default_tags"`
    ClusterLogStatus       ClusterLogSyncStatus `json:"cluster_log_status"`
    TerminationReason      *TerminationReason   `json:"termination_reason,omitempty"`
    // contains filtered or unexported fields
}
func (Cluster) MarshalJSON ¶
func (*Cluster) UnmarshalJSON ¶
type ClusterEvent ¶
type ClusterEvent struct { ClusterID string `json:"cluster_id"` Timestamp time.Time `json:"timestamp"` Type ClusterEventType `json:"type"` Details EventDetails `json:"details,omitempty"` }
func (ClusterEvent) MarshalJSON ¶
func (c ClusterEvent) MarshalJSON() ([]byte, error)
func (*ClusterEvent) UnmarshalJSON ¶
func (c *ClusterEvent) UnmarshalJSON(data []byte) error
type ClusterEventType ¶
type ClusterEventType uint8
const (
    ClusterEventTypeCreating ClusterEventType = iota + 1
    ClusterEventTypeDidNotExpandDisk
    ClusterEventTypeExpandedDisk
    ClusterEventTypeFailedToExpandDisk
    ClusterEventTypeInitScriptsStarting
    ClusterEventTypeInitScriptsFinished
    ClusterEventTypeStarting
    ClusterEventTypeRestarting
    ClusterEventTypeTerminating
    ClusterEventTypeEdited
    ClusterEventTypeRunning
    ClusterEventTypeResizing
    ClusterEventTypeUpsizeCompleted
    ClusterEventTypeNodesLost
    ClusterEventTypeDriverHealthy
    ClusterEventTypeSparkException
    ClusterEventTypeDriverNotResponding
    ClusterEventTypeDBFSDown
    ClusterEventTypeMetastoreDown
    ClusterEventTypeAutoscalingStatsReport
    ClusterEventTypeNodeBlacklisted
    ClusterEventTypePinned
    ClusterEventTypeUnpinned
)
func (ClusterEventType) MarshalJSON ¶
func (c ClusterEventType) MarshalJSON() ([]byte, error)
func (*ClusterEventType) UnmarshalJSON ¶
func (c *ClusterEventType) UnmarshalJSON(data []byte) error
type ClusterIDParam ¶
type ClusterIDParam struct {
ClusterID string `json:"cluster_id"`
}
func (*ClusterIDParam) Decode ¶
func (c *ClusterIDParam) Decode(data string) error
func (ClusterIDParam) Encode ¶
func (c ClusterIDParam) Encode() string
func (ClusterIDParam) Validate ¶
func (c ClusterIDParam) Validate() error
type ClusterInstance ¶
type ClusterLibrariesParam ¶
type ClusterLibrariesParam struct { ClusterIDParam Libraries []Library `json:"libraries"` }
func (ClusterLibrariesParam) Validate ¶
func (c ClusterLibrariesParam) Validate() error
type ClusterLibraryStatus ¶
type ClusterLibraryStatus struct { ClusterID string `json:"cluster_id"` LibraryStatuses []LibraryFullStatus `json:"library_statuses"` }
type ClusterLogConf ¶
type ClusterLogConf struct {
    // DBFS location of cluster log. destination must be provided. For example,
    // { "dbfs": { "destination": "dbfs:/home/cluster_log" } }
    DBFS *DBFSStorageInfo
    // S3 location of cluster log. destination and either region or endpoint
    // must be provided. For example:
    //
    // {
    //   "s3": {
    //     "destination": "s3://cluster_log_bucket/prefix",
    //     "region": "us-west-2"
    //   }
    // }
    S3 *S3StorageInfo
}
ClusterLogConf describes a cluster log destination path
type ClusterLogSyncStatus ¶
type ClusterLogSyncStatus struct { LastAttempted time.Time `json:"last_attempted"` LastException string `json:"last_exception,omitempty"` }
func (ClusterLogSyncStatus) MarshalJSON ¶
func (c ClusterLogSyncStatus) MarshalJSON() ([]byte, error)
func (*ClusterLogSyncStatus) UnmarshalJSON ¶
func (c *ClusterLogSyncStatus) UnmarshalJSON(data []byte) error
type ClusterParams ¶
type ClusterParams struct {
    // NumWorkers is the number of worker nodes that this cluster should have. A
    // cluster has one Spark driver and num_workers executors for a total of
    // num_workers + 1 Spark nodes.
    //
    // Note: When reading the properties of a cluster, this field reflects the
    // desired number of workers rather than the actual number of workers. For
    // instance, if a cluster is resized from 5 to 10 workers, this field will
    // immediately be updated to reflect the target size of 10 workers, whereas
    // the workers listed in executors will gradually increase from 5 to 10 as
    // the new nodes are provisioned.
    NumWorkers int `json:"num_workers,omitempty"`
    // Cluster name requested by the user. This doesn't have to be unique. If
    // not specified at creation, the cluster name will be an empty string.
    ClusterName string `json:"cluster_name,omitempty"`
    // The runtime version of the cluster. You can retrieve a list of available
    // runtime versions by using the Runtime Versions API call. This field is
    // required.
    SparkVersion string `json:"spark_version"`
    // This field encodes, through a single value, the resources available to
    // each of the Spark nodes in this cluster. For example, the Spark nodes can
    // be provisioned and optimized for memory or compute intensive workloads. A
    // list of available node types can be retrieved by using the List Node
    // Types API call. This field is required.
    NodeTypeID string `json:"node_type_id"`
    // The node type of the Spark driver. This field is optional; if unset, the
    // driver node type will be set as the same value as NodeTypeID.
    DriverNodeTypeID string `json:"driver_node_type_id,omitempty"`
    // SSH public key contents that will be added to each Spark node in this
    // cluster. The corresponding private keys can be used to login with the
    // user name ubuntu on port 2200. Up to 10 keys can be specified.
    SSHPublicKeys []string `json:"ssh_public_keys,omitempty"`
    // A set of optional, user-specified Spark configuration key-value pairs.
    // You can also pass in a string of extra JVM options to the driver and the
    // executors via spark.driver.extraJavaOptions and
    // spark.executor.extraJavaOptions respectively.
    //
    // Example Spark confs:
    //
    // {"spark.speculation": true, "spark.streaming.ui.retainedBatches": 5}
    //
    // or
    //
    // {"spark.driver.extraJavaOptions": "-verbose:gc -XX:+PrintGCDetails"}
    SparkConf SparkConf `json:"spark_conf,omitempty"`
    // Attributes related to clusters running on Amazon Web Services. If not
    // specified at cluster creation, a set of default values will be used.
    AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
    // Parameters needed in order to automatically scale clusters up and down
    // based on load. Note: autoscaling works best with Databricks Runtime 3.0
    // or above.
    Autoscale *Autoscale `json:"autoscale,omitempty"`
    // Additional tags for cluster resources. Databricks will tag all cluster
    // resources (e.g., AWS instances and EBS volumes) with these tags in
    // addition to default_tags. Notes:
    // * Tags are not supported on legacy node types such as compute-optimized
    //   and memory-optimized
    // * Databricks allows at most 45 custom tags
    CustomTags []ClusterTag `json:"custom_tags,omitempty"`
    // The configuration for delivering Spark logs to a long-term storage
    // destination. Only one destination can be specified for one cluster. If
    // the conf is given, the logs will be delivered to the destination every 5
    // mins. The destination of driver logs is <destination>/<cluster-ID>/driver,
    // while the destination of executor logs is
    // <destination>/<cluster-ID>/executor.
    ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
    // The configuration for storing init scripts. Any number of destinations
    // can be specified. The scripts are executed sequentially in the order
    // provided. If cluster_log_conf is specified, init script logs are sent to
    // <destination>/<cluster-ID>/init_scripts.
    InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
    // Docker image for a custom container.
    DockerImage *DockerImage `json:"docker_image,omitempty"`
    // An object containing a set of optional, user-specified environment
    // variable key-value pairs. Key-value pairs of the form (X,Y) are exported
    // as is (i.e., export X='Y') while launching the driver and workers. In
    // order to specify an additional set of SPARK_DAEMON_JAVA_OPTS, we
    // recommend appending them to $SPARK_DAEMON_JAVA_OPTS as shown in the
    // example below. This ensures that all default Databricks managed
    // environment variables are included as well. Example Spark environment
    // variables:
    //
    // {
    //   "SPARK_WORKER_MEMORY": "28000m",
    //   "SPARK_LOCAL_DIRS": "/local_disk0"
    // }
    // or
    // {"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}
    SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
    // Automatically terminates the cluster after it is inactive for this time
    // in minutes. If not set, this cluster will not be automatically
    // terminated. If specified, the threshold must be between 10 and 10000
    // minutes. You can also set this value to 0 to explicitly disable automatic
    // termination.
    AutoterminationMinutes *int `json:"autotermination_minutes,omitempty"`
    // Autoscaling Local Storage: when enabled, this cluster will dynamically
    // acquire additional disk space when its Spark workers are running low on
    // disk space. This feature requires specific AWS permissions to function
    // correctly - refer to [Autoscaling local
    // storage](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage)
    // for details.
    EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
    // The optional ID of the instance pool to which the cluster belongs. Refer
    // to [Instance Pools
    // API](https://docs.databricks.com/dev-tools/api/latest/instance-pools.html#instance-pools-api)
    // for details.
    InstancePoolID string `json:"instance_pool_id,omitempty"`
    // An optional token that can be used to guarantee the idempotency of
    // cluster creation requests. If an active cluster with the provided token
    // already exists, the request will not create a new cluster, but it will
    // return the ID of the existing cluster instead. The existence of a cluster
    // with the same token is not checked against terminated clusters.
    //
    // If you specify the idempotency token, upon failure you can retry until
    // the request succeeds. Databricks will guarantee that exactly one cluster
    // will be launched with that idempotency token.
    //
    // This token should have at most 64 characters.
    IdempotencyToken string `json:"idempotency_token,omitempty"`
}
func (ClusterParams) Validate ¶
func (c ClusterParams) Validate() error
type ClusterSize ¶
type ClusterSource ¶
type ClusterSource uint8
const (
    ClusterSourceUI ClusterSource = iota + 1
    ClusterSourceJOB
    ClusterSourceAPI
)
type ClusterSpec ¶
type ClusterSpec struct { ExistingClusterID string `json:"existing_cluster_id,omitempty"` NewCluster *NewCluster `json:"new_cluster,omitempty"` Libraries []Library `json:"libraries,omitempty"` }
type ClusterState ¶
type ClusterState uint8
const (
    ClusterStateUnknown ClusterState = iota
    ClusterStatePending
    ClusterStateRunning
    ClusterStateRestarting
    ClusterStateResizing
    ClusterStateTerminating
    ClusterStateTerminated
    ClusterStateError
)
func (ClusterState) IsActive ¶
func (c ClusterState) IsActive() bool
func (ClusterState) String ¶
func (c ClusterState) String() string
type ClusterTag ¶
type ClusterTag struct {
    // The key of the tag. The key length must be between 1 and 127 UTF-8
    // characters, inclusive. For a list of all restrictions, see AWS Tag
    // Restrictions:
    // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
    Key string `json:"key"`
    // The value of the tag. The value length must be less than or equal to 255
    // UTF-8 characters. For a list of all restrictions, see AWS Tag
    // Restrictions:
    // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
    Value string `json:"value"`
}
ClusterTag is a key-value pair
func (ClusterTag) Validate ¶
func (c ClusterTag) Validate() error
type ClustersService ¶
type ClustersService service
ClustersService provides client behavior for the Databricks Clusters API
The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. The maximum allowed size of a request to the Clusters API is 10MB.
Cluster lifecycle methods require a cluster ID, which is returned from Create. To obtain a list of clusters, invoke List.
Databricks maps cluster node instance types to compute units known as DBUs. See the instance type pricing page for a list of the supported instance types and their corresponding DBUs. For instance provider information, see AWS instance type specifications and pricing.
Databricks always provides one year’s deprecation notice before ceasing support for an instance type.
func (*ClustersService) CreateCluster ¶
func (s *ClustersService) CreateCluster(in *CreateClusterInput) (*CreateClusterOutput, error)
CreateCluster calls the CreateCluster Databricks API
func (*ClustersService) DeleteCluster ¶
func (s *ClustersService) DeleteCluster(in *DeleteClusterInput) (*DeleteClusterOutput, error)
DeleteCluster calls the DeleteCluster Databricks API
func (*ClustersService) DescribeCluster ¶
func (s *ClustersService) DescribeCluster(in *DescribeClusterInput) (*DescribeClusterOutput, error)
DescribeCluster calls the DescribeCluster Databricks API
func (*ClustersService) EditCluster ¶
func (s *ClustersService) EditCluster(in *EditClusterInput) (*EditClusterOutput, error)
EditCluster calls the EditCluster Databricks API
func (*ClustersService) ListClusterEvents ¶
func (s *ClustersService) ListClusterEvents(in *ListClusterEventsInput) (*ListClusterEventsOutput, error)
ListClusterEvents calls the ListClusterEvents Databricks API
func (*ClustersService) ListClusters ¶
func (s *ClustersService) ListClusters(in *ListClustersInput) (*ListClustersOutput, error)
ListClusters calls the ListClusters Databricks API
func (*ClustersService) ListNodeTypes ¶
func (s *ClustersService) ListNodeTypes(in *ListNodeTypesInput) (*ListNodeTypesOutput, error)
ListNodeTypes calls the ListNodeTypes Databricks API
func (*ClustersService) ListRuntimeVersions ¶
func (s *ClustersService) ListRuntimeVersions(in *ListRuntimeVersionsInput) (*ListRuntimeVersionsOutput, error)
ListRuntimeVersions calls the ListRuntimeVersions Databricks API
func (*ClustersService) ListZones ¶
func (s *ClustersService) ListZones(in *ListZonesInput) (*ListZonesOutput, error)
ListZones calls the ListZones Databricks API
func (*ClustersService) PinCluster ¶
func (s *ClustersService) PinCluster(in *PinClusterInput) (*PinClusterOutput, error)
PinCluster calls the PinCluster Databricks API
func (*ClustersService) ResizeCluster ¶
func (s *ClustersService) ResizeCluster(in *ResizeClusterInput) (*ResizeClusterOutput, error)
ResizeCluster calls the ResizeCluster Databricks API
func (*ClustersService) RestartCluster ¶
func (s *ClustersService) RestartCluster(in *RestartClusterInput) (*RestartClusterOutput, error)
RestartCluster calls the RestartCluster Databricks API
func (*ClustersService) StartCluster ¶
func (s *ClustersService) StartCluster(in *StartClusterInput) (*StartClusterOutput, error)
StartCluster calls the StartCluster Databricks API
func (*ClustersService) TerminateCluster ¶
func (s *ClustersService) TerminateCluster(in *TerminateClusterInput) (*TerminateClusterOutput, error)
TerminateCluster calls the TerminateCluster Databricks API
func (*ClustersService) UnpinCluster ¶
func (s *ClustersService) UnpinCluster(in *UnpinClusterInput) (*UnpinClusterOutput, error)
UnpinCluster calls the UnpinCluster Databricks API
type CreateACLInput ¶
type CreateACLInput struct { Scope string `json:"scope"` Principal string `json:"principal"` Permission ACLPermission `json:"permission"` }
CreateACLInput describes the input required for the CreateACL operation.
func (CreateACLInput) Validate ¶
func (c CreateACLInput) Validate() error
Validate implements the Validator interface for CreateACLInput.
type CreateACLOutput ¶
type CreateACLOutput struct{}
CreateACLOutput describes the value returned from the CreateACL operation.
type CreateClusterInput ¶
type CreateClusterInput struct {
    NumWorkers             int               `json:"num_workers,omitempty"`
    ClusterName            string            `json:"cluster_name,omitempty"`
    SparkVersion           string            `json:"spark_version"`
    NodeTypeID             string            `json:"node_type_id"`
    DriverNodeTypeID       string            `json:"driver_node_type_id,omitempty"`
    SSHPublicKeys          []string          `json:"ssh_public_keys,omitempty"`
    SparkConf              SparkConf         `json:"spark_conf,omitempty"`
    AwsAttributes          *AwsAttributes    `json:"aws_attributes,omitempty"`
    Autoscale              *Autoscale        `json:"autoscale,omitempty"`
    CustomTags             []ClusterTag      `json:"custom_tags,omitempty"`
    ClusterLogConf         *ClusterLogConf   `json:"cluster_log_conf,omitempty"`
    InitScripts            []InitScriptInfo  `json:"init_scripts,omitempty"`
    DockerImage            *DockerImage      `json:"docker_image,omitempty"`
    SparkEnvVars           map[string]string `json:"spark_env_vars,omitempty"`
    AutoterminationMinutes *int              `json:"autotermination_minutes,omitempty"`
    EnableElasticDisk      bool              `json:"enable_elastic_disk,omitempty"`
    InstancePoolID         string            `json:"instance_pool_id,omitempty"`
    IdempotencyToken       string            `json:"idempotency_token,omitempty"`
}
func (CreateClusterInput) Validate ¶
func (c CreateClusterInput) Validate() error
type CreateClusterOutput ¶
type CreateClusterOutput struct {
ClusterID string
}
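A minimal create request, assuming a configured client, needs only the required SparkVersion and NodeTypeID plus a cluster size; the values below are placeholders:

minutes := 30
out, err := client.Clusters.CreateCluster(&databricks.CreateClusterInput{
    ClusterName:            "sdk-example",
    SparkVersion:           "6.4.x-scala2.11", // pick from ListRuntimeVersions
    NodeTypeID:             "i3.xlarge",       // pick from ListNodeTypes
    NumWorkers:             2,
    AutoterminationMinutes: &minutes,
})
if err != nil {
    log.Fatal(err)
}
fmt.Println("created cluster", out.ClusterID)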
type CreateDBFSDirectoryInput ¶
type CreateDBFSDirectoryInput struct {
PathParam
}
type CreateDBFSDirectoryOutput ¶
type CreateDBFSDirectoryOutput struct{}
type CreateDBFSStreamInput ¶
type CreateDBFSStreamOutput ¶
type CreateDBFSStreamOutput struct {
Handle int `json:"handle"`
}
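A sketch of the streaming write flow (create a handle, append a block, close the handle), assuming a configured client; the fields of CreateDBFSStreamInput are not reproduced in this excerpt, so its literal is left empty here:

streamOut, err := client.DBFS.CreateDBFSStream(&databricks.CreateDBFSStreamInput{
    // target path and related options; see the CreateDBFSStreamInput type
})
if err != nil {
    log.Fatal(err)
}

_, err = client.DBFS.AddBlockToDBFSStream(&databricks.AddBlockToDBFSStreamInput{
    HandleParam: databricks.HandleParam{Handle: streamOut.Handle},
    Data:        []byte("hello from the SDK"), // sent as base64 in the JSON body
})
if err != nil {
    log.Fatal(err)
}

_, err = client.DBFS.CloseDBFSStream(&databricks.CloseDBFSStreamInput{
    HandleParam: databricks.HandleParam{Handle: streamOut.Handle},
})
if err != nil {
    log.Fatal(err)
}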
type CreateDirectoryInput ¶
type CreateDirectoryInput struct {
PathParam
}
type CreateDirectoryOutput ¶
type CreateDirectoryOutput struct{}
type CreateGroupInput ¶
type CreateGroupInput struct {
GroupName string `json:"group_name"`
}
CreateGroupInput describes the input required for the CreateGroup operation.
func (CreateGroupInput) Validate ¶
func (r CreateGroupInput) Validate() error
Validate implements the Validator interface for CreateGroupInput.
type CreateGroupOutput ¶
type CreateGroupOutput struct {
GroupName string `json:"group_name"`
}
CreateGroupOutput describes the value returned from the CreateGroup operation.
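For example, assuming a configured client:

out, err := client.Groups.CreateGroup(&databricks.CreateGroupInput{GroupName: "data-engineers"})
if err != nil {
    log.Fatal(err)
}
fmt.Println("created group", out.GroupName)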
type CreateJobInput ¶
type CreateJobInput struct {
JobSettings
}
type CreateJobOutput ¶
type CreateJobOutput struct {
JobID int `json:"job_id"`
}
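A sketch of creating and triggering a job, assuming a configured client. JobSettings is documented further below; exactly one task field (NotebookTask, SparkJarTask, SparkPythonTask, or SparkSubmitTask) must also be set, and RunJobInput is assumed here to carry the job ID via JobIDParam:

jobOut, err := client.Jobs.CreateJob(&databricks.CreateJobInput{
    JobSettings: databricks.JobSettings{
        Name:              "nightly-etl",
        ExistingClusterID: "1234-567890-abcde123",
        TimeoutSeconds:    3600,
        MaxRetries:        1,
        // plus exactly one task, e.g. NotebookTask; see those types
    },
})
if err != nil {
    log.Fatal(err)
}

// RunJobInput embedding JobIDParam is an assumption; check its definition.
runOut, err := client.Jobs.RunJob(&databricks.RunJobInput{
    JobIDParam: databricks.JobIDParam{JobID: jobOut.JobID},
})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("%+v\n", runOut)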
type CreatePoolInput ¶
type CreatePoolInput struct {
    InstancePoolMutableParams
    AWSAttributes          *InstancePoolAWSAttributes `json:"aws_attributes,omitempty"`
    CustomTags             []ClusterTag               `json:"custom_tags,omitempty"`
    EnableElasticDisk      bool                       `json:"enable_elastic_disk,omitempty"`
    DiskSpec               DiskSpec                   `json:"disk_spec,omitempty"`
    PreloadedSparkVersions []string                   `json:"preloaded_spark_versions,omitempty"`
}
func (CreatePoolInput) Validate ¶
func (c CreatePoolInput) Validate() error
type CreatePoolOutput ¶
type CreatePoolOutput struct {
InstancePoolID string `json:"instance_pool_id"`
}
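For example, assuming a configured client; InstancePoolMutableParams (documented further below) carries the pool name, node type, and sizing:

minIdle, maxCap := 1, 10
out, err := client.Pools.CreatePool(&databricks.CreatePoolInput{
    InstancePoolMutableParams: databricks.InstancePoolMutableParams{
        InstancePoolName: "shared-i3",
        NodeTypeID:       "i3.xlarge",
        MinIdleInstances: &minIdle,
        MaxCapacity:      &maxCap,
    },
    EnableElasticDisk: true,
})
if err != nil {
    log.Fatal(err)
}
fmt.Println("created pool", out.InstancePoolID)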
type CreateScopeInput ¶
type CreateScopeInput struct { Scope string `json:"scope"` Manager string `json:"initial_manage_principal,omitempty"` }
CreateScopeInput describes the input required for the CreateScope operation.
func (CreateScopeInput) Validate ¶
func (r CreateScopeInput) Validate() error
Validate implements the Validator interface for CreateScopeInput.
type CreateScopeOutput ¶
type CreateScopeOutput struct{}
CreateScopeOutput describes the value returned from the CreateScope operation.
type CreateSecretInput ¶
type CreateSecretInput struct { Scope string `json:"scope"` Key string `json:"key"` StringValue string `json:"string_value,omitempty"` BytesValue []byte `json:"bytes_value,omitempty"` }
CreateSecretInput describes the input required for the CreateSecret operation.
func (CreateSecretInput) Validate ¶
func (c CreateSecretInput) Validate() error
Validate implements the Validator interface for CreateSecretInput
type CreateSecretOutput ¶
type CreateSecretOutput struct{}
CreateSecretOutput describes the value returned from the CreateSecret operation.
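As a sketch, assuming a configured client, a scope, a secret, and a read ACL for that scope can be created together:

if _, err := client.Secrets.CreateScope(&databricks.CreateScopeInput{
    Scope:   "etl",
    Manager: "admins", // initial_manage_principal
}); err != nil {
    log.Fatal(err)
}

if _, err := client.Secrets.CreateSecret(&databricks.CreateSecretInput{
    Scope:       "etl",
    Key:         "warehouse-password",
    StringValue: "s3cr3t",
}); err != nil {
    log.Fatal(err)
}

if _, err := client.Secrets.CreateACL(&databricks.CreateACLInput{
    Scope:      "etl",
    Principal:  "data-engineers",
    Permission: databricks.ACLPermissionRead,
}); err != nil {
    log.Fatal(err)
}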
type CreateTokenInput ¶
type CreateTokenInput struct { LifetimeSeconds int `json:"lifetime_seconds,omitempty"` Comment string `json:"comment,omitempty"` }
func (CreateTokenInput) Validate ¶
func (c CreateTokenInput) Validate() error
type CreateTokenOutput ¶
type DBC ¶
type DBC struct {
    Version         string          `json:"version"`
    OrigID          int             `json:"origId"`
    Name            string          `json:"name"`
    Language        *Language       `json:"language"`
    Dashboards      json.RawMessage `json:"dashboards"`
    GUID            string          `json:"guid"`
    GlobalVars      json.RawMessage `json:"globalVars"`
    IPythonMetadata json.RawMessage `json:"iPythonMetadata"`
    InputWidgets    json.RawMessage `json:"inputWidgets"`
    Commands        json.RawMessage `json:"commands"`
}
type DBFSService ¶
type DBFSService service
func (*DBFSService) AddBlockToDBFSStream ¶
func (s *DBFSService) AddBlockToDBFSStream(in *AddBlockToDBFSStreamInput) (*AddBlockToDBFSStreamOutput, error)
func (*DBFSService) CloseDBFSStream ¶
func (s *DBFSService) CloseDBFSStream(in *CloseDBFSStreamInput) (*CloseDBFSStreamOutput, error)
func (*DBFSService) CreateDBFSDirectory ¶
func (s *DBFSService) CreateDBFSDirectory(in *CreateDBFSDirectoryInput) (*CreateDBFSDirectoryOutput, error)
func (*DBFSService) CreateDBFSStream ¶
func (s *DBFSService) CreateDBFSStream(in *CreateDBFSStreamInput) (*CreateDBFSStreamOutput, error)
func (*DBFSService) DeleteDBFSFilepath ¶
func (s *DBFSService) DeleteDBFSFilepath(in *DeleteDBFSFilepathInput) (*DeleteDBFSFilepathOutput, error)
func (*DBFSService) DescribeDBFSFilepath ¶
func (s *DBFSService) DescribeDBFSFilepath(in *DescribeDBFSFilepathInput) (*DescribeDBFSFilepathOutput, error)
func (*DBFSService) GetDBFSFilepathStatus ¶
func (s *DBFSService) GetDBFSFilepathStatus(in *GetDBFSFilepathStatusInput) (*GetDBFSFilepathStatusOutput, error)
func (*DBFSService) MoveDBFSFilepath ¶
func (s *DBFSService) MoveDBFSFilepath(in *MoveDBFSFilepathInput) (*MoveDBFSFilepathOutput, error)
func (*DBFSService) ReadDBFSFile ¶
func (s *DBFSService) ReadDBFSFile(in *ReadDBFSFileInput) (*ReadDBFSFileOutput, error)
func (*DBFSService) WriteDBFSFile ¶
func (s *DBFSService) WriteDBFSFile(in *WriteDBFSFileInput) (*WriteDBFSFileOutput, error)
type DBFSStorageInfo ¶
type DBFSStorageInfo struct { // DBFS destination, e.g. dbfs:/my/path Destination string `json:"destination"` }
DBFSStorageInfo describes where to store information in DBFS
type DeleteACLInput ¶
DeleteACLInput describes the input required for the DeleteACL operation.
func (DeleteACLInput) Validate ¶
func (d DeleteACLInput) Validate() error
Validate implements the Validator interface for DeleteACLInput.
type DeleteACLOutput ¶
type DeleteACLOutput struct{}
DeleteACLOutput describes the value returned from the DeleteACL operation.
type DeleteClusterInput ¶
type DeleteClusterInput struct {
ClusterIDParam
}
DeleteClusterInput is the input to the DeleteCluster operation
type DeleteClusterOutput ¶
type DeleteClusterOutput struct{}
DeleteClusterOutput is the output returned from the DeleteCluster operation
type DeleteDBFSFilepathInput ¶
type DeleteDBFSFilepathOutput ¶
type DeleteDBFSFilepathOutput struct{}
type DeleteGroupInput ¶
type DeleteGroupInput struct {
GroupName string `json:"group_name"`
}
DeleteGroupInput describes the input required for the DeleteGroup operation.
func (DeleteGroupInput) Validate ¶
func (r DeleteGroupInput) Validate() error
Validate implements the Validator interface for DeleteGroupInput.
type DeleteGroupOutput ¶
type DeleteGroupOutput struct{}
DeleteGroupOutput describes the value returned from the DeleteGroup operation.
type DeleteJobInput ¶
type DeleteJobInput struct {
JobIDParam
}
type DeleteJobOutput ¶
type DeleteJobOutput struct{}
type DeleteJobRunInput ¶
type DeleteJobRunInput struct {
RunIDParam
}
type DeleteJobRunOutput ¶
type DeleteJobRunOutput struct{}
type DeleteNodeInput ¶
type DeleteNodeOutput ¶
type DeleteNodeOutput struct{}
type DeletePoolInput ¶
type DeletePoolInput struct {
InstancePoolIDParam
}
type DeletePoolOutput ¶
type DeletePoolOutput struct{}
type DeleteScopeInput ¶
type DeleteScopeInput struct {
Scope string `json:"scope"`
}
DeleteScopeInput describes the input required for the DeleteScope operation.
func (DeleteScopeInput) Validate ¶
func (r DeleteScopeInput) Validate() error
Validate implements the Validator interface for DeleteScopeInput.
type DeleteScopeOutput ¶
type DeleteScopeOutput struct{}
DeleteScopeOutput describes the value returned from the DeleteScope operation.
type DeleteSecretInput ¶
DeleteSecretInput describes the input required for the DeleteSecret operation.
func (DeleteSecretInput) Validate ¶
func (d DeleteSecretInput) Validate() error
Validate implements the Validator interface for DeleteSecretInput.
type DeleteSecretOutput ¶
type DeleteSecretOutput struct{}
DeleteSecretOutput describes the value returned from the DeleteSecret operation.
type DeregisterProfileInput ¶
type DeregisterProfileInput struct {
InstanceProfileARNParam
}
type DeregisterProfileOutput ¶
type DeregisterProfileOutput struct{}
type DescribeClusterInput ¶
type DescribeClusterInput struct {
ClusterIDParam
}
DescribeClusterInput is the input to the DescribeCluster operation
type DescribeClusterOutput ¶
type DescribeClusterOutput struct {
Cluster
}
DescribeClusterOutput is the output returned from the DescribeCluster operation
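Because DescribeClusterOutput embeds Cluster, the returned fields can be read directly. For example, assuming a configured client:

out, err := client.Clusters.DescribeCluster(&databricks.DescribeClusterInput{
    ClusterIDParam: databricks.ClusterIDParam{ClusterID: "1234-567890-abcde123"},
})
if err != nil {
    log.Fatal(err)
}
if out.State.IsActive() {
    fmt.Printf("cluster %s (%s) is active: %s\n", out.Name, out.ID, out.State)
}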
type DescribeDBFSFilepathInput ¶
type DescribeDBFSFilepathInput struct {
PathParam
}
type DescribeDBFSFilepathOutput ¶
type DescribeDBFSFilepathOutput struct {
Files []fileInfo `json:"files"`
}
type DescribeJobInput ¶
type DescribeJobInput struct {
JobIDParam
}
type DescribeJobOutput ¶
type DescribeJobOutput struct {
Job
}
type DescribeJobRunInput ¶
type DescribeJobRunInput struct {
RunIDParam
}
type DescribeJobRunOutput ¶
type DescribeJobRunOutput struct {
Run
}
type DescribeJobRunOutputInput ¶
type DescribeJobRunOutputInput struct {
RunIDParam
}
type DescribeJobRunOutputOutput ¶
type DescribeJobRunOutputOutput struct { Error string `json:"error,omitempty"` NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` Metadata *Run `json:"metadata"` }
type DescribeLibraryStatusInput ¶
type DescribeLibraryStatusInput struct {
ClusterIDParam
}
type DescribeLibraryStatusOutput ¶
type DescribeLibraryStatusOutput struct {
ClusterLibraryStatus `json:"cluster_library_status"`
}
type DescribeNodeInput ¶
type DescribeNodeInput struct {
PathParam
}
type DescribeNodeOutput ¶
type DescribeNodeOutput struct {
ObjectInfo
}
type DiskSpec ¶
type DiskType ¶
type DiskType struct {
EBSVolumeType EBSVolumeType `json:"ebs_volume_type"`
}
type DockerBasicAuth ¶
type DockerBasicAuth struct { // User name for the Docker repository. Username string `json:"username"` // Password for the Docker repository. Password string `json:"password"` }
DockerBasicAuth contains Docker repository basic authentication information.
type DockerImage ¶
type DockerImage struct { // URL for the Docker image URL string `json:"url"` // Basic authentication information for Docker repository. BasicAuth DockerBasicAuth `json:"basic_auth"` }
DockerImage contains information for accessing an image in a Docker registry
type EBSVolumeType ¶
type EBSVolumeType uint8
const (
    EBSVolumeTypeGeneralPurposeSSD EBSVolumeType = iota + 1
    EBSVolumeTypeThroughputOptimizedHDD
)
func (EBSVolumeType) MarshalText ¶
func (e EBSVolumeType) MarshalText() (text []byte, err error)
func (*EBSVolumeType) UnmarshalText ¶
func (e *EBSVolumeType) UnmarshalText(text []byte) error
func (EBSVolumeType) Validate ¶
func (e EBSVolumeType) Validate() error
type EditClusterInput ¶
type EditClusterInput struct {
    NumWorkers             int               `json:"num_workers,omitempty"`
    Autoscale              *Autoscale        `json:"autoscale,omitempty"`
    ClusterID              string            `json:"cluster_id"`
    ClusterName            string            `json:"cluster_name,omitempty"`
    SparkVersion           string            `json:"spark_version"`
    SparkConf              SparkConf         `json:"spark_conf,omitempty"`
    AwsAttributes          *AwsAttributes    `json:"aws_attributes,omitempty"`
    NodeTypeID             string            `json:"node_type_id"`
    DriverNodeTypeID       string            `json:"driver_node_type_id,omitempty"`
    SSHPublicKeys          []string          `json:"ssh_public_keys,omitempty"`
    CustomTags             []ClusterTag      `json:"custom_tags,omitempty"`
    ClusterLogConf         *ClusterLogConf   `json:"cluster_log_conf,omitempty"`
    InitScripts            []InitScriptInfo  `json:"init_scripts,omitempty"`
    DockerImage            *DockerImage      `json:"docker_image,omitempty"`
    SparkEnvVars           map[string]string `json:"spark_env_vars,omitempty"`
    AutoterminationMinutes *int              `json:"autotermination_minutes,omitempty"`
    EnableElasticDisk      bool              `json:"enable_elastic_disk,omitempty"`
    InstancePoolID         string            `json:"instance_pool_id,omitempty"`
}
func (EditClusterInput) Validate ¶
func (e EditClusterInput) Validate() error
type EditClusterOutput ¶
type EditClusterOutput struct{}
type EditPoolInput ¶
type EditPoolInput struct { InstancePoolIDParam InstancePoolMutableParams }
func (EditPoolInput) Validate ¶
func (e EditPoolInput) Validate() error
type EditPoolOutput ¶
type EditPoolOutput struct{}
type EmailNotifications ¶
type Encoder ¶
type Encoder interface {
Encode() string
}
Encoder is used to URL-encode the receiver.
type EventDetails ¶
type EventDetails struct {
    CurrentNumWorkers   *int               `json:"current_num_workers,omitempty"`
    TargetNumWorkers    *int               `json:"target_num_workers,omitempty"`
    PreviousAttributes  *AwsAttributes     `json:"previous_attributes,omitempty"`
    Attributes          *AwsAttributes     `json:"attributes,omitempty"`
    PreviousClusterSize *ClusterSize       `json:"previous_cluster_size,omitempty"`
    ClusterSize         *ClusterSize       `json:"cluster_size,omitempty"`
    Cause               *ResizeCause       `json:"cause,omitempty"`
    Reason              *TerminationReason `json:"reason,omitempty"`
    User                *string            `json:"user,omitempty"`
}
type ExportFormat ¶
type ExportFormat uint
const (
    ExportFormatSource ExportFormat = iota + 1
    ExportFormatHTML
    ExportFormatJupyter
    ExportFormatDBC
)
func (ExportFormat) MarshalText ¶
func (e ExportFormat) MarshalText() (text []byte, err error)
func (*ExportFormat) UnmarshalText ¶
func (e *ExportFormat) UnmarshalText(text []byte) error
func (ExportFormat) Validate ¶
func (e ExportFormat) Validate() error
type ExportJobRunInput ¶
type ExportJobRunInput struct { RunIDParam ViewsToExport ViewsToExport `json:"views_to_export"` }
func (ExportJobRunInput) Encode ¶
func (e ExportJobRunInput) Encode() string
type ExportJobRunOutput ¶
type ExportJobRunOutput struct {
Views []ViewItem `json:"views"`
}
type ExportNodeInput ¶
type ExportNodeInput struct { PathParam Format ExportFormat `json:"format"` DirectDownload bool `json:"direct_download"` }
func (ExportNodeInput) Encode ¶
func (e ExportNodeInput) Encode() string
func (ExportNodeInput) Validate ¶
func (e ExportNodeInput) Validate() error
type ExportNodeOutput ¶
type ExportNodeOutput struct {
Content []byte `json:"content"`
}
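A sketch of exporting a notebook to a local file, assuming a configured client. PathParam is not expanded in this excerpt, so the Path field name below is an assumption:

out, err := client.Workspaces.ExportNode(&databricks.ExportNodeInput{
    PathParam: databricks.PathParam{Path: "/Users/someone@example.com/my-notebook"}, // assumed field name
    Format:    databricks.ExportFormatSource,
})
if err != nil {
    log.Fatal(err)
}
if err := os.WriteFile("my-notebook.py", out.Content, 0o644); err != nil {
    log.Fatal(err)
}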
type GetACLInput ¶
GetACLInput describes the input required for the GetACL operation.
func (GetACLInput) Encode ¶
func (g GetACLInput) Encode() string
Encode implements the Encoder interface for GetACLInput.
func (GetACLInput) Validate ¶
func (g GetACLInput) Validate() error
Validate implements the Validator interface for GetACLInput.
type GetACLOutput ¶
type GetACLOutput struct { Principal string `json:"principal"` Permission ACLPermission `json:"permission"` }
GetACLOutput describes the value returned from the GetACL operation.
type GetDBFSFilepathStatusInput ¶
type GetDBFSFilepathStatusInput struct {
PathParam
}
type GetDBFSFilepathStatusOutput ¶
type GetDBFSFilepathStatusOutput struct {
// contains filtered or unexported fields
}
type GetPoolInput ¶
type GetPoolInput struct {
InstancePoolIDParam
}
type GetPoolOutput ¶
type GetPoolOutput struct { InstancePoolAndStats Status InstancePoolStatus `json:"status"` }
type GroupsService ¶
type GroupsService service
GroupsService provides client behavior for the Databricks Groups API
func (*GroupsService) AddMember ¶
func (s *GroupsService) AddMember(in *AddMemberInput) (*AddMemberOutput, error)
AddMember sends an add group member request to the Databricks Groups API using the provided AddMemberInput. It returns an AddMemberOutput on success, or an error otherwise.
func (*GroupsService) CreateGroup ¶
func (s *GroupsService) CreateGroup(in *CreateGroupInput) (*CreateGroupOutput, error)
CreateGroup sends a create group request to the Databricks Groups API using the provided CreateGroupInput. It returns a CreateGroupOutput on success, or an error otherwise.
func (*GroupsService) DeleteGroup ¶
func (s *GroupsService) DeleteGroup(in *DeleteGroupInput) (*DeleteGroupOutput, error)
DeleteGroup sends a delete group request to the Databricks Groups API using the provided DeleteGroupInput. It returns a DeleteGroupOutput on success, or an error otherwise.
func (*GroupsService) ListGroups ¶
func (s *GroupsService) ListGroups(in *ListGroupsInput) (*ListGroupsOutput, error)
ListGroups sends a list groups request to the Databricks Groups API using the provided ListGroupsInput. It returns a ListGroupsOutput on success, or an error otherwise.
func (*GroupsService) ListMembers ¶
func (s *GroupsService) ListMembers(in *ListMembersInput) (*ListMembersOutput, error)
ListMembers sends a list group members request to the Databricks Groups API using the provided ListMembersInput. It returns a ListMembersOutput on success, or an error otherwise.
func (*GroupsService) ListParents ¶
func (s *GroupsService) ListParents(in *ListParentsInput) (*ListParentsOutput, error)
ListParents sends a list group parents request to the Databricks Groups API using the provided ListParentsInput. It returns a ListParentsOutput on success, or an error otherwise.
func (*GroupsService) RemoveMember ¶
func (s *GroupsService) RemoveMember(in *RemoveMemberInput) (*RemoveMemberOutput, error)
RemoveMember sends a remove group member request to the Databricks Groups API using the provided RemoveMemberInput. It returns a RemoveMemberOutput on success, or an error otherwise.
type HandleParam ¶
type HandleParam struct {
Handle int `json:"handle"`
}
func (HandleParam) Validate ¶
func (h HandleParam) Validate() error
type ImportNodeInput ¶
type ImportNodeInput struct { PathParam Format ExportFormat `json:"format"` Language *Language `json:"language,omitempty"` Content []byte `json:"content"` Overwrite bool `json:"overwrite"` }
func (ImportNodeInput) Validate ¶
func (i ImportNodeInput) Validate() error
type ImportNodeOutput ¶
type ImportNodeOutput struct{}
type InitScriptInfo ¶
type InitScriptInfo struct {
    // DBFS location of init script. destination must be provided. For example,
    // { "dbfs" : { "destination" : "dbfs:/home/init_script" } }
    DBFS *DBFSStorageInfo `json:"dbfs,omitempty"`
    // S3 location of init script. destination and either region or endpoint
    // must be provided. For example:
    //
    // {
    //   "s3": {
    //     "destination": "s3://init_script_bucket/prefix",
    //     "region": "us-west-2"
    //   }
    // }
    S3 *S3StorageInfo `json:"s3,omitempty"`
}
InitScriptInfo describes a path to an init script.
type InstallLibraryInput ¶
type InstallLibraryInput struct {
ClusterLibrariesParam
}
type InstallLibraryOutput ¶
type InstallLibraryOutput struct{}
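For example, assuming a configured client, installing a JAR from DBFS onto a running cluster:

_, err := client.Libraries.InstallLibrary(&databricks.InstallLibraryInput{
    ClusterLibrariesParam: databricks.ClusterLibrariesParam{
        ClusterIDParam: databricks.ClusterIDParam{ClusterID: "1234-567890-abcde123"},
        Libraries: []databricks.Library{
            {JAR: "dbfs:/FileStore/jars/my-lib.jar"},
        },
    },
})
if err != nil {
    log.Fatal(err)
}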
type InstancePoolAWSAttributes ¶
type InstancePoolAWSAttributes struct { Availability AWSAvailability `json:"availability"` ZoneID string `json:"zone_id"` SpotBidPricePercent int `json:"spot_bid_price_percent"` }
func (InstancePoolAWSAttributes) Validate ¶
func (i InstancePoolAWSAttributes) Validate() error
type InstancePoolAndStats ¶
type InstancePoolAndStats struct {
    InstancePoolMutableParams
    AWSAttributes          *InstancePoolAWSAttributes `json:"aws_attributes"`
    CustomTags             []ClusterTag               `json:"custom_tags"`
    EnableElasticDisk      bool                       `json:"enable_elastic_disk"`
    DiskSpec               DiskSpec                   `json:"disk_spec"`
    PreloadedSparkVersions []string                   `json:"preloaded_spark_versions"`
    InstancePoolID         string                     `json:"instance_pool_id"`
    DefaultTags            []ClusterTag               `json:"default_tags"`
    State                  InstancePoolState          `json:"state"`
    Stats                  InstancePoolStats          `json:"stats"`
}
func (InstancePoolAndStats) Validate ¶
func (i InstancePoolAndStats) Validate() error
type InstancePoolIDParam ¶
type InstancePoolIDParam struct {
InstancePoolID string `json:"instance_pool_id"`
}
func (InstancePoolIDParam) Encode ¶
func (i InstancePoolIDParam) Encode() string
func (InstancePoolIDParam) Validate ¶
func (i InstancePoolIDParam) Validate() error
type InstancePoolMutableParams ¶
type InstancePoolMutableParams struct {
    InstancePoolName               string `json:"instance_pool_name"`
    NodeTypeID                     string `json:"node_type_id"`
    MinIdleInstances               *int   `json:"min_idle_instances,omitempty"`
    MaxCapacity                    *int   `json:"max_capacity,omitempty"`
    IdleInstanceTerminationMinutes *int   `json:"idle_instance_termination_minutes,omitempty"`
}
func (InstancePoolMutableParams) Validate ¶
func (i InstancePoolMutableParams) Validate() error
type InstancePoolState ¶
type InstancePoolState uint
const (
    InstancePoolStateActive InstancePoolState = iota + 1
    InstancePoolStateDeleted
)
func (InstancePoolState) MarshalText ¶
func (i InstancePoolState) MarshalText() (text []byte, err error)
func (*InstancePoolState) UnmarshalText ¶
func (i *InstancePoolState) UnmarshalText(text []byte) error
type InstancePoolStats ¶
type InstancePoolStatus ¶
type InstancePoolStatus struct {
PendingInstanceErrors []PendingInstanceError `json:"pending_instance_errors"`
}
type InstanceProfileARNParam ¶
type InstanceProfileARNParam struct {
InstanceProfileARN string `json:"instance_profile_arn"`
}
func (InstanceProfileARNParam) Validate ¶
func (i InstanceProfileARNParam) Validate() error
type Job ¶
type Job struct { JobID int `json:"job_id"` CreatorUserName string `json:"creator_user_name"` Settings JobSettings `json:"settings"` CreatedTime time.Time `json:"created_time"` }
func (Job) MarshalJSON ¶
func (*Job) UnmarshalJSON ¶
type JobIDParam ¶
type JobIDParam struct {
JobID int `json:"job_id"`
}
func (JobIDParam) Encode ¶
func (j JobIDParam) Encode() string
func (JobIDParam) Validate ¶
func (j JobIDParam) Validate() error
type JobSettings ¶
type JobSettings struct {
    ExistingClusterID      string             `json:"existing_cluster_id,omitempty"`
    NewCluster             *NewCluster        `json:"new_cluster,omitempty"`
    NotebookTask           *NotebookTask      `json:"notebook_task,omitempty"`
    SparkJarTask           *SparkJarTask      `json:"spark_jar_task,omitempty"`
    SparkPythonTask        *SparkPythonTask   `json:"spark_python_task,omitempty"`
    SparkSubmitTask        *SparkSubmitTask   `json:"spark_submit_task,omitempty"`
    Name                   string             `json:"name,omitempty"`
    Libraries              []Library          `json:"libraries,omitempty"`
    EmailNotifications     EmailNotifications `json:"email_notifications,omitempty"`
    TimeoutSeconds         int                `json:"timeout_seconds,omitempty"`
    MaxRetries             int                `json:"max_retries,omitempty"`
    MinRetryIntervalMillis int                `json:"min_retry_interval_millis,omitempty"`
    RetryOnTimeout         bool               `json:"retry_on_timeout,omitempty"`
    Schedule               Schedule           `json:"schedule,omitempty"`
    MaxConcurrentRuns      int                `json:"max_concurrent_runs,omitempty"`
}
func (JobSettings) Validate ¶
func (j JobSettings) Validate() error
type JobTask ¶
type JobTask struct { NotebookTask *NotebookTask `json:"notebook_task,omitempty"` SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` }
type JobsAWSAttributes ¶
type JobsAWSAttributes struct {
Availability AWSAvailability `json:"availability"`
}
type JobsService ¶
type JobsService service
func (*JobsService) CancelJobRun ¶
func (s *JobsService) CancelJobRun(in *CancelJobRunInput) (*CancelJobRunOutput, error)
func (*JobsService) CreateJob ¶
func (s *JobsService) CreateJob(in *CreateJobInput) (*CreateJobOutput, error)
func (*JobsService) DeleteJob ¶
func (s *JobsService) DeleteJob(in *DeleteJobInput) (*DeleteJobOutput, error)
func (*JobsService) DeleteJobRun ¶
func (s *JobsService) DeleteJobRun(in *DeleteJobRunInput) (*DeleteJobRunOutput, error)
func (*JobsService) DescribeJob ¶
func (s *JobsService) DescribeJob(in *DescribeJobInput) (*DescribeJobOutput, error)
func (*JobsService) DescribeJobRun ¶
func (s *JobsService) DescribeJobRun(in *DescribeJobRunInput) (*DescribeJobRunOutput, error)
func (*JobsService) DescribeJobRunOutput ¶
func (s *JobsService) DescribeJobRunOutput(in *DescribeJobRunOutputInput) (*DescribeJobRunOutputOutput, error)
func (*JobsService) ExportJobRun ¶
func (s *JobsService) ExportJobRun(in *ExportJobRunInput) (*ExportJobRunOutput, error)
func (*JobsService) ListJobRuns ¶
func (s *JobsService) ListJobRuns(in *ListJobRunsInput) (*ListJobRunsOutput, error)
func (*JobsService) ListJobs ¶
func (s *JobsService) ListJobs(in *ListJobsInput) (*ListJobsOutput, error)
func (*JobsService) ResetJob ¶
func (s *JobsService) ResetJob(in *ResetJobInput) (*ResetJobOutput, error)
func (*JobsService) RunJob ¶
func (s *JobsService) RunJob(in *RunJobInput) (*RunJobOutput, error)
func (*JobsService) SubmitJobRun ¶
func (s *JobsService) SubmitJobRun(in *SubmitJobRunInput) (*SubmitJobRunOutput, error)
type LibrariesService ¶
type LibrariesService service
func (*LibrariesService) DescribeLibraryStatus ¶
func (s *LibrariesService) DescribeLibraryStatus(in *DescribeLibraryStatusInput) (*DescribeLibraryStatusOutput, error)
func (*LibrariesService) InstallLibrary ¶
func (s *LibrariesService) InstallLibrary(in *InstallLibraryInput) (*InstallLibraryOutput, error)
func (*LibrariesService) ListLibraryStatuses ¶
func (s *LibrariesService) ListLibraryStatuses(in *ListLibraryStatusesInput) (*ListLibraryStatusesOutput, error)
func (*LibrariesService) UninstallLibrary ¶
func (s *LibrariesService) UninstallLibrary(in *UninstallLibraryInput) (*UninstallLibraryOutput, error)
type Library ¶
type Library struct {
    JAR   string           `json:"jar,omitempty"`
    EGG   string           `json:"egg,omitempty"`
    WHL   string           `json:"whl,omitempty"`
    PyPI  *PythonPyLibrary `json:"pypi,omitempty"`
    Maven *MavenLibrary    `json:"maven,omitempty"`
    Cran  *RCranLibrary    `json:"cran,omitempty"`
}
type LibraryFullStatus ¶
type LibraryFullStatus struct {
    Library                 Library              `json:"library"`
    Status                  LibraryInstallStatus `json:"status"`
    Messages                []string             `json:"messages"`
    IsLibraryForAllClusters bool                 `json:"is_library_for_all_clusters"`
}
type LibraryInstallStatus ¶
type LibraryInstallStatus uint
const (
    LibraryInstallStatusPending LibraryInstallStatus = iota + 1
    LibraryInstallStatusResolving
    LibraryInstallStatusInstalling
    LibraryInstallStatusInstalled
    LibraryInstallStatusFailed
    LibraryInstallStatusUninstallOnRestart
)
func (LibraryInstallStatus) MarshalText ¶
func (l LibraryInstallStatus) MarshalText() (text []byte, err error)
func (*LibraryInstallStatus) UnmarshalText ¶
func (l *LibraryInstallStatus) UnmarshalText(text []byte) error
type ListACLsInput ¶
type ListACLsInput struct {
Scope string `json:"scope"`
}
ListACLsInput describes the input required for the ListACLs operation.
func (ListACLsInput) Encode ¶
func (l ListACLsInput) Encode() string
Encode implements the Encoder interface for ListACLsInput.
func (ListACLsInput) Validate ¶
func (l ListACLsInput) Validate() error
Validate implements the Validator interface for ListACLsInput.
type ListACLsOutput ¶
type ListACLsOutput struct {
Items []ACLItem `json:"items"`
}
ListACLsOutput describes the value returned from the ListACLs operation.
type ListClusterEventsInput ¶
type ListClusterEventsInput struct {
    ClusterIDParam
    StartTime  *time.Time         `json:"start_time,omitempty"`
    EndTime    *time.Time         `json:"end_time,omitempty"`
    Order      ListOrder          `json:"order,omitempty"`
    EventTypes []ClusterEventType `json:"event_types,omitempty"`
    Offset     int                `json:"offset,omitempty"`
    Limit      int                `json:"limit,omitempty"`
}
ListClusterEventsInput is the input to the ListClusterEvents operation
func (ListClusterEventsInput) MarshalJSON ¶
func (l ListClusterEventsInput) MarshalJSON() ([]byte, error)
func (ListClusterEventsInput) Validate ¶
func (l ListClusterEventsInput) Validate() error
Validate implements the Validator interface for ListClusterEventsInput.
type ListClusterEventsOutput ¶
type ListClusterEventsOutput struct {
    Events     []ClusterEvent          `json:"events"`
    NextPage   *ListClusterEventsInput `json:"next_page,omitempty"`
    TotalCount int                     `json:"total_count"`
}
ListClusterEventsOutput is the output returned from the ListClusterEvents operation
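Because NextPage, when present, is itself a ready-to-send ListClusterEventsInput, paging through events can be sketched as a loop. This assumes the package is imported as databricks, that a *ClustersService has already been obtained from a configured Client, and that the embedded ClusterIDParam exposes a ClusterID field (its fields are not listed here).

func collectAllEvents(clusters *databricks.ClustersService, clusterID string) ([]databricks.ClusterEvent, error) {
    in := &databricks.ListClusterEventsInput{Limit: 50}
    in.ClusterID = clusterID // assumed field promoted from the embedded ClusterIDParam

    var events []databricks.ClusterEvent
    for {
        out, err := clusters.ListClusterEvents(in)
        if err != nil {
            return nil, err
        }
        events = append(events, out.Events...)
        if out.NextPage == nil {
            return events, nil // no further pages
        }
        in = out.NextPage // the server hands back the input for the next page
    }
}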
type ListClustersInput ¶
type ListClustersInput nopInput
ListClustersInput is the input to the ListClusters operation
type ListClustersOutput ¶
type ListClustersOutput struct {
Clusters []*Cluster `json:"clusters"`
}
ListClustersOutput is the output returned from the ListClusters operation
type ListGroupsInput ¶
type ListGroupsInput nopInput
ListGroupsInput describes the input required for the ListGroups operation.
type ListGroupsOutput ¶
type ListGroupsOutput struct {
GroupNames []string `json:"group_names"`
}
ListGroupsOutput describes the value returned from the ListGroups operation.
type ListJobRunsInput ¶
type ListJobRunsInput struct {
    ActiveOnly    bool `json:"active_only"`
    CompletedOnly bool `json:"completed_only"`
    JobID         int  `json:"job_id"`
    Offset        int  `json:"offset"`
    Limit         int  `json:"limit"`
}
func (ListJobRunsInput) Encode ¶
func (l ListJobRunsInput) Encode() string
func (ListJobRunsInput) Validate ¶
func (l ListJobRunsInput) Validate() error
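A hedged sketch of listing the most recent completed runs of a single job. It assumes the package is imported as databricks, fmt is imported, a *JobsService has been obtained from a configured Client, and that ListJobRunsOutput exposes a Runs slice (its fields are not listed here).

func recentCompletedRuns(jobs *databricks.JobsService, jobID int) error {
    in := &databricks.ListJobRunsInput{
        CompletedOnly: true,
        JobID:         jobID,
        Limit:         25,
    }
    if err := in.Validate(); err != nil {
        return err
    }
    out, err := jobs.ListJobRuns(in)
    if err != nil {
        return err
    }
    for _, run := range out.Runs { // Runs field is assumed on ListJobRunsOutput
        fmt.Printf("run %d: %s\n", run.RunID, run.State.StateMessage)
    }
    return nil
}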
type ListJobRunsOutput ¶
type ListJobsInput ¶
type ListJobsInput nopInput
type ListJobsOutput ¶
type ListJobsOutput struct {
Jobs []Job `json:"jobs"`
}
type ListLibraryStatusesInput ¶
type ListLibraryStatusesInput nopInput
type ListLibraryStatusesOutput ¶
type ListLibraryStatusesOutput struct {
Statuses []ClusterLibraryStatus `json:"statuses"`
}
type ListMembersInput ¶
type ListMembersInput struct {
Group string `json:"group_name"`
}
ListMembersInput describes the input required for the ListGroupMembers operation.
func (*ListMembersInput) Decode ¶
func (r *ListMembersInput) Decode(query string) error
Decode implements the Decoder interface for ListMembersInput.
func (ListMembersInput) Encode ¶
func (l ListMembersInput) Encode() string
Encode implements the Encoder interface for ListMembersInput.
func (ListMembersInput) Validate ¶
func (l ListMembersInput) Validate() error
Validate implements the Validator interface for ListMembersInput.
type ListMembersOutput ¶
type ListMembersOutput struct {
Members []Principal `json:"members"`
}
ListMembersOutput describes the value returned from the ListGroupMembers operation.
type ListNodeTypesInput ¶
type ListNodeTypesInput nopInput
ListNodeTypesInput is the input to the ListNodeTypes operation
type ListNodeTypesOutput ¶
type ListNodeTypesOutput struct {
NodeTypes []NodeType
}
ListNodeTypesOutput is the output returned from the ListNodeTypes operation
type ListObjectsInput ¶
type ListObjectsInput struct {
PathParam
}
type ListObjectsOutput ¶
type ListObjectsOutput struct {
Objects []ObjectInfo `json:"objects"`
}
type ListOrder ¶
type ListOrder uint8
func (ListOrder) MarshalJSON ¶
func (*ListOrder) UnmarshalJSON ¶
type ListParentsInput ¶
type ListParentsInput struct {
Principal
}
ListParentsInput describes the input required for the ListGroupParents operation.
type ListParentsOutput ¶
type ListParentsOutput struct {
Groups []string `json:"group_names"`
}
ListParentsOutput describes the value returned from the ListGroupParents operation.
type ListPoolsInput ¶
type ListPoolsInput nopInput
type ListPoolsOutput ¶
type ListPoolsOutput struct {
InstancePools []InstancePoolAndStats `json:"instance_pools"`
}
type ListProfilesInput ¶
type ListProfilesInput nopInput
type ListProfilesOutput ¶
type ListProfilesOutput struct {
InstanceProfiles []string `json:"instance_profiles"`
}
type ListRuntimeVersionsInput ¶
type ListRuntimeVersionsInput nopInput
ListRuntimeVersionsInput is the input to the ListRuntimeVersions operation
type ListRuntimeVersionsOutput ¶
type ListRuntimeVersionsOutput struct {
Versions []SparkVersion `json:"versions"`
}
ListRuntimeVersionsOutput is the output returned from the ListRuntimeVersions operation
type ListScopesInput ¶
type ListScopesInput nopInput
ListScopesInput describes the input required for the ListScopes operation.
type ListScopesOutput ¶
type ListScopesOutput struct {
Scopes []Scope `json:"scopes"`
}
ListScopesOutput describes the value returned from the ListScopes operation.
type ListSecretsInput ¶
type ListSecretsInput struct {
Scope string `json:"scope"`
}
ListSecretsInput describes the input required for the ListSecrets operation.
func (ListSecretsInput) Encode ¶
func (l ListSecretsInput) Encode() string
Encode implements the Encoder interface for ListSecretsInput.
func (ListSecretsInput) Validate ¶
func (l ListSecretsInput) Validate() error
Validate implements the Validator interface for ListSecretsInput.
type ListSecretsOutput ¶
type ListSecretsOutput struct {
Secrets []SecretMetadata `json:"secrets"`
}
ListSecretsOutput describes the value returned from the ListSecrets operation.
type ListTokensInput ¶
type ListTokensInput nopInput
type ListTokensOutput ¶
type ListTokensOutput struct {
TokenInfos []TokenInfo `json:"token_infos"`
}
type ListZonesInput ¶
type ListZonesInput nopInput
ListZonesInput is the input to the ListZones operation
type ListZonesOutput ¶
type ListZonesOutput struct {
    Zones       []string `json:"zones"`
    DefaultZone string   `json:"default_zone"`
}
ListZonesOutput is the output returned from the ListZones operation
type MavenLibrary ¶
type MavenLibrary struct {
    Coordinates string   `json:"coordinates"`
    Repo        string   `json:"repo"`
    Exclusions  []string `json:"exclusions"`
}
func (MavenLibrary) Validate ¶
func (m MavenLibrary) Validate() error
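For example, a Library that installs a Maven coordinate (with one transitive dependency excluded) can be built and checked like this. The coordinate and exclusion are placeholders, and the package is assumed to be imported as databricks.

func exampleMavenLibrary() (databricks.Library, error) {
    maven := databricks.MavenLibrary{
        Coordinates: "org.jsoup:jsoup:1.13.1",        // placeholder coordinate
        Exclusions:  []string{"org.slf4j:slf4j-api"}, // placeholder exclusion
    }
    if err := maven.Validate(); err != nil {
        return databricks.Library{}, err
    }
    // Typically only one of the Library source fields is set.
    return databricks.Library{Maven: &maven}, nil
}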
type MoveDBFSFilepathInput ¶
type MoveDBFSFilepathInput struct {
    SourcePath      string `json:"source_path"`
    DestinationPath string `json:"destination_path"`
}
func (MoveDBFSFilepathInput) Validate ¶
func (m MoveDBFSFilepathInput) Validate() error
type MoveDBFSFilepathOutput ¶
type MoveDBFSFilepathOutput struct{}
type NewCluster ¶
type NewCluster struct {
    SparkVersion  string            `json:"spark_version"`
    NodeTypeID    string            `json:"node_type_id"`
    AWSAttributes JobsAWSAttributes `json:"aws_attributes"`
    NumWorkers    int               `json:"num_workers"`
}
type NodeInstanceType ¶
type NodeType ¶
type NodeType struct {
    Category              string           `json:"category"`
    NodeTypeID            string           `json:"node_type_id"`
    Description           string           `json:"description"`
    NumGPUs               int              `json:"num_gpus"`
    NumCores              float64          `json:"num_cores"`
    DisplayOrder          int              `json:"display_order"`
    InstanceTypeID        string           `json:"instance_type_id"`
    MemoryMB              int              `json:"memory_mb"`
    IsDeprecated          bool             `json:"is_deprecated"`
    SupportClusterTags    bool             `json:"support_cluster_tags"`
    IsIOCacheEnabled      bool             `json:"is_io_cache_enabled"`
    IsHidden              bool             `json:"is_hidden"`
    SupportEBSVolumes     bool             `json:"support_ebs_volumes"`
    SupportPortForwarding bool             `json:"support_port_forwarding"`
    NodeInstanceType      NodeInstanceType `json:"node_instance_type"`
}
type NotebookOutput ¶
type NotebookTask ¶
type ObjectInfo ¶
type ObjectInfo struct {
    ObjectType ObjectType `json:"object_type"`
    ObjectID   int        `json:"object_id"`
    Path       string     `json:"path"`
    Language   *Language  `json:"language,omitempty"`
}
type ObjectType ¶
type ObjectType uint
const (
    ObjectTypeNotebook ObjectType = iota + 1
    ObjectTypeDirectory
    ObjectTypeLibrary
)
func (ObjectType) MarshalText ¶
func (o ObjectType) MarshalText() (text []byte, err error)
func (ObjectType) String ¶
func (o ObjectType) String() string
func (*ObjectType) UnmarshalText ¶
func (o *ObjectType) UnmarshalText(data []byte) error
type ParameterPair ¶
type ParameterPair struct {
    Key   TerminationParameter `json:"key"`
    Value string               `json:"value"`
}
type PendingInstanceError ¶
type PinClusterInput ¶
type PinClusterInput struct {
ClusterIDParam
}
PinClusterInput is the input to the PinCluster operation
type PinClusterOutput ¶
type PinClusterOutput struct{}
PinClusterOutput is the output returned from the PinCluster operation
type PoolsService ¶
type PoolsService service
func (*PoolsService) CreatePool ¶
func (s *PoolsService) CreatePool(in *CreatePoolInput) (*CreatePoolOutput, error)
func (*PoolsService) DeletePool ¶
func (s *PoolsService) DeletePool(in *DeletePoolInput) (*DeletePoolOutput, error)
func (*PoolsService) EditPool ¶
func (s *PoolsService) EditPool(in *EditPoolInput) (*EditPoolOutput, error)
func (*PoolsService) GetPool ¶
func (s *PoolsService) GetPool(in *GetPoolInput) (*GetPoolOutput, error)
func (*PoolsService) ListPools ¶
func (s *PoolsService) ListPools(in *ListPoolsInput) (*ListPoolsOutput, error)
type Principal ¶
type Principal struct {
    UserName  string `json:"user_name,omitempty"`
    GroupName string `json:"group_name,omitempty"`
}
Principal identifies a principal by specifying the name of either a user or a group. One or the other must be specified, but not both.
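For example, a user principal leaves GroupName empty and a group principal leaves UserName empty; the names below are placeholders and the package is assumed to be imported as databricks.

func examplePrincipals() (databricks.Principal, databricks.Principal) {
    // A principal for a user; GroupName is left empty because exactly one
    // of the two fields may be set.
    user := databricks.Principal{UserName: "someone@example.com"}
    // A principal for a group, used the same way.
    group := databricks.Principal{GroupName: "data-engineers"}
    return user, group
}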
type ProfilesService ¶
type ProfilesService service
func (*ProfilesService) DeregisterProfile ¶
func (s *ProfilesService) DeregisterProfile(in *DeregisterProfileInput) (*DeregisterProfileOutput, error)
func (*ProfilesService) ListInstanceProfiles ¶
func (s *ProfilesService) ListInstanceProfiles(in *ListProfilesInput) (*ListProfilesOutput, error)
func (*ProfilesService) RegisterProfile ¶
func (s *ProfilesService) RegisterProfile(in *RegisterProfileInput) (*RegisterProfileOutput, error)
type PythonPyLibrary ¶
func (PythonPyLibrary) Validate ¶
func (p PythonPyLibrary) Validate() error
type RCranLibrary ¶
func (RCranLibrary) Validate ¶
func (r RCranLibrary) Validate() error
type ReadDBFSFileInput ¶
func (ReadDBFSFileInput) Encode ¶
func (r ReadDBFSFileInput) Encode() string
func (ReadDBFSFileInput) Validate ¶
func (r ReadDBFSFileInput) Validate() error
type ReadDBFSFileOutput ¶
type RegisterProfileInput ¶
type RegisterProfileInput struct {
    InstanceProfileARNParam
    SkipValidation bool `json:"skip_validation"`
}
type RegisterProfileOutput ¶
type RegisterProfileOutput struct{}
type RemoveMemberInput ¶
RemoveMemberInput describes the input required for the RemoveGroupMember operation.
func (RemoveMemberInput) Validate ¶
func (r RemoveMemberInput) Validate() error
Validate implements the Validator interface for RemoveMemberInput.
type RemoveMemberOutput ¶
type RemoveMemberOutput struct{}
RemoveMemberOutput describes the value returned from the RemoveGroupMember operation.
type ResetJobInput ¶
type ResetJobInput struct {
    JobIDParam
    NewSettings JobSettings `json:"new_settings"`
}
func (ResetJobInput) Validate ¶
func (r ResetJobInput) Validate() error
type ResetJobOutput ¶
type ResetJobOutput struct{}
type ResizeCause ¶
type ResizeCause uint8
const (
    ResizeCauseAutoscale ResizeCause = iota + 1
    ResizeCauseUserRequest
    ResizeCauseAutorecovery
)
type ResizeClusterInput ¶
type ResizeClusterInput struct {
    NumWorkers int        `json:"num_workers,omitempty"`
    Autoscale  *Autoscale `json:"autoscale,omitempty"`
    ClusterIDParam
}
func (ResizeClusterInput) Validate ¶
func (r ResizeClusterInput) Validate() error
type ResizeClusterOutput ¶
type ResizeClusterOutput struct{}
type RestartClusterInput ¶
type RestartClusterInput struct {
ClusterIDParam
}
type RestartClusterOutput ¶
type RestartClusterOutput struct{}
type RevokeTokenInput ¶
type RevokeTokenInput struct {
TokenID string `json:"token_id"`
}
func (RevokeTokenInput) Validate ¶
func (r RevokeTokenInput) Validate() error
type RevokeTokenOutput ¶
type RevokeTokenOutput struct{}
type Run ¶
type Run struct {
    JobID                int             `json:"job_id"`
    RunID                int             `json:"run_id"`
    RunName              string          `json:"run_name,omitempty"`
    CreatorUserName      string          `json:"creator_user_name"`
    NumberInJob          int             `json:"number_in_job"`
    OriginalAttemptRunID int             `json:"original_attempt_run_id"`
    State                RunState        `json:"state"`
    Schedule             Schedule        `json:"schedule"`
    Task                 JobTask         `json:"task"`
    ClusterSpec          ClusterSpec     `json:"cluster_spec"`
    ClusterInstance      ClusterInstance `json:"cluster_instance"`
    OverridingParameters RunParameters   `json:"overriding_parameters"`
    StartTime            time.Time       `json:"start_time"`
    SetupDuration        int             `json:"setup_duration"`
    ExecutionDuration    int             `json:"execution_duration"`
    CleanupDuration      int             `json:"cleanup_duration"`
    Trigger              TriggerType     `json:"trigger"`
}
func (Run) MarshalJSON ¶
func (*Run) UnmarshalJSON ¶
type RunIDParam ¶
type RunIDParam struct {
RunID int `json:"run_id"`
}
func (RunIDParam) Encode ¶
func (r RunIDParam) Encode() string
func (RunIDParam) Validate ¶
func (r RunIDParam) Validate() error
type RunJobInput ¶
type RunJobInput struct {
    JobIDParam
    JarParams         []string          `json:"jar_params,omitempty"`
    NotebookParams    map[string]string `json:"notebook_params,omitempty"`
    PythonParams      []string          `json:"python_params,omitempty"`
    SparkSubmitParams []string          `json:"spark_submit_params,omitempty"`
}
func (RunJobInput) Validate ¶
func (r RunJobInput) Validate() error
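A minimal sketch of triggering a run of an existing notebook job with per-run parameters. The package is assumed to be imported as databricks, the *JobsService is assumed to come from a configured Client, and the parameter name and value are placeholders.

func runNotebookJob(jobs *databricks.JobsService, jobID int) error {
    in := &databricks.RunJobInput{
        NotebookParams: map[string]string{
            "run_date": "2020-01-01", // placeholder widget name and value
        },
    }
    in.JobID = jobID // promoted from the embedded JobIDParam
    if err := in.Validate(); err != nil {
        return err
    }
    _, err := jobs.RunJob(in)
    return err
}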
type RunJobOutput ¶
type RunLifecycleState ¶
type RunLifecycleState uint8
const (
    RunLifecycleStatePending RunLifecycleState = iota + 1
    RunLifecycleStateRunning
    RunLifecycleStateTerminating
    RunLifecycleStateTerminated
    RunLifecycleStateSkipped
    RunLifecycleStateInternalError
)
type RunParameters ¶
type RunResultState ¶
type RunResultState uint8
const (
    RunResultStateSuccess RunResultState = iota + 1
    RunResultStateFailed
    RunResultStateTimedout
    RunResultStateCanceled
)
type RunState ¶
type RunState struct {
    LifecycleState RunLifecycleState `json:"lifecycle_state"`
    ResultState    RunResultState    `json:"result_state,omitempty"`
    StateMessage   string            `json:"state_message"`
}
func (RunState) IsCompleted ¶
type S3StorageInfo ¶
type S3StorageInfo struct {
    // S3 destination, e.g. s3://my-bucket/some-prefix. The cluster must be
    // configured with an IAM role, and the role must have write access to
    // the destination. You cannot use AWS keys.
    Destination string `json:"destination"`
    // S3 region, e.g. us-west-2. Either region or endpoint must be set. If
    // both are set, endpoint is used.
    Region string `json:"region,omitempty"`
    // S3 endpoint, e.g. https://s3-us-west-2.amazonaws.com. Either region or
    // endpoint needs to be set. If both are set, endpoint is used.
    Endpoint string `json:"endpoint,omitempty"`
    // (Optional) Enable server-side encryption, false by default.
    EnableEncryption bool `json:"enable_encryption,omitempty"`
    // (Optional) The encryption type; it can be sse-s3 or sse-kms. It is used
    // only when encryption is enabled, and the default type is sse-s3.
    EncryptionType string `json:"encryption_type,omitempty"`
    // (Optional) KMS key used if encryption is enabled and the encryption
    // type is set to sse-kms.
    KMSKey string `json:"kms_key,omitempty"`
    // (Optional) Set a canned access control list, e.g.
    // bucket-owner-full-control. If canned_acl is set, the cluster IAM role
    // must have the s3:PutObjectAcl permission on the destination bucket and
    // prefix. The full list of possible canned ACLs can be found at
    // https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
    // By default only the object owner gets full control. If you are using a
    // cross-account role for writing data, you may want to set
    // bucket-owner-full-control so the bucket owner is able to read the logs.
    CannedACL string `json:"canned_acl,omitempty"`
}
S3StorageInfo describes where and how to store information in S3
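As a sketch, a log destination using SSE-KMS encryption could be described like this; the bucket, region, and KMS key ARN are placeholders, and the package is assumed to be imported as databricks.

func exampleS3LogDestination() databricks.S3StorageInfo {
    return databricks.S3StorageInfo{
        Destination:      "s3://my-log-bucket/cluster-logs", // placeholder bucket and prefix
        Region:           "us-west-2",
        EnableEncryption: true,
        EncryptionType:   "sse-kms",
        KMSKey:           "arn:aws:kms:us-west-2:111122223333:key/placeholder",
        CannedACL:        "bucket-owner-full-control",
    }
}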
type SecretMetadata ¶
type SecretMetadata struct {
    Key                  string    `json:"key"`
    LastUpdatedTimestamp time.Time `json:"last_updated_timestamp"`
}
SecretMetadata is information about a secret, without the actual secret value
func (SecretMetadata) MarshalJSON ¶
func (s SecretMetadata) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler for SecretMetadata
func (*SecretMetadata) UnmarshalJSON ¶
func (s *SecretMetadata) UnmarshalJSON(data []byte) error
UnmarshalJSON implements json.Unmarshaler for SecretMetadata
type SecretsService ¶
type SecretsService service
SecretsService provides client behavior for the Databricks Secrets API
func (*SecretsService) CreateACL ¶
func (s *SecretsService) CreateACL(in *CreateACLInput) (*CreateACLOutput, error)
CreateACL sends a create secret ACL request to the Databricks Secrets API using the provided CreateACLInput. It returns a CreateACLOutput on success, or an error otherwise.
func (*SecretsService) CreateScope ¶
func (s *SecretsService) CreateScope(in *CreateScopeInput) (*CreateScopeOutput, error)
CreateScope sends a create secret scope request to the Databricks Secrets API using the provided CreateScopeInput. It returns a CreateScopeOutput on success, or an error otherwise.
func (*SecretsService) CreateSecret ¶
func (s *SecretsService) CreateSecret(in *CreateSecretInput) (*CreateSecretOutput, error)
CreateSecret sends a create secret request to the Databricks Secrets API using the provided CreateSecretInput. It returns a CreateSecretOutput on success, or an error otherwise.
func (*SecretsService) DeleteACL ¶
func (s *SecretsService) DeleteACL(in *DeleteACLInput) (*DeleteACLOutput, error)
DeleteACL sends a delete secret ACL request to the Databricks Secrets API using the provided DeleteACLInput. It returns a DeleteACLOutput on success, or an error otherwise.
func (*SecretsService) DeleteScope ¶
func (s *SecretsService) DeleteScope(in *DeleteScopeInput) (*DeleteScopeOutput, error)
DeleteScope sends a delete secret scope request to the Databricks Secrets API using the provided DeleteScopeInput. It returns a DeleteScopeOutput on success, or an error otherwise.
func (*SecretsService) DeleteSecret ¶
func (s *SecretsService) DeleteSecret(in *DeleteSecretInput) (*DeleteSecretOutput, error)
DeleteSecret sends a delete secret request to the Databricks Secrets API using the provided DeleteSecretInput. It returns a DeleteSecretOutput on success, or an error otherwise.
func (*SecretsService) GetACL ¶
func (s *SecretsService) GetACL(in *GetACLInput) (*GetACLOutput, error)
GetACL sends a get secret ACL request to the Databricks Secrets API using the provided GetACLInput. It returns a GetACLOutput on success, or an error otherwise.
func (*SecretsService) ListACLs ¶
func (s *SecretsService) ListACLs(in *ListACLsInput) (*ListACLsOutput, error)
ListACLs sends a list secret ACLs request to the Databricks Secrets API using the provided ListACLsInput. It returns a ListACLsOutput on success, or an error otherwise.
func (*SecretsService) ListScopes ¶
func (s *SecretsService) ListScopes(in *ListScopesInput) (*ListScopesOutput, error)
ListScopes sends a list secret scopes request to the Databricks Secrets API using the provided ListScopesInput. It returns a ListScopesOutput on success, or an error otherwise.
func (*SecretsService) ListSecrets ¶
func (s *SecretsService) ListSecrets(in *ListSecretsInput) (*ListSecretsOutput, error)
ListSecrets sends a list secrets request to the Databricks Secrets API using the provided ListSecretsInput. It returns a ListSecretsOutput on success, or an error otherwise.
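A hedged sketch of listing the secret keys in a scope. It assumes the package is imported as databricks, fmt is imported, the scope name is a placeholder, and the *SecretsService has been obtained from a configured Client.

func printSecretKeys(secrets *databricks.SecretsService, scope string) error {
    in := &databricks.ListSecretsInput{Scope: scope}
    if err := in.Validate(); err != nil {
        return err
    }
    out, err := secrets.ListSecrets(in)
    if err != nil {
        return err
    }
    for _, md := range out.Secrets {
        // Only metadata is returned; secret values are never listed.
        fmt.Printf("%s (updated %s)\n", md.Key, md.LastUpdatedTimestamp)
    }
    return nil
}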
type SparkConf ¶
type SparkConf map[string]interface{}
SparkConf aliases map[string]interface{} and holds Spark configuration key-value pairs.
type SparkJarTask ¶
type SparkNode ¶
type SparkNode struct {
    PrivateIP         net.IP                 `json:"private_ip"`
    PublicDNS         string                 `json:"public_dns"`
    NodeID            string                 `json:"node_id"`
    InstanceID        string                 `json:"instance_id"`
    StartTimestamp    time.Time              `json:"start_timestamp"`
    NodeAwsAttributes SparkNodeAwsAttributes `json:"node_aws_attributes"`
    HostPrivateIP     net.IP                 `json:"host_private_ip"`
}
func (SparkNode) MarshalJSON ¶
func (*SparkNode) UnmarshalJSON ¶
type SparkNodeAwsAttributes ¶
type SparkNodeAwsAttributes struct {
IsSpot bool `json:"is_spot"`
}
type SparkPythonTask ¶
type SparkSubmitTask ¶
type SparkSubmitTask struct {
Parameters []string `json:"parameters,omitempty"`
}
type SparkVersion ¶
type StartClusterInput ¶
type StartClusterInput struct {
ClusterIDParam
}
type StartClusterOutput ¶
type StartClusterOutput struct{}
type SubmitJobRunInput ¶
type SubmitJobRunInput struct {
    ExistingClusterID string           `json:"existing_cluster_id,omitempty"`
    NewCluster        *NewCluster      `json:"new_cluster,omitempty"`
    NotebookTask      *NotebookTask    `json:"notebook_task,omitempty"`
    SparkJarTask      *SparkJarTask    `json:"spark_jar_task,omitempty"`
    SparkPythonTask   *SparkPythonTask `json:"spark_python_task,omitempty"`
    SparkSubmitTask   *SparkSubmitTask `json:"spark_submit_task,omitempty"`
    RunName           string           `json:"run_name,omitempty"`
    Libraries         []Library        `json:"libraries,omitempty"`
    TimeoutSeconds    int              `json:"timeout_seconds,omitempty"`
    IdempotencyToken  string           `json:"idempotency_token,omitempty"`
}
func (SubmitJobRunInput) Validate ¶
func (s SubmitJobRunInput) Validate() error
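A minimal sketch of submitting a one-off run on a fresh cluster via spark-submit and reading back the run ID. The Spark version, node type, and parameters are placeholders, the package is assumed to be imported as databricks, and the *JobsService is assumed to come from a configured Client.

func submitOneOffRun(jobs *databricks.JobsService) (int, error) {
    in := &databricks.SubmitJobRunInput{
        RunName: "ad-hoc-backfill", // placeholder run name
        NewCluster: &databricks.NewCluster{
            SparkVersion: "6.4.x-scala2.11", // placeholder runtime version
            NodeTypeID:   "i3.xlarge",       // placeholder node type
            NumWorkers:   2,
        },
        SparkSubmitTask: &databricks.SparkSubmitTask{
            Parameters: []string{"--class", "com.example.Main", "dbfs:/jars/app.jar"}, // placeholders
        },
        TimeoutSeconds: 3600,
    }
    if err := in.Validate(); err != nil {
        return 0, err
    }
    out, err := jobs.SubmitJobRun(in)
    if err != nil {
        return 0, err
    }
    return out.RunID, nil
}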
type SubmitJobRunOutput ¶
type SubmitJobRunOutput struct {
RunID int `json:"run_id"`
}
type TerminateClusterInput ¶
type TerminateClusterInput struct {
ClusterIDParam
}
type TerminateClusterOutput ¶
type TerminateClusterOutput struct{}
type TerminationCode ¶
type TerminationCode uint8
const (
    TerminationCodeUserRequest TerminationCode = iota + 1
    TerminationCodeJobFinished
    TerminationCodeInactivity
    TerminationCodeCloudProviderShutdown
    TerminationCodeCommunicationLost
    TerminationCodeCloudProviderLaunchFailure
    TerminationCodeSparkStartupFailure
    TerminationCodeInvalidArgument
    TerminationCodeUnexpectedLaunchFailure
    TerminationCodeInternalError
    TerminationCodeSparkError
    TerminationCodeMetastoreComponentUnhealthy
    TerminationCodeDBFSComponentUnhealthy
    TerminationCodeDriverUnreachable
    TerminationCodeDriverUnresponsive
    TerminationCodeInstanceUnreachable
    TerminationCodeContainerLaunchFailure
    TerminationCodeInstancePoolClusterFailure
    TerminationCodeRequestRejected
    TerminationCodeInitScriptFailure
    TerminationCodeTrialExpired
)
type TerminationParameter ¶
type TerminationParameter string
const (
    TerminationParameterUsername                 TerminationParameter = "username"
    TerminationParameterAWSApiErrorCode          TerminationParameter = "aws_api_error_code"
    TerminationParameterAWSInstanceStateReason   TerminationParameter = "aws_instance_state_reason"
    TerminationParameterAWSSpotRequestStatus     TerminationParameter = "aws_spot_request_status"
    TerminationParameterAWSSpotRequestFaultCode  TerminationParameter = "aws_spot_request_fault_code"
    TerminationParameterAWSImpairedStatusDetails TerminationParameter = "aws_impaired_status_details"
    TerminationParameterAWSInstanceStatusEvent   TerminationParameter = "aws_instance_status_event"
    TerminationParameterAWSErrorMessage          TerminationParameter = "aws_error_message"
    TerminationParameterDatabricksErrorMessage   TerminationParameter = "databricks_error_message"
    TerminationParameterInactivityDurationMin    TerminationParameter = "inactivity_duration_min"
    TerminationParameterInstanceID               TerminationParameter = "instance_id"
    TerminationParameterInstancePoolID           TerminationParameter = "instance_pool_id"
    TerminationParameterInstancePoolErrorCode    TerminationParameter = "instance_pool_error_code"
)
type TerminationReason ¶
type TerminationReason struct {
    Code       TerminationCode `json:"code"`
    Parameters []ParameterPair `json:"parameters"`
}
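Because Parameters is a list of key/value pairs rather than a map, pulling out a specific detail such as the Databricks error message is a small loop; a hedged sketch, assuming the package is imported as databricks:

func databricksErrorMessage(reason databricks.TerminationReason) (string, bool) {
    for _, p := range reason.Parameters {
        if p.Key == databricks.TerminationParameterDatabricksErrorMessage {
            return p.Value, true
        }
    }
    return "", false // the cluster terminated for a reason without this detail
}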
type TokenInfo ¶
type TokenInfo struct {
    TokenID      string     `json:"token_id"`
    CreationTime time.Time  `json:"creation_time"`
    ExpiryTime   *time.Time `json:"expiry_time"`
    Comment      string     `json:"comment"`
}
func (TokenInfo) MarshalJSON ¶
MarshalJSON implements json.Marshaler for TokenInfo
func (*TokenInfo) UnmarshalJSON ¶
UnmarshalJSON implements json.Unmarshaler for TokenInfo
type TokensService ¶
type TokensService service
func (*TokensService) CreateToken ¶
func (s *TokensService) CreateToken(in *CreateTokenInput) (*CreateTokenOutput, error)
func (*TokensService) ListTokens ¶
func (s *TokensService) ListTokens(in *ListTokensInput) (*ListTokensOutput, error)
func (*TokensService) RevokeToken ¶
func (s *TokensService) RevokeToken(in *RevokeTokenInput) (*RevokeTokenOutput, error)
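A hedged sketch of revoking every token whose comment matches a given string. It assumes the package is imported as databricks and that the *TokensService has been obtained from a configured Client; the comment value passed in is whatever the caller chooses.

func revokeTokensByComment(tokens *databricks.TokensService, comment string) error {
    list, err := tokens.ListTokens(&databricks.ListTokensInput{})
    if err != nil {
        return err
    }
    for _, info := range list.TokenInfos {
        if info.Comment != comment {
            continue
        }
        in := &databricks.RevokeTokenInput{TokenID: info.TokenID}
        if _, err := tokens.RevokeToken(in); err != nil {
            return err
        }
    }
    return nil
}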
type TriggerType ¶
type TriggerType uint8
const (
    TriggerTypePeriodic TriggerType = iota + 1
    TriggerTypeOneTime
    TriggerTypeRetry
)
type UninstallLibraryInput ¶
type UninstallLibraryInput struct {
ClusterLibrariesParam
}
type UninstallLibraryOutput ¶
type UninstallLibraryOutput struct{}
type UnpinClusterInput ¶
type UnpinClusterInput struct {
ClusterIDParam
}
UnpinClusterInput is the input to the UnpinCluster operation
type UnpinClusterOutput ¶
type UnpinClusterOutput struct{}
UnpinClusterOutput is the output returned from the UnpinCluster operation
type Validator ¶
type Validator interface {
Validate() error
}
Validator inspects the receiver to determine if its attributes are within known bounds, and returns an error describing any violation.
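A hedged sketch of what satisfying this interface looks like for a caller-defined input type; the type and its rule are hypothetical and not part of this package, and the errors package is assumed to be imported.

// deleteWidgetInput is a hypothetical input type; it satisfies Validator by
// rejecting an empty widget ID before any request is made.
type deleteWidgetInput struct {
    WidgetID string `json:"widget_id"`
}

func (d deleteWidgetInput) Validate() error {
    if d.WidgetID == "" {
        return errors.New("widget_id must not be empty")
    }
    return nil
}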
type ViewsToExport ¶
type ViewsToExport uint8
const (
    ViewsToExportCode ViewsToExport = iota
    ViewsToExportDashboards
    ViewsToExportAll
)
func (ViewsToExport) MarshalText ¶
func (v ViewsToExport) MarshalText() (text []byte, err error)
func (ViewsToExport) String ¶
func (v ViewsToExport) String() string
func (*ViewsToExport) UnmarshalText ¶
func (v *ViewsToExport) UnmarshalText(text []byte) error
type WorkspacesService ¶
type WorkspacesService service
func (*WorkspacesService) CreateDirectory ¶
func (s *WorkspacesService) CreateDirectory(in *CreateDirectoryInput) (*CreateDirectoryOutput, error)
func (*WorkspacesService) DeleteNode ¶
func (s *WorkspacesService) DeleteNode(in *DeleteNodeInput) (*DeleteNodeOutput, error)
func (*WorkspacesService) DescribeNode ¶
func (s *WorkspacesService) DescribeNode(in *DescribeNodeInput) (*DescribeNodeOutput, error)
func (*WorkspacesService) ExportNode ¶
func (s *WorkspacesService) ExportNode(in *ExportNodeInput) (*ExportNodeOutput, error)
func (*WorkspacesService) ImportNode ¶
func (s *WorkspacesService) ImportNode(in *ImportNodeInput) (*ImportNodeOutput, error)
func (*WorkspacesService) ListObjects ¶
func (s *WorkspacesService) ListObjects(in *ListObjectsInput) (*ListObjectsOutput, error)
type WriteDBFSFileInput ¶
type WriteDBFSFileOutput ¶
type WriteDBFSFileOutput struct{}
Source Files ¶
- client.go
- clusters_api.go
- clusters_client.go
- clusters_models.go
- dbfs_api.go
- dbfs_client.go
- dbfs_models.go
- groups_api.go
- groups_client.go
- groups_models.go
- jobs_api.go
- jobs_client.go
- jobs_models.go
- libraries_api.go
- libraries_client.go
- libraries_models.go
- pools_api.go
- pools_client.go
- pools_models.go
- profiles_api.go
- profiles_client.go
- profiles_models.go
- secrets_api.go
- secrets_client.go
- secrets_models.go
- tokens_api.go
- tokens_client.go
- tokens_models.go
- workspaces_api.go
- workspaces_client.go
- workspaces_models.go