Documentation ¶
Overview ¶
Package dataproc is a generated protocol buffer package.
It is generated from these files:
google/cloud/dataproc/v1beta2/clusters.proto
google/cloud/dataproc/v1beta2/jobs.proto
google/cloud/dataproc/v1beta2/operations.proto
google/cloud/dataproc/v1beta2/workflow_templates.proto
It has these top-level messages:
Cluster
ClusterConfig
GceClusterConfig
InstanceGroupConfig
ManagedGroupConfig
AcceleratorConfig
DiskConfig
LifecycleConfig
NodeInitializationAction
ClusterStatus
SoftwareConfig
ClusterMetrics
CreateClusterRequest
UpdateClusterRequest
DeleteClusterRequest
GetClusterRequest
ListClustersRequest
ListClustersResponse
DiagnoseClusterRequest
DiagnoseClusterResults
LoggingConfig
HadoopJob
SparkJob
PySparkJob
QueryList
HiveJob
SparkSqlJob
PigJob
JobPlacement
JobStatus
JobReference
YarnApplication
Job
JobScheduling
SubmitJobRequest
GetJobRequest
ListJobsRequest
UpdateJobRequest
ListJobsResponse
CancelJobRequest
DeleteJobRequest
ClusterOperationStatus
ClusterOperationMetadata
WorkflowTemplate
WorkflowTemplatePlacement
ManagedCluster
ClusterSelector
OrderedJob
WorkflowMetadata
ClusterOperation
WorkflowGraph
WorkflowNode
CreateWorkflowTemplateRequest
GetWorkflowTemplateRequest
InstantiateWorkflowTemplateRequest
UpdateWorkflowTemplateRequest
ListWorkflowTemplatesRequest
ListWorkflowTemplatesResponse
DeleteWorkflowTemplateRequest
Index ¶
- Variables
- func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)
- func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)
- func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)
- type AcceleratorConfig
- type CancelJobRequest
- func (*CancelJobRequest) Descriptor() ([]byte, []int)
- func (m *CancelJobRequest) GetJobId() string
- func (m *CancelJobRequest) GetProjectId() string
- func (m *CancelJobRequest) GetRegion() string
- func (*CancelJobRequest) ProtoMessage()
- func (m *CancelJobRequest) Reset()
- func (m *CancelJobRequest) String() string
- type Cluster
- func (*Cluster) Descriptor() ([]byte, []int)
- func (m *Cluster) GetClusterName() string
- func (m *Cluster) GetClusterUuid() string
- func (m *Cluster) GetConfig() *ClusterConfig
- func (m *Cluster) GetLabels() map[string]string
- func (m *Cluster) GetMetrics() *ClusterMetrics
- func (m *Cluster) GetProjectId() string
- func (m *Cluster) GetStatus() *ClusterStatus
- func (m *Cluster) GetStatusHistory() []*ClusterStatus
- func (*Cluster) ProtoMessage()
- func (m *Cluster) Reset()
- func (m *Cluster) String() string
- type ClusterConfig
- func (*ClusterConfig) Descriptor() ([]byte, []int)
- func (m *ClusterConfig) GetConfigBucket() string
- func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig
- func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction
- func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig
- func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig
- func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig
- func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig
- func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig
- func (*ClusterConfig) ProtoMessage()
- func (m *ClusterConfig) Reset()
- func (m *ClusterConfig) String() string
- type ClusterControllerClient
- type ClusterControllerServer
- type ClusterMetrics
- type ClusterOperation
- func (*ClusterOperation) Descriptor() ([]byte, []int)
- func (m *ClusterOperation) GetDone() bool
- func (m *ClusterOperation) GetError() string
- func (m *ClusterOperation) GetOperationId() string
- func (*ClusterOperation) ProtoMessage()
- func (m *ClusterOperation) Reset()
- func (m *ClusterOperation) String() string
- type ClusterOperationMetadata
- func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)
- func (m *ClusterOperationMetadata) GetClusterName() string
- func (m *ClusterOperationMetadata) GetClusterUuid() string
- func (m *ClusterOperationMetadata) GetDescription() string
- func (m *ClusterOperationMetadata) GetLabels() map[string]string
- func (m *ClusterOperationMetadata) GetOperationType() string
- func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus
- func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus
- func (m *ClusterOperationMetadata) GetWarnings() []string
- func (*ClusterOperationMetadata) ProtoMessage()
- func (m *ClusterOperationMetadata) Reset()
- func (m *ClusterOperationMetadata) String() string
- type ClusterOperationStatus
- func (*ClusterOperationStatus) Descriptor() ([]byte, []int)
- func (m *ClusterOperationStatus) GetDetails() string
- func (m *ClusterOperationStatus) GetInnerState() string
- func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State
- func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf5.Timestamp
- func (*ClusterOperationStatus) ProtoMessage()
- func (m *ClusterOperationStatus) Reset()
- func (m *ClusterOperationStatus) String() string
- type ClusterOperationStatus_State
- type ClusterSelector
- type ClusterStatus
- func (*ClusterStatus) Descriptor() ([]byte, []int)
- func (m *ClusterStatus) GetDetail() string
- func (m *ClusterStatus) GetState() ClusterStatus_State
- func (m *ClusterStatus) GetStateStartTime() *google_protobuf5.Timestamp
- func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate
- func (*ClusterStatus) ProtoMessage()
- func (m *ClusterStatus) Reset()
- func (m *ClusterStatus) String() string
- type ClusterStatus_State
- type ClusterStatus_Substate
- type CreateClusterRequest
- func (*CreateClusterRequest) Descriptor() ([]byte, []int)
- func (m *CreateClusterRequest) GetCluster() *Cluster
- func (m *CreateClusterRequest) GetProjectId() string
- func (m *CreateClusterRequest) GetRegion() string
- func (*CreateClusterRequest) ProtoMessage()
- func (m *CreateClusterRequest) Reset()
- func (m *CreateClusterRequest) String() string
- type CreateWorkflowTemplateRequest
- func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
- func (m *CreateWorkflowTemplateRequest) GetParent() string
- func (m *CreateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
- func (*CreateWorkflowTemplateRequest) ProtoMessage()
- func (m *CreateWorkflowTemplateRequest) Reset()
- func (m *CreateWorkflowTemplateRequest) String() string
- type DeleteClusterRequest
- func (*DeleteClusterRequest) Descriptor() ([]byte, []int)
- func (m *DeleteClusterRequest) GetClusterName() string
- func (m *DeleteClusterRequest) GetClusterUuid() string
- func (m *DeleteClusterRequest) GetProjectId() string
- func (m *DeleteClusterRequest) GetRegion() string
- func (*DeleteClusterRequest) ProtoMessage()
- func (m *DeleteClusterRequest) Reset()
- func (m *DeleteClusterRequest) String() string
- type DeleteJobRequest
- func (*DeleteJobRequest) Descriptor() ([]byte, []int)
- func (m *DeleteJobRequest) GetJobId() string
- func (m *DeleteJobRequest) GetProjectId() string
- func (m *DeleteJobRequest) GetRegion() string
- func (*DeleteJobRequest) ProtoMessage()
- func (m *DeleteJobRequest) Reset()
- func (m *DeleteJobRequest) String() string
- type DeleteWorkflowTemplateRequest
- func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)
- func (m *DeleteWorkflowTemplateRequest) GetName() string
- func (m *DeleteWorkflowTemplateRequest) GetVersion() int32
- func (*DeleteWorkflowTemplateRequest) ProtoMessage()
- func (m *DeleteWorkflowTemplateRequest) Reset()
- func (m *DeleteWorkflowTemplateRequest) String() string
- type DiagnoseClusterRequest
- func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)
- func (m *DiagnoseClusterRequest) GetClusterName() string
- func (m *DiagnoseClusterRequest) GetProjectId() string
- func (m *DiagnoseClusterRequest) GetRegion() string
- func (*DiagnoseClusterRequest) ProtoMessage()
- func (m *DiagnoseClusterRequest) Reset()
- func (m *DiagnoseClusterRequest) String() string
- type DiagnoseClusterResults
- type DiskConfig
- type GceClusterConfig
- func (*GceClusterConfig) Descriptor() ([]byte, []int)
- func (m *GceClusterConfig) GetInternalIpOnly() bool
- func (m *GceClusterConfig) GetMetadata() map[string]string
- func (m *GceClusterConfig) GetNetworkUri() string
- func (m *GceClusterConfig) GetServiceAccount() string
- func (m *GceClusterConfig) GetServiceAccountScopes() []string
- func (m *GceClusterConfig) GetSubnetworkUri() string
- func (m *GceClusterConfig) GetTags() []string
- func (m *GceClusterConfig) GetZoneUri() string
- func (*GceClusterConfig) ProtoMessage()
- func (m *GceClusterConfig) Reset()
- func (m *GceClusterConfig) String() string
- type GetClusterRequest
- func (*GetClusterRequest) Descriptor() ([]byte, []int)
- func (m *GetClusterRequest) GetClusterName() string
- func (m *GetClusterRequest) GetProjectId() string
- func (m *GetClusterRequest) GetRegion() string
- func (*GetClusterRequest) ProtoMessage()
- func (m *GetClusterRequest) Reset()
- func (m *GetClusterRequest) String() string
- type GetJobRequest
- type GetWorkflowTemplateRequest
- func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)
- func (m *GetWorkflowTemplateRequest) GetName() string
- func (m *GetWorkflowTemplateRequest) GetVersion() int32
- func (*GetWorkflowTemplateRequest) ProtoMessage()
- func (m *GetWorkflowTemplateRequest) Reset()
- func (m *GetWorkflowTemplateRequest) String() string
- type HadoopJob
- func (*HadoopJob) Descriptor() ([]byte, []int)
- func (m *HadoopJob) GetArchiveUris() []string
- func (m *HadoopJob) GetArgs() []string
- func (m *HadoopJob) GetDriver() isHadoopJob_Driver
- func (m *HadoopJob) GetFileUris() []string
- func (m *HadoopJob) GetJarFileUris() []string
- func (m *HadoopJob) GetLoggingConfig() *LoggingConfig
- func (m *HadoopJob) GetMainClass() string
- func (m *HadoopJob) GetMainJarFileUri() string
- func (m *HadoopJob) GetProperties() map[string]string
- func (*HadoopJob) ProtoMessage()
- func (m *HadoopJob) Reset()
- func (m *HadoopJob) String() string
- func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type HadoopJob_MainClass
- type HadoopJob_MainJarFileUri
- type HiveJob
- func (*HiveJob) Descriptor() ([]byte, []int)
- func (m *HiveJob) GetContinueOnFailure() bool
- func (m *HiveJob) GetJarFileUris() []string
- func (m *HiveJob) GetProperties() map[string]string
- func (m *HiveJob) GetQueries() isHiveJob_Queries
- func (m *HiveJob) GetQueryFileUri() string
- func (m *HiveJob) GetQueryList() *QueryList
- func (m *HiveJob) GetScriptVariables() map[string]string
- func (*HiveJob) ProtoMessage()
- func (m *HiveJob) Reset()
- func (m *HiveJob) String() string
- func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type HiveJob_QueryFileUri
- type HiveJob_QueryList
- type InstanceGroupConfig
- func (*InstanceGroupConfig) Descriptor() ([]byte, []int)
- func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig
- func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig
- func (m *InstanceGroupConfig) GetImageUri() string
- func (m *InstanceGroupConfig) GetInstanceNames() []string
- func (m *InstanceGroupConfig) GetIsPreemptible() bool
- func (m *InstanceGroupConfig) GetMachineTypeUri() string
- func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig
- func (m *InstanceGroupConfig) GetNumInstances() int32
- func (*InstanceGroupConfig) ProtoMessage()
- func (m *InstanceGroupConfig) Reset()
- func (m *InstanceGroupConfig) String() string
- type InstantiateWorkflowTemplateRequest
- func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
- func (m *InstantiateWorkflowTemplateRequest) GetInstanceId() string
- func (m *InstantiateWorkflowTemplateRequest) GetName() string
- func (m *InstantiateWorkflowTemplateRequest) GetVersion() int32
- func (*InstantiateWorkflowTemplateRequest) ProtoMessage()
- func (m *InstantiateWorkflowTemplateRequest) Reset()
- func (m *InstantiateWorkflowTemplateRequest) String() string
- type Job
- func (*Job) Descriptor() ([]byte, []int)
- func (m *Job) GetDriverControlFilesUri() string
- func (m *Job) GetDriverOutputResourceUri() string
- func (m *Job) GetHadoopJob() *HadoopJob
- func (m *Job) GetHiveJob() *HiveJob
- func (m *Job) GetLabels() map[string]string
- func (m *Job) GetPigJob() *PigJob
- func (m *Job) GetPlacement() *JobPlacement
- func (m *Job) GetPysparkJob() *PySparkJob
- func (m *Job) GetReference() *JobReference
- func (m *Job) GetScheduling() *JobScheduling
- func (m *Job) GetSparkJob() *SparkJob
- func (m *Job) GetSparkSqlJob() *SparkSqlJob
- func (m *Job) GetStatus() *JobStatus
- func (m *Job) GetStatusHistory() []*JobStatus
- func (m *Job) GetTypeJob() isJob_TypeJob
- func (m *Job) GetYarnApplications() []*YarnApplication
- func (*Job) ProtoMessage()
- func (m *Job) Reset()
- func (m *Job) String() string
- func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type JobControllerClient
- type JobControllerServer
- type JobPlacement
- type JobReference
- type JobScheduling
- type JobStatus
- func (*JobStatus) Descriptor() ([]byte, []int)
- func (m *JobStatus) GetDetails() string
- func (m *JobStatus) GetState() JobStatus_State
- func (m *JobStatus) GetStateStartTime() *google_protobuf5.Timestamp
- func (m *JobStatus) GetSubstate() JobStatus_Substate
- func (*JobStatus) ProtoMessage()
- func (m *JobStatus) Reset()
- func (m *JobStatus) String() string
- type JobStatus_State
- type JobStatus_Substate
- type Job_HadoopJob
- type Job_HiveJob
- type Job_PigJob
- type Job_PysparkJob
- type Job_SparkJob
- type Job_SparkSqlJob
- type LifecycleConfig
- func (*LifecycleConfig) Descriptor() ([]byte, []int)
- func (m *LifecycleConfig) GetAutoDeleteTime() *google_protobuf5.Timestamp
- func (m *LifecycleConfig) GetAutoDeleteTtl() *google_protobuf3.Duration
- func (m *LifecycleConfig) GetIdleDeleteTtl() *google_protobuf3.Duration
- func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl
- func (*LifecycleConfig) ProtoMessage()
- func (m *LifecycleConfig) Reset()
- func (m *LifecycleConfig) String() string
- func (*LifecycleConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type LifecycleConfig_AutoDeleteTime
- type LifecycleConfig_AutoDeleteTtl
- type ListClustersRequest
- func (*ListClustersRequest) Descriptor() ([]byte, []int)
- func (m *ListClustersRequest) GetFilter() string
- func (m *ListClustersRequest) GetPageSize() int32
- func (m *ListClustersRequest) GetPageToken() string
- func (m *ListClustersRequest) GetProjectId() string
- func (m *ListClustersRequest) GetRegion() string
- func (*ListClustersRequest) ProtoMessage()
- func (m *ListClustersRequest) Reset()
- func (m *ListClustersRequest) String() string
- type ListClustersResponse
- func (*ListClustersResponse) Descriptor() ([]byte, []int)
- func (m *ListClustersResponse) GetClusters() []*Cluster
- func (m *ListClustersResponse) GetNextPageToken() string
- func (*ListClustersResponse) ProtoMessage()
- func (m *ListClustersResponse) Reset()
- func (m *ListClustersResponse) String() string
- type ListJobsRequest
- func (*ListJobsRequest) Descriptor() ([]byte, []int)
- func (m *ListJobsRequest) GetClusterName() string
- func (m *ListJobsRequest) GetFilter() string
- func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher
- func (m *ListJobsRequest) GetPageSize() int32
- func (m *ListJobsRequest) GetPageToken() string
- func (m *ListJobsRequest) GetProjectId() string
- func (m *ListJobsRequest) GetRegion() string
- func (*ListJobsRequest) ProtoMessage()
- func (m *ListJobsRequest) Reset()
- func (m *ListJobsRequest) String() string
- type ListJobsRequest_JobStateMatcher
- type ListJobsResponse
- type ListWorkflowTemplatesRequest
- func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)
- func (m *ListWorkflowTemplatesRequest) GetPageSize() int32
- func (m *ListWorkflowTemplatesRequest) GetPageToken() string
- func (m *ListWorkflowTemplatesRequest) GetParent() string
- func (*ListWorkflowTemplatesRequest) ProtoMessage()
- func (m *ListWorkflowTemplatesRequest) Reset()
- func (m *ListWorkflowTemplatesRequest) String() string
- type ListWorkflowTemplatesResponse
- func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)
- func (m *ListWorkflowTemplatesResponse) GetNextPageToken() string
- func (m *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate
- func (*ListWorkflowTemplatesResponse) ProtoMessage()
- func (m *ListWorkflowTemplatesResponse) Reset()
- func (m *ListWorkflowTemplatesResponse) String() string
- type LoggingConfig
- type LoggingConfig_Level
- type ManagedCluster
- func (*ManagedCluster) Descriptor() ([]byte, []int)
- func (m *ManagedCluster) GetClusterName() string
- func (m *ManagedCluster) GetConfig() *ClusterConfig
- func (m *ManagedCluster) GetLabels() map[string]string
- func (*ManagedCluster) ProtoMessage()
- func (m *ManagedCluster) Reset()
- func (m *ManagedCluster) String() string
- type ManagedGroupConfig
- func (*ManagedGroupConfig) Descriptor() ([]byte, []int)
- func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string
- func (m *ManagedGroupConfig) GetInstanceTemplateName() string
- func (*ManagedGroupConfig) ProtoMessage()
- func (m *ManagedGroupConfig) Reset()
- func (m *ManagedGroupConfig) String() string
- type NodeInitializationAction
- func (*NodeInitializationAction) Descriptor() ([]byte, []int)
- func (m *NodeInitializationAction) GetExecutableFile() string
- func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf3.Duration
- func (*NodeInitializationAction) ProtoMessage()
- func (m *NodeInitializationAction) Reset()
- func (m *NodeInitializationAction) String() string
- type OrderedJob
- func (*OrderedJob) Descriptor() ([]byte, []int)
- func (m *OrderedJob) GetHadoopJob() *HadoopJob
- func (m *OrderedJob) GetHiveJob() *HiveJob
- func (m *OrderedJob) GetJobType() isOrderedJob_JobType
- func (m *OrderedJob) GetLabels() map[string]string
- func (m *OrderedJob) GetPigJob() *PigJob
- func (m *OrderedJob) GetPrerequisiteStepIds() []string
- func (m *OrderedJob) GetPysparkJob() *PySparkJob
- func (m *OrderedJob) GetScheduling() *JobScheduling
- func (m *OrderedJob) GetSparkJob() *SparkJob
- func (m *OrderedJob) GetSparkSqlJob() *SparkSqlJob
- func (m *OrderedJob) GetStepId() string
- func (*OrderedJob) ProtoMessage()
- func (m *OrderedJob) Reset()
- func (m *OrderedJob) String() string
- func (*OrderedJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type OrderedJob_HadoopJob
- type OrderedJob_HiveJob
- type OrderedJob_PigJob
- type OrderedJob_PysparkJob
- type OrderedJob_SparkJob
- type OrderedJob_SparkSqlJob
- type PigJob
- func (*PigJob) Descriptor() ([]byte, []int)
- func (m *PigJob) GetContinueOnFailure() bool
- func (m *PigJob) GetJarFileUris() []string
- func (m *PigJob) GetLoggingConfig() *LoggingConfig
- func (m *PigJob) GetProperties() map[string]string
- func (m *PigJob) GetQueries() isPigJob_Queries
- func (m *PigJob) GetQueryFileUri() string
- func (m *PigJob) GetQueryList() *QueryList
- func (m *PigJob) GetScriptVariables() map[string]string
- func (*PigJob) ProtoMessage()
- func (m *PigJob) Reset()
- func (m *PigJob) String() string
- func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type PigJob_QueryFileUri
- type PigJob_QueryList
- type PySparkJob
- func (*PySparkJob) Descriptor() ([]byte, []int)
- func (m *PySparkJob) GetArchiveUris() []string
- func (m *PySparkJob) GetArgs() []string
- func (m *PySparkJob) GetFileUris() []string
- func (m *PySparkJob) GetJarFileUris() []string
- func (m *PySparkJob) GetLoggingConfig() *LoggingConfig
- func (m *PySparkJob) GetMainPythonFileUri() string
- func (m *PySparkJob) GetProperties() map[string]string
- func (m *PySparkJob) GetPythonFileUris() []string
- func (*PySparkJob) ProtoMessage()
- func (m *PySparkJob) Reset()
- func (m *PySparkJob) String() string
- type QueryList
- type SoftwareConfig
- type SparkJob
- func (*SparkJob) Descriptor() ([]byte, []int)
- func (m *SparkJob) GetArchiveUris() []string
- func (m *SparkJob) GetArgs() []string
- func (m *SparkJob) GetDriver() isSparkJob_Driver
- func (m *SparkJob) GetFileUris() []string
- func (m *SparkJob) GetJarFileUris() []string
- func (m *SparkJob) GetLoggingConfig() *LoggingConfig
- func (m *SparkJob) GetMainClass() string
- func (m *SparkJob) GetMainJarFileUri() string
- func (m *SparkJob) GetProperties() map[string]string
- func (*SparkJob) ProtoMessage()
- func (m *SparkJob) Reset()
- func (m *SparkJob) String() string
- func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type SparkJob_MainClass
- type SparkJob_MainJarFileUri
- type SparkSqlJob
- func (*SparkSqlJob) Descriptor() ([]byte, []int)
- func (m *SparkSqlJob) GetJarFileUris() []string
- func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig
- func (m *SparkSqlJob) GetProperties() map[string]string
- func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries
- func (m *SparkSqlJob) GetQueryFileUri() string
- func (m *SparkSqlJob) GetQueryList() *QueryList
- func (m *SparkSqlJob) GetScriptVariables() map[string]string
- func (*SparkSqlJob) ProtoMessage()
- func (m *SparkSqlJob) Reset()
- func (m *SparkSqlJob) String() string
- func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type SparkSqlJob_QueryFileUri
- type SparkSqlJob_QueryList
- type SubmitJobRequest
- func (*SubmitJobRequest) Descriptor() ([]byte, []int)
- func (m *SubmitJobRequest) GetJob() *Job
- func (m *SubmitJobRequest) GetProjectId() string
- func (m *SubmitJobRequest) GetRegion() string
- func (*SubmitJobRequest) ProtoMessage()
- func (m *SubmitJobRequest) Reset()
- func (m *SubmitJobRequest) String() string
- type UpdateClusterRequest
- func (*UpdateClusterRequest) Descriptor() ([]byte, []int)
- func (m *UpdateClusterRequest) GetCluster() *Cluster
- func (m *UpdateClusterRequest) GetClusterName() string
- func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *google_protobuf3.Duration
- func (m *UpdateClusterRequest) GetProjectId() string
- func (m *UpdateClusterRequest) GetRegion() string
- func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf4.FieldMask
- func (*UpdateClusterRequest) ProtoMessage()
- func (m *UpdateClusterRequest) Reset()
- func (m *UpdateClusterRequest) String() string
- type UpdateJobRequest
- func (*UpdateJobRequest) Descriptor() ([]byte, []int)
- func (m *UpdateJobRequest) GetJob() *Job
- func (m *UpdateJobRequest) GetJobId() string
- func (m *UpdateJobRequest) GetProjectId() string
- func (m *UpdateJobRequest) GetRegion() string
- func (m *UpdateJobRequest) GetUpdateMask() *google_protobuf4.FieldMask
- func (*UpdateJobRequest) ProtoMessage()
- func (m *UpdateJobRequest) Reset()
- func (m *UpdateJobRequest) String() string
- type UpdateWorkflowTemplateRequest
- type WorkflowGraph
- type WorkflowMetadata
- func (*WorkflowMetadata) Descriptor() ([]byte, []int)
- func (m *WorkflowMetadata) GetClusterName() string
- func (m *WorkflowMetadata) GetCreateCluster() *ClusterOperation
- func (m *WorkflowMetadata) GetDeleteCluster() *ClusterOperation
- func (m *WorkflowMetadata) GetGraph() *WorkflowGraph
- func (m *WorkflowMetadata) GetState() WorkflowMetadata_State
- func (m *WorkflowMetadata) GetTemplate() string
- func (m *WorkflowMetadata) GetVersion() int32
- func (*WorkflowMetadata) ProtoMessage()
- func (m *WorkflowMetadata) Reset()
- func (m *WorkflowMetadata) String() string
- type WorkflowMetadata_State
- type WorkflowNode
- func (*WorkflowNode) Descriptor() ([]byte, []int)
- func (m *WorkflowNode) GetError() string
- func (m *WorkflowNode) GetJobId() string
- func (m *WorkflowNode) GetPrerequisiteStepIds() []string
- func (m *WorkflowNode) GetState() WorkflowNode_NodeState
- func (m *WorkflowNode) GetStepId() string
- func (*WorkflowNode) ProtoMessage()
- func (m *WorkflowNode) Reset()
- func (m *WorkflowNode) String() string
- type WorkflowNode_NodeState
- type WorkflowTemplate
- func (*WorkflowTemplate) Descriptor() ([]byte, []int)
- func (m *WorkflowTemplate) GetCreateTime() *google_protobuf5.Timestamp
- func (m *WorkflowTemplate) GetId() string
- func (m *WorkflowTemplate) GetJobs() []*OrderedJob
- func (m *WorkflowTemplate) GetLabels() map[string]string
- func (m *WorkflowTemplate) GetName() string
- func (m *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement
- func (m *WorkflowTemplate) GetUpdateTime() *google_protobuf5.Timestamp
- func (m *WorkflowTemplate) GetVersion() int32
- func (*WorkflowTemplate) ProtoMessage()
- func (m *WorkflowTemplate) Reset()
- func (m *WorkflowTemplate) String() string
- type WorkflowTemplatePlacement
- func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)
- func (m *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector
- func (m *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster
- func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement
- func (*WorkflowTemplatePlacement) ProtoMessage()
- func (m *WorkflowTemplatePlacement) Reset()
- func (m *WorkflowTemplatePlacement) String() string
- func (*WorkflowTemplatePlacement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type WorkflowTemplatePlacement_ClusterSelector
- type WorkflowTemplatePlacement_ManagedCluster
- type WorkflowTemplateServiceClient
- type WorkflowTemplateServiceServer
- type YarnApplication
- func (*YarnApplication) Descriptor() ([]byte, []int)
- func (m *YarnApplication) GetName() string
- func (m *YarnApplication) GetProgress() float32
- func (m *YarnApplication) GetState() YarnApplication_State
- func (m *YarnApplication) GetTrackingUrl() string
- func (*YarnApplication) ProtoMessage()
- func (m *YarnApplication) Reset()
- func (m *YarnApplication) String() string
- type YarnApplication_State
Constants ¶
This section is empty.
Variables ¶
var ClusterOperationStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "PENDING",
2: "RUNNING",
3: "DONE",
}
var ClusterOperationStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"PENDING": 1,
"RUNNING": 2,
"DONE": 3,
}
var ClusterStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "CREATING",
2: "RUNNING",
3: "ERROR",
4: "DELETING",
5: "UPDATING",
}
var ClusterStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"CREATING": 1,
"RUNNING": 2,
"ERROR": 3,
"DELETING": 4,
"UPDATING": 5,
}
var ClusterStatus_Substate_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNHEALTHY",
2: "STALE_STATUS",
}
var ClusterStatus_Substate_value = map[string]int32{
"UNSPECIFIED": 0,
"UNHEALTHY": 1,
"STALE_STATUS": 2,
}
var JobStatus_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "PENDING",
8: "SETUP_DONE",
2: "RUNNING",
3: "CANCEL_PENDING",
7: "CANCEL_STARTED",
4: "CANCELLED",
5: "DONE",
6: "ERROR",
9: "ATTEMPT_FAILURE",
}
var JobStatus_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"PENDING": 1,
"SETUP_DONE": 8,
"RUNNING": 2,
"CANCEL_PENDING": 3,
"CANCEL_STARTED": 7,
"CANCELLED": 4,
"DONE": 5,
"ERROR": 6,
"ATTEMPT_FAILURE": 9,
}
var JobStatus_Substate_name = map[int32]string{
0: "UNSPECIFIED",
1: "SUBMITTED",
2: "QUEUED",
3: "STALE_STATUS",
}
var JobStatus_Substate_value = map[string]int32{
"UNSPECIFIED": 0,
"SUBMITTED": 1,
"QUEUED": 2,
"STALE_STATUS": 3,
}
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
0: "ALL",
1: "ACTIVE",
2: "NON_ACTIVE",
}
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
"ALL": 0,
"ACTIVE": 1,
"NON_ACTIVE": 2,
}
var LoggingConfig_Level_name = map[int32]string{
0: "LEVEL_UNSPECIFIED",
1: "ALL",
2: "TRACE",
3: "DEBUG",
4: "INFO",
5: "WARN",
6: "ERROR",
7: "FATAL",
8: "OFF",
}
var LoggingConfig_Level_value = map[string]int32{
"LEVEL_UNSPECIFIED": 0,
"ALL": 1,
"TRACE": 2,
"DEBUG": 3,
"INFO": 4,
"WARN": 5,
"ERROR": 6,
"FATAL": 7,
"OFF": 8,
}
var WorkflowMetadata_State_name = map[int32]string{
0: "UNKNOWN",
1: "PENDING",
2: "RUNNING",
3: "DONE",
}
var WorkflowMetadata_State_value = map[string]int32{
"UNKNOWN": 0,
"PENDING": 1,
"RUNNING": 2,
"DONE": 3,
}
var WorkflowNode_NodeState_name = map[int32]string{
0: "NODE_STATUS_UNSPECIFIED",
1: "BLOCKED",
2: "RUNNABLE",
3: "RUNNING",
4: "COMPLETED",
5: "FAILED",
}
var WorkflowNode_NodeState_value = map[string]int32{
"NODE_STATUS_UNSPECIFIED": 0,
"BLOCKED": 1,
"RUNNABLE": 2,
"RUNNING": 3,
"COMPLETED": 4,
"FAILED": 5,
}
var YarnApplication_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "NEW",
2: "NEW_SAVING",
3: "SUBMITTED",
4: "ACCEPTED",
5: "RUNNING",
6: "FINISHED",
7: "FAILED",
8: "KILLED",
}
var YarnApplication_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"NEW": 1,
"NEW_SAVING": 2,
"SUBMITTED": 3,
"ACCEPTED": 4,
"RUNNING": 5,
"FINISHED": 6,
"FAILED": 7,
"KILLED": 8,
}
Functions ¶
func RegisterClusterControllerServer ¶
func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)
func RegisterJobControllerServer ¶
func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)
func RegisterWorkflowTemplateServiceServer ¶
func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)
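These registration functions attach concrete service implementations to a *grpc.Server before it starts serving. Below is a minimal sketch, assuming the package is imported from google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2; the nil placeholder variables stand in for real implementations of the generated server interfaces and would panic if actually called.

package main

import (
	"log"
	"net"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
	"google.golang.org/grpc"
)

// Placeholder implementations. A real server must supply concrete types
// implementing every method of the generated interfaces; the nil values
// here only keep the sketch compilable.
var (
	clusterSrv  dataproc.ClusterControllerServer
	jobSrv      dataproc.JobControllerServer
	templateSrv dataproc.WorkflowTemplateServiceServer
)

func main() {
	lis, err := net.Listen("tcp", ":8080") // assumed local address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	dataproc.RegisterClusterControllerServer(s, clusterSrv)
	dataproc.RegisterJobControllerServer(s, jobSrv)
	dataproc.RegisterWorkflowTemplateServiceServer(s, templateSrv)
	log.Fatal(s.Serve(lis))
}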
Types ¶
type AcceleratorConfig ¶
type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See [Google Compute Engine AcceleratorTypes](
	// /compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount" json:"accelerator_count,omitempty"`
}
Specifies the type and number of accelerator cards attached to the instances of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
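A short sketch of attaching accelerators to an instance group, using the short-name form from the examples above; the counts and the enclosing InstanceGroupConfig values are illustrative assumptions, and the package is assumed imported as dataproc.

// Two NVIDIA Tesla K80 cards per worker, using the short accelerator
// type name; the full-URL and partial-URI forms work the same way.
workerConfig := &dataproc.InstanceGroupConfig{
	NumInstances: 4,
	Accelerators: []*dataproc.AcceleratorConfig{
		{
			AcceleratorTypeUri: "nvidia-tesla-k80",
			AcceleratorCount:   2,
		},
	},
}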
func (*AcceleratorConfig) Descriptor ¶
func (*AcceleratorConfig) Descriptor() ([]byte, []int)
func (*AcceleratorConfig) GetAcceleratorCount ¶
func (m *AcceleratorConfig) GetAcceleratorCount() int32
func (*AcceleratorConfig) GetAcceleratorTypeUri ¶
func (m *AcceleratorConfig) GetAcceleratorTypeUri() string
func (*AcceleratorConfig) ProtoMessage ¶
func (*AcceleratorConfig) ProtoMessage()
func (*AcceleratorConfig) Reset ¶
func (m *AcceleratorConfig) Reset()
func (*AcceleratorConfig) String ¶
func (m *AcceleratorConfig) String() string
type CancelJobRequest ¶
type CancelJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
A request to cancel a job.
func (*CancelJobRequest) Descriptor ¶
func (*CancelJobRequest) Descriptor() ([]byte, []int)
func (*CancelJobRequest) GetJobId ¶
func (m *CancelJobRequest) GetJobId() string
func (*CancelJobRequest) GetProjectId ¶
func (m *CancelJobRequest) GetProjectId() string
func (*CancelJobRequest) GetRegion ¶
func (m *CancelJobRequest) GetRegion() string
func (*CancelJobRequest) ProtoMessage ¶
func (*CancelJobRequest) ProtoMessage()
func (*CancelJobRequest) Reset ¶
func (m *CancelJobRequest) Reset()
func (*CancelJobRequest) String ¶
func (m *CancelJobRequest) String() string
type Cluster ¶
type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Cloud Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a cluster.
	Labels map[string]string `` /* 132-byte string literal not displayed */
	// Output-only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
	// Output-only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
	// Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
	// Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It may
	// be changed before final release.
	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics" json:"metrics,omitempty"`
}
Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.
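A hedged sketch of a minimal Cluster literal. Only request fields are set; Status, StatusHistory, ClusterUuid, and Metrics are output-only and are filled in by the service. The project and label values are placeholders.

cluster := &dataproc.Cluster{
	ProjectId:   "my-project",      // placeholder project ID
	ClusterName: "example-cluster", // must be unique within the project
	Config:      &dataproc.ClusterConfig{},
	// Keys and non-empty values must be 1 to 63 characters and conform to
	// RFC 1035; at most 32 labels per cluster.
	Labels: map[string]string{"env": "test"},
}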
func (*Cluster) Descriptor ¶
func (*Cluster) Descriptor() ([]byte, []int)
func (*Cluster) GetClusterName ¶
func (m *Cluster) GetClusterName() string
func (*Cluster) GetClusterUuid ¶
func (m *Cluster) GetClusterUuid() string
func (*Cluster) GetConfig ¶
func (m *Cluster) GetConfig() *ClusterConfig
func (*Cluster) GetLabels ¶
func (m *Cluster) GetLabels() map[string]string
func (*Cluster) GetMetrics ¶
func (m *Cluster) GetMetrics() *ClusterMetrics
func (*Cluster) GetProjectId ¶
func (m *Cluster) GetProjectId() string
func (*Cluster) GetStatus ¶
func (m *Cluster) GetStatus() *ClusterStatus
func (*Cluster) GetStatusHistory ¶
func (m *Cluster) GetStatusHistory() []*ClusterStatus
func (*Cluster) ProtoMessage ¶
func (*Cluster) ProtoMessage()
func (*Cluster) Reset ¶
func (m *Cluster) Reset()
func (*Cluster) String ¶
func (m *Cluster) String() string
type ClusterConfig ¶
type ClusterConfig struct {
	// Optional. A Google Cloud Storage staging bucket used for sharing generated
	// SSH keys and config. If you do not specify a staging bucket, Cloud
	// Dataproc will determine an appropriate Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the Google
	// Compute Engine zone where your cluster is deployed, and then it will create
	// and manage this project-level, per-location bucket for you.
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"`
	// Required. The shared Google Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"`
	// Optional. The config setting for auto delete cluster schedule.
	LifecycleConfig *LifecycleConfig `protobuf:"bytes,14,opt,name=lifecycle_config,json=lifecycleConfig" json:"lifecycle_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's <code>role</code> metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions" json:"initialization_actions,omitempty"`
}
The cluster config.
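A sketch of a small cluster config under assumed placeholder values; the zone, machine counts, and gs:// script URI are illustrative, not service defaults.

cfg := &dataproc.ClusterConfig{
	GceClusterConfig: &dataproc.GceClusterConfig{
		ZoneUri: "us-east1-a", // placeholder zone
	},
	MasterConfig: &dataproc.InstanceGroupConfig{NumInstances: 1},
	WorkerConfig: &dataproc.InstanceGroupConfig{NumInstances: 2},
	// Runs on every node after setup; the script itself can branch on the
	// dataproc-role metadata as shown in the comment above.
	InitializationActions: []*dataproc.NodeInitializationAction{
		{ExecutableFile: "gs://my-bucket/startup.sh"}, // placeholder URI
	},
}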
func (*ClusterConfig) Descriptor ¶
func (*ClusterConfig) Descriptor() ([]byte, []int)
func (*ClusterConfig) GetConfigBucket ¶
func (m *ClusterConfig) GetConfigBucket() string
func (*ClusterConfig) GetGceClusterConfig ¶
func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig
func (*ClusterConfig) GetInitializationActions ¶
func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction
func (*ClusterConfig) GetLifecycleConfig ¶
func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig
func (*ClusterConfig) GetMasterConfig ¶
func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig
func (*ClusterConfig) GetSecondaryWorkerConfig ¶
func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) GetSoftwareConfig ¶
func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig
func (*ClusterConfig) GetWorkerConfig ¶
func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) ProtoMessage ¶
func (*ClusterConfig) ProtoMessage()
func (*ClusterConfig) Reset ¶
func (m *ClusterConfig) Reset()
func (*ClusterConfig) String ¶
func (m *ClusterConfig) String() string
type ClusterControllerClient ¶
type ClusterControllerClient interface {
	// Creates a cluster in a project.
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
}
func NewClusterControllerClient ¶
func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient
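A sketch of creating a cluster through the generated client. The dial target and credentials are placeholders (the real service requires TLS and authentication, elided here), and the returned value is a google.longrunning.Operation that must be polled to completion.

package main

import (
	"context"
	"log"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder connection; real use needs TLS and OAuth credentials.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataproc.NewClusterControllerClient(conn)
	op, err := client.CreateCluster(context.Background(), &dataproc.CreateClusterRequest{
		ProjectId: "my-project", // placeholder
		Region:    "us-east1",   // placeholder
		Cluster: &dataproc.Cluster{
			ClusterName: "example-cluster",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// op is a long-running operation; poll it (or use the Operations API)
	// until it reports done.
	log.Println("started operation:", op.GetName())
}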
type ClusterControllerServer ¶
type ClusterControllerServer interface {
	// Creates a cluster in a project.
	CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(context.Context, *UpdateClusterRequest) (*google_longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(context.Context, *DeleteClusterRequest) (*google_longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*google_longrunning.Operation, error)
}
type ClusterMetrics ¶
type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `` /* 162-byte string literal not displayed */
	// The YARN metrics.
	YarnMetrics map[string]int64 `` /* 162-byte string literal not displayed */
}
Contains cluster daemon metrics, such as HDFS and YARN stats.
**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
func (*ClusterMetrics) Descriptor ¶
func (*ClusterMetrics) Descriptor() ([]byte, []int)
func (*ClusterMetrics) GetHdfsMetrics ¶
func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64
func (*ClusterMetrics) GetYarnMetrics ¶
func (m *ClusterMetrics) GetYarnMetrics() map[string]int64
func (*ClusterMetrics) ProtoMessage ¶
func (*ClusterMetrics) ProtoMessage()
func (*ClusterMetrics) Reset ¶
func (m *ClusterMetrics) Reset()
func (*ClusterMetrics) String ¶
func (m *ClusterMetrics) String() string
type ClusterOperation ¶
type ClusterOperation struct {
	// Output only. The id of the cluster operation.
	OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"`
	// Output only. Error, if operation failed.
	Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
	// Output only. Indicates the operation is done.
	Done bool `protobuf:"varint,3,opt,name=done" json:"done,omitempty"`
}
func (*ClusterOperation) Descriptor ¶
func (*ClusterOperation) Descriptor() ([]byte, []int)
func (*ClusterOperation) GetDone ¶
func (m *ClusterOperation) GetDone() bool
func (*ClusterOperation) GetError ¶
func (m *ClusterOperation) GetError() string
func (*ClusterOperation) GetOperationId ¶
func (m *ClusterOperation) GetOperationId() string
func (*ClusterOperation) ProtoMessage ¶
func (*ClusterOperation) ProtoMessage()
func (*ClusterOperation) Reset ¶
func (m *ClusterOperation) Reset()
func (*ClusterOperation) String ¶
func (m *ClusterOperation) String() string
type ClusterOperationMetadata ¶
type ClusterOperationMetadata struct {
	// Output-only. Name of the cluster for the operation.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Output-only. Cluster UUID for the operation.
	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
	// Output-only. Current operation status.
	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"`
	// Output-only. The previous operation status.
	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
	// Output-only. The operation type.
	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"`
	// Output-only. Short description of operation.
	Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"`
	// Output-only. Labels associated with the operation.
	Labels map[string]string `` /* 133-byte string literal not displayed */
	// Output-only. Errors encountered during operation execution.
	Warnings []string `protobuf:"bytes,14,rep,name=warnings" json:"warnings,omitempty"`
}
Metadata describing the operation.
func (*ClusterOperationMetadata) Descriptor ¶
func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)
func (*ClusterOperationMetadata) GetClusterName ¶
func (m *ClusterOperationMetadata) GetClusterName() string
func (*ClusterOperationMetadata) GetClusterUuid ¶
func (m *ClusterOperationMetadata) GetClusterUuid() string
func (*ClusterOperationMetadata) GetDescription ¶
func (m *ClusterOperationMetadata) GetDescription() string
func (*ClusterOperationMetadata) GetLabels ¶
func (m *ClusterOperationMetadata) GetLabels() map[string]string
func (*ClusterOperationMetadata) GetOperationType ¶
func (m *ClusterOperationMetadata) GetOperationType() string
func (*ClusterOperationMetadata) GetStatus ¶
func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus
func (*ClusterOperationMetadata) GetStatusHistory ¶
func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus
func (*ClusterOperationMetadata) GetWarnings ¶
func (m *ClusterOperationMetadata) GetWarnings() []string
func (*ClusterOperationMetadata) ProtoMessage ¶
func (*ClusterOperationMetadata) ProtoMessage()
func (*ClusterOperationMetadata) Reset ¶
func (m *ClusterOperationMetadata) Reset()
func (*ClusterOperationMetadata) String ¶
func (m *ClusterOperationMetadata) String() string
type ClusterOperationStatus ¶
type ClusterOperationStatus struct {
	// Output-only. A message containing the operation state.
	State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.ClusterOperationStatus_State" json:"state,omitempty"`
	// Output-only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"`
	// Output-only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"`
	// Output-only. The time this state was entered.
	StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
}
The status of the operation.
func (*ClusterOperationStatus) Descriptor ¶
func (*ClusterOperationStatus) Descriptor() ([]byte, []int)
func (*ClusterOperationStatus) GetDetails ¶
func (m *ClusterOperationStatus) GetDetails() string
func (*ClusterOperationStatus) GetInnerState ¶
func (m *ClusterOperationStatus) GetInnerState() string
func (*ClusterOperationStatus) GetState ¶
func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State
func (*ClusterOperationStatus) GetStateStartTime ¶
func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf5.Timestamp
func (*ClusterOperationStatus) ProtoMessage ¶
func (*ClusterOperationStatus) ProtoMessage()
func (*ClusterOperationStatus) Reset ¶
func (m *ClusterOperationStatus) Reset()
func (*ClusterOperationStatus) String ¶
func (m *ClusterOperationStatus) String() string
type ClusterOperationStatus_State ¶
type ClusterOperationStatus_State int32
The operation state.
const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)
func (ClusterOperationStatus_State) EnumDescriptor ¶
func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)
func (ClusterOperationStatus_State) String ¶
func (x ClusterOperationStatus_State) String() string
type ClusterSelector ¶
type ClusterSelector struct {
	// Optional. The zone where workflow process executes. This parameter does not
	// affect the selection of the cluster.
	//
	// If unspecified, the zone of the first cluster matching the selector
	// is used.
	Zone string `protobuf:"bytes,1,opt,name=zone" json:"zone,omitempty"`
	// Required. The cluster labels. Cluster must have all labels
	// to match.
	ClusterLabels map[string]string `` /* 167-byte string literal not displayed */
}
A selector that chooses a target cluster for jobs based on metadata.
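A sketch of selecting an existing cluster by labels inside a workflow template placement; the zone and label values are assumptions, and the oneof wrapper type WorkflowTemplatePlacement_ClusterSelector (listed in the index above) is assumed to follow the standard protoc-gen-go shape.

placement := &dataproc.WorkflowTemplatePlacement{
	Placement: &dataproc.WorkflowTemplatePlacement_ClusterSelector{
		ClusterSelector: &dataproc.ClusterSelector{
			Zone: "us-east1-a", // optional; placeholder
			// The target cluster must carry every one of these labels.
			ClusterLabels: map[string]string{"env": "prod"},
		},
	},
}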
func (*ClusterSelector) Descriptor ¶
func (*ClusterSelector) Descriptor() ([]byte, []int)
func (*ClusterSelector) GetClusterLabels ¶
func (m *ClusterSelector) GetClusterLabels() map[string]string
func (*ClusterSelector) GetZone ¶
func (m *ClusterSelector) GetZone() string
func (*ClusterSelector) ProtoMessage ¶
func (*ClusterSelector) ProtoMessage()
func (*ClusterSelector) Reset ¶
func (m *ClusterSelector) Reset()
func (*ClusterSelector) String ¶
func (m *ClusterSelector) String() string
type ClusterStatus ¶
type ClusterStatus struct {
	// Output-only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.ClusterStatus_State" json:"state,omitempty"`
	// Output-only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
	// Output-only. Time when this state was entered.
	StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
	// Output-only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,enum=google.cloud.dataproc.v1beta2.ClusterStatus_Substate" json:"substate,omitempty"`
}
The status of a cluster and its instances.
func (*ClusterStatus) Descriptor ¶
func (*ClusterStatus) Descriptor() ([]byte, []int)
func (*ClusterStatus) GetDetail ¶
func (m *ClusterStatus) GetDetail() string
func (*ClusterStatus) GetState ¶
func (m *ClusterStatus) GetState() ClusterStatus_State
func (*ClusterStatus) GetStateStartTime ¶
func (m *ClusterStatus) GetStateStartTime() *google_protobuf5.Timestamp
func (*ClusterStatus) GetSubstate ¶
func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate
func (*ClusterStatus) ProtoMessage ¶
func (*ClusterStatus) ProtoMessage()
func (*ClusterStatus) Reset ¶
func (m *ClusterStatus) Reset()
func (*ClusterStatus) String ¶
func (m *ClusterStatus) String() string
type ClusterStatus_State ¶
type ClusterStatus_State int32
The cluster state.
const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
)
func (ClusterStatus_State) EnumDescriptor ¶
func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)
func (ClusterStatus_State) String ¶
func (x ClusterStatus_State) String() string
type ClusterStatus_Substate ¶
type ClusterStatus_Substate int32
const (
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Cloud Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)
func (ClusterStatus_Substate) EnumDescriptor ¶
func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)
func (ClusterStatus_Substate) String ¶
func (x ClusterStatus_Substate) String() string
type CreateClusterRequest ¶
type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
}
A request to create a cluster.
func (*CreateClusterRequest) Descriptor ¶
func (*CreateClusterRequest) Descriptor() ([]byte, []int)
func (*CreateClusterRequest) GetCluster ¶
func (m *CreateClusterRequest) GetCluster() *Cluster
func (*CreateClusterRequest) GetProjectId ¶
func (m *CreateClusterRequest) GetProjectId() string
func (*CreateClusterRequest) GetRegion ¶
func (m *CreateClusterRequest) GetRegion() string
func (*CreateClusterRequest) ProtoMessage ¶
func (*CreateClusterRequest) ProtoMessage()
func (*CreateClusterRequest) Reset ¶
func (m *CreateClusterRequest) Reset()
func (*CreateClusterRequest) String ¶
func (m *CreateClusterRequest) String() string
type CreateWorkflowTemplateRequest ¶
type CreateWorkflowTemplateRequest struct {
	// Required. The "resource name" of the region, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}`
	Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
	// Required. The Dataproc workflow template to create.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"`
}
A request to create a workflow template.
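A sketch of the request, assuming placeholder project and region in the parent resource name:

req := &dataproc.CreateWorkflowTemplateRequest{
	Parent: "projects/my-project/regions/us-east1", // placeholder parent
	Template: &dataproc.WorkflowTemplate{
		Id: "example-template", // placeholder template ID
	},
}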
func (*CreateWorkflowTemplateRequest) Descriptor ¶
func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
func (*CreateWorkflowTemplateRequest) GetParent ¶
func (m *CreateWorkflowTemplateRequest) GetParent() string
func (*CreateWorkflowTemplateRequest) GetTemplate ¶
func (m *CreateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
func (*CreateWorkflowTemplateRequest) ProtoMessage ¶
func (*CreateWorkflowTemplateRequest) ProtoMessage()
func (*CreateWorkflowTemplateRequest) Reset ¶
func (m *CreateWorkflowTemplateRequest) Reset()
func (*CreateWorkflowTemplateRequest) String ¶
func (m *CreateWorkflowTemplateRequest) String() string
type DeleteClusterRequest ¶
type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if cluster with specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
}
A request to delete a cluster.
func (*DeleteClusterRequest) Descriptor ¶
func (*DeleteClusterRequest) Descriptor() ([]byte, []int)
func (*DeleteClusterRequest) GetClusterName ¶
func (m *DeleteClusterRequest) GetClusterName() string
func (*DeleteClusterRequest) GetClusterUuid ¶
func (m *DeleteClusterRequest) GetClusterUuid() string
func (*DeleteClusterRequest) GetProjectId ¶
func (m *DeleteClusterRequest) GetProjectId() string
func (*DeleteClusterRequest) GetRegion ¶
func (m *DeleteClusterRequest) GetRegion() string
func (*DeleteClusterRequest) ProtoMessage ¶
func (*DeleteClusterRequest) ProtoMessage()
func (*DeleteClusterRequest) Reset ¶
func (m *DeleteClusterRequest) Reset()
func (*DeleteClusterRequest) String ¶
func (m *DeleteClusterRequest) String() string
type DeleteJobRequest ¶
type DeleteJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
A request to delete a job.
func (*DeleteJobRequest) Descriptor ¶
func (*DeleteJobRequest) Descriptor() ([]byte, []int)
func (*DeleteJobRequest) GetJobId ¶
func (m *DeleteJobRequest) GetJobId() string
func (*DeleteJobRequest) GetProjectId ¶
func (m *DeleteJobRequest) GetProjectId() string
func (*DeleteJobRequest) GetRegion ¶
func (m *DeleteJobRequest) GetRegion() string
func (*DeleteJobRequest) ProtoMessage ¶
func (*DeleteJobRequest) ProtoMessage()
func (*DeleteJobRequest) Reset ¶
func (m *DeleteJobRequest) Reset()
func (*DeleteJobRequest) String ¶
func (m *DeleteJobRequest) String() string
type DeleteWorkflowTemplateRequest ¶
type DeleteWorkflowTemplateRequest struct {
	// Required. The "resource name" of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Optional. The version of workflow template to delete. If specified,
	// will only delete the template if the current server version matches
	// specified version.
	Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
}
A request to delete a workflow template.
Workflows that have already started will remain running.
func (*DeleteWorkflowTemplateRequest) Descriptor ¶
func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)
func (*DeleteWorkflowTemplateRequest) GetName ¶
func (m *DeleteWorkflowTemplateRequest) GetName() string
func (*DeleteWorkflowTemplateRequest) GetVersion ¶
func (m *DeleteWorkflowTemplateRequest) GetVersion() int32
func (*DeleteWorkflowTemplateRequest) ProtoMessage ¶
func (*DeleteWorkflowTemplateRequest) ProtoMessage()
func (*DeleteWorkflowTemplateRequest) Reset ¶
func (m *DeleteWorkflowTemplateRequest) Reset()
func (*DeleteWorkflowTemplateRequest) String ¶
func (m *DeleteWorkflowTemplateRequest) String() string
type DiagnoseClusterRequest ¶
type DiagnoseClusterRequest struct {
    // Required. The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Required. The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // Required. The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
A request to collect cluster diagnostic information.
func (*DiagnoseClusterRequest) Descriptor ¶
func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)
func (*DiagnoseClusterRequest) GetClusterName ¶
func (m *DiagnoseClusterRequest) GetClusterName() string
func (*DiagnoseClusterRequest) GetProjectId ¶
func (m *DiagnoseClusterRequest) GetProjectId() string
func (*DiagnoseClusterRequest) GetRegion ¶
func (m *DiagnoseClusterRequest) GetRegion() string
func (*DiagnoseClusterRequest) ProtoMessage ¶
func (*DiagnoseClusterRequest) ProtoMessage()
func (*DiagnoseClusterRequest) Reset ¶
func (m *DiagnoseClusterRequest) Reset()
func (*DiagnoseClusterRequest) String ¶
func (m *DiagnoseClusterRequest) String() string
type DiagnoseClusterResults ¶
type DiagnoseClusterResults struct {
    // Output-only. The Google Cloud Storage URI of the diagnostic output.
    // The output report is a plain text file with a summary of collected
    // diagnostics.
    OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"`
}
The location of diagnostic output.
func (*DiagnoseClusterResults) Descriptor ¶
func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)
func (*DiagnoseClusterResults) GetOutputUri ¶
func (m *DiagnoseClusterResults) GetOutputUri() string
func (*DiagnoseClusterResults) ProtoMessage ¶
func (*DiagnoseClusterResults) ProtoMessage()
func (*DiagnoseClusterResults) Reset ¶
func (m *DiagnoseClusterResults) Reset()
func (*DiagnoseClusterResults) String ¶
func (m *DiagnoseClusterResults) String() string
type DiskConfig ¶
type DiskConfig struct {
    // Optional. Size in GB of the boot disk (default is 500GB).
    BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"`
    // Optional. Number of attached SSDs, from 0 to 4 (default is 0).
    // If SSDs are not attached, the boot disk is used to store runtime logs and
    // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
    // If one or more SSDs are attached, this runtime bulk
    // data is spread across them, and the boot disk contains only basic
    // config and installed binaries.
    NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds" json:"num_local_ssds,omitempty"`
}
Specifies the config of disk options for a group of VM instances.
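As a small sketch of these options (same placeholder `example`/`dataprocpb` setup as the earlier snippet):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

// exampleDiskConfig: a 1000 GB boot disk plus two local SSDs. Per the field
// comments above, with SSDs attached the runtime bulk data (HDFS, logs)
// spreads across the SSDs and the boot disk keeps only config and binaries.
func exampleDiskConfig() *dataprocpb.DiskConfig {
    return &dataprocpb.DiskConfig{
        BootDiskSizeGb: 1000,
        NumLocalSsds:   2,
    }
}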
func (*DiskConfig) Descriptor ¶
func (*DiskConfig) Descriptor() ([]byte, []int)
func (*DiskConfig) GetBootDiskSizeGb ¶
func (m *DiskConfig) GetBootDiskSizeGb() int32
func (*DiskConfig) GetNumLocalSsds ¶
func (m *DiskConfig) GetNumLocalSsds() int32
func (*DiskConfig) ProtoMessage ¶
func (*DiskConfig) ProtoMessage()
func (*DiskConfig) Reset ¶
func (m *DiskConfig) Reset()
func (*DiskConfig) String ¶
func (m *DiskConfig) String() string
type GceClusterConfig ¶
type GceClusterConfig struct {
    // Optional. The zone where the Google Compute Engine cluster will be located.
    // On a create request, it is required in the "global" region. If omitted
    // in a non-global Cloud Dataproc region, the service will pick a zone in the
    // corresponding Compute Engine region. On a get request, zone will always be
    // present.
    //
    // A full URL, partial URI, or short name are valid. Examples:
    //
    // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
    // * `projects/[project_id]/zones/[zone]`
    // * `us-central1-f`
    ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"`
    // Optional. The Google Compute Engine network to be used for machine
    // communications. Cannot be specified with subnetwork_uri. If neither
    // `network_uri` nor `subnetwork_uri` is specified, the "default" network of
    // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
    // [Using Subnetworks](/compute/docs/subnetworks) for more information).
    //
    // A full URL, partial URI, or short name are valid. Examples:
    //
    // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
    // * `projects/[project_id]/regions/global/default`
    // * `default`
    NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"`
    // Optional. The Google Compute Engine subnetwork to be used for machine
    // communications. Cannot be specified with network_uri.
    //
    // A full URL, partial URI, or short name are valid. Examples:
    //
    // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
    // * `projects/[project_id]/regions/us-east1/sub0`
    // * `sub0`
    SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"`
    // Optional. If true, all instances in the cluster will only have internal IP
    // addresses. By default, clusters are not restricted to internal IP addresses,
    // and will have ephemeral external IP addresses assigned to each instance.
    // This `internal_ip_only` restriction can only be enabled for subnetwork
    // enabled networks, and all off-cluster dependencies must be configured to be
    // accessible without external IP addresses.
    InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"`
    // Optional. The service account of the instances. Defaults to the default
    // Google Compute Engine service account. Custom service accounts need
    // permissions equivalent to the following IAM roles:
    //
    // * roles/logging.logWriter
    // * roles/storage.objectAdmin
    //
    // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
    // for more information).
    // Example: `[account_id]@[project_id].iam.gserviceaccount.com`
    ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"`
    // Optional. The URIs of service account scopes to be included in Google
    // Compute Engine instances. The following base set of scopes is always
    // included:
    //
    // * https://www.googleapis.com/auth/cloud.useraccounts.readonly
    // * https://www.googleapis.com/auth/devstorage.read_write
    // * https://www.googleapis.com/auth/logging.write
    //
    // If no scopes are specified, the following defaults are also provided:
    //
    // * https://www.googleapis.com/auth/bigquery
    // * https://www.googleapis.com/auth/bigtable.admin.table
    // * https://www.googleapis.com/auth/bigtable.data
    // * https://www.googleapis.com/auth/devstorage.full_control
    ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"`
    // The Google Compute Engine tags to add to all instances (see
    // [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
    Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
    // The Google Compute Engine metadata entries to add to all instances (see
    // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    Metadata map[string]string `` /* 136-byte string literal not displayed */
}
Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.
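A sketch of a common configuration (placeholder identifiers throughout; note that `NetworkUri` and `SubnetworkUri` are mutually exclusive per the field comments):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleGceClusterConfig() *dataprocpb.GceClusterConfig {
    return &dataprocpb.GceClusterConfig{
        ZoneUri: "us-central1-f", // short-name form; full URLs also accepted
        // Partial-URI form from the field comments; do not also set NetworkUri.
        SubnetworkUri: "projects/example-project/regions/us-central1/sub0",
        // Internal IPs only; requires a subnetwork-enabled network and
        // internally reachable off-cluster dependencies.
        InternalIpOnly: true,
        ServiceAccount: "worker@example-project.iam.gserviceaccount.com",
        Tags:           []string{"dataproc-node"},
        Metadata:       map[string]string{"env": "staging"},
    }
}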
func (*GceClusterConfig) Descriptor ¶
func (*GceClusterConfig) Descriptor() ([]byte, []int)
func (*GceClusterConfig) GetInternalIpOnly ¶
func (m *GceClusterConfig) GetInternalIpOnly() bool
func (*GceClusterConfig) GetMetadata ¶
func (m *GceClusterConfig) GetMetadata() map[string]string
func (*GceClusterConfig) GetNetworkUri ¶
func (m *GceClusterConfig) GetNetworkUri() string
func (*GceClusterConfig) GetServiceAccount ¶
func (m *GceClusterConfig) GetServiceAccount() string
func (*GceClusterConfig) GetServiceAccountScopes ¶
func (m *GceClusterConfig) GetServiceAccountScopes() []string
func (*GceClusterConfig) GetSubnetworkUri ¶
func (m *GceClusterConfig) GetSubnetworkUri() string
func (*GceClusterConfig) GetTags ¶
func (m *GceClusterConfig) GetTags() []string
func (*GceClusterConfig) GetZoneUri ¶
func (m *GceClusterConfig) GetZoneUri() string
func (*GceClusterConfig) ProtoMessage ¶
func (*GceClusterConfig) ProtoMessage()
func (*GceClusterConfig) Reset ¶
func (m *GceClusterConfig) Reset()
func (*GceClusterConfig) String ¶
func (m *GceClusterConfig) String() string
type GetClusterRequest ¶
type GetClusterRequest struct {
    // Required. The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Required. The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // Required. The cluster name.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
Request to get the resource representation for a cluster in a project.
func (*GetClusterRequest) Descriptor ¶
func (*GetClusterRequest) Descriptor() ([]byte, []int)
func (*GetClusterRequest) GetClusterName ¶
func (m *GetClusterRequest) GetClusterName() string
func (*GetClusterRequest) GetProjectId ¶
func (m *GetClusterRequest) GetProjectId() string
func (*GetClusterRequest) GetRegion ¶
func (m *GetClusterRequest) GetRegion() string
func (*GetClusterRequest) ProtoMessage ¶
func (*GetClusterRequest) ProtoMessage()
func (*GetClusterRequest) Reset ¶
func (m *GetClusterRequest) Reset()
func (*GetClusterRequest) String ¶
func (m *GetClusterRequest) String() string
type GetJobRequest ¶
type GetJobRequest struct {
    // Required. The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Required. The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
    // Required. The job ID.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
A request to get the resource representation for a job in a project.
func (*GetJobRequest) Descriptor ¶
func (*GetJobRequest) Descriptor() ([]byte, []int)
func (*GetJobRequest) GetJobId ¶
func (m *GetJobRequest) GetJobId() string
func (*GetJobRequest) GetProjectId ¶
func (m *GetJobRequest) GetProjectId() string
func (*GetJobRequest) GetRegion ¶
func (m *GetJobRequest) GetRegion() string
func (*GetJobRequest) ProtoMessage ¶
func (*GetJobRequest) ProtoMessage()
func (*GetJobRequest) Reset ¶
func (m *GetJobRequest) Reset()
func (*GetJobRequest) String ¶
func (m *GetJobRequest) String() string
type GetWorkflowTemplateRequest ¶
type GetWorkflowTemplateRequest struct {
    // Required. The "resource name" of the workflow template, as described
    // in https://cloud.google.com/apis/design/resource_names of the form
    // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
    Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    // Optional. The version of workflow template to retrieve. Only previously
    // instantiated versions can be retrieved.
    //
    // If unspecified, retrieves the current version.
    Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
}
A request to fetch a workflow template.
func (*GetWorkflowTemplateRequest) Descriptor ¶
func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)
func (*GetWorkflowTemplateRequest) GetName ¶
func (m *GetWorkflowTemplateRequest) GetName() string
func (*GetWorkflowTemplateRequest) GetVersion ¶
func (m *GetWorkflowTemplateRequest) GetVersion() int32
func (*GetWorkflowTemplateRequest) ProtoMessage ¶
func (*GetWorkflowTemplateRequest) ProtoMessage()
func (*GetWorkflowTemplateRequest) Reset ¶
func (m *GetWorkflowTemplateRequest) Reset()
func (*GetWorkflowTemplateRequest) String ¶
func (m *GetWorkflowTemplateRequest) String() string
type HadoopJob ¶
type HadoopJob struct {
    // Required. Indicates the location of the driver's main class. Specify
    // either the jar file that contains the main class or the main class name.
    // To specify both, add the jar file to `jar_file_uris`, and then specify
    // the main class name in this property.
    //
    // Types that are valid to be assigned to Driver:
    // *HadoopJob_MainJarFileUri
    // *HadoopJob_MainClass
    Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
    // Optional. The arguments to pass to the driver. Do not
    // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
    // properties, since a collision may occur that causes an incorrect job
    // submission.
    Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
    // Optional. Jar file URIs to add to the CLASSPATHs of the
    // Hadoop driver and tasks.
    JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
    // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
    // to the working directory of Hadoop drivers and distributed tasks. Useful
    // for naively parallel tasks.
    FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
    // Optional. HCFS URIs of archives to be extracted in the working directory of
    // Hadoop drivers and tasks. Supported file types:
    // .jar, .tar, .tar.gz, .tgz, or .zip.
    ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
    // Optional. A mapping of property names to values, used to configure Hadoop.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in /etc/hadoop/conf/*-site and
    // classes in user code.
    Properties map[string]string `` /* 140-byte string literal not displayed */
    // Optional. The runtime log config for job execution.
    LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
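Driver is a oneof: exactly one of the two wrapper types documented below may be assigned. A sketch using the main-class form (placeholder URIs and class name):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleHadoopJob() *dataprocpb.HadoopJob {
    return &dataprocpb.HadoopJob{
        // Main class by name; the jar that contains it then goes in
        // JarFileUris, per the Driver field comments above.
        Driver:      &dataprocpb.HadoopJob_MainClass{MainClass: "org.example.WordCount"},
        JarFileUris: []string{"gs://example-bucket/wordcount.jar"},
        Args:        []string{"gs://example-bucket/input/", "gs://example-bucket/output/"},
    }
}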
func (*HadoopJob) Descriptor ¶
func (*HadoopJob) Descriptor() ([]byte, []int)
func (*HadoopJob) GetArchiveUris ¶
func (m *HadoopJob) GetArchiveUris() []string
func (*HadoopJob) GetFileUris ¶
func (m *HadoopJob) GetFileUris() []string
func (*HadoopJob) GetJarFileUris ¶
func (m *HadoopJob) GetJarFileUris() []string
func (*HadoopJob) GetLoggingConfig ¶
func (m *HadoopJob) GetLoggingConfig() *LoggingConfig
func (*HadoopJob) GetMainClass ¶
func (m *HadoopJob) GetMainClass() string
func (*HadoopJob) GetMainJarFileUri ¶
func (m *HadoopJob) GetMainJarFileUri() string
func (*HadoopJob) GetProperties ¶
func (m *HadoopJob) GetProperties() map[string]string
func (*HadoopJob) ProtoMessage ¶
func (*HadoopJob) ProtoMessage()
type HadoopJob_MainClass ¶
type HadoopJob_MainClass struct {
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}
type HadoopJob_MainJarFileUri ¶
type HadoopJob_MainJarFileUri struct {
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}
type HiveJob ¶
type HiveJob struct {
    // Required. The sequence of Hive queries to execute, specified as either
    // an HCFS file URI or a list of queries.
    //
    // Types that are valid to be assigned to Queries:
    // *HiveJob_QueryFileUri
    // *HiveJob_QueryList
    Queries isHiveJob_Queries `protobuf_oneof:"queries"`
    // Optional. Whether to continue executing queries if a query fails.
    // The default value is `false`. Setting to `true` can be useful when executing
    // independent parallel queries.
    ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
    // Optional. Mapping of query variable names to values (equivalent to the
    // Hive command: `SET name="value";`).
    ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
    // Optional. A mapping of property names and values, used to configure Hive.
    // Properties that conflict with values set by the Cloud Dataproc API may be
    // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
    // /etc/hive/conf/hive-site.xml, and classes in user code.
    Properties map[string]string `` /* 140-byte string literal not displayed */
    // Optional. HCFS URIs of jar files to add to the CLASSPATH of the
    // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
    // and UDFs.
    JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
}
A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.
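Queries is also a oneof. The sketch below uses the inline-list form and assumes QueryList exposes its queries as a `Queries []string` field (QueryList's own fields are documented elsewhere in this package):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleHiveJob() *dataprocpb.HiveJob {
    return &dataprocpb.HiveJob{
        Queries: &dataprocpb.HiveJob_QueryList{
            QueryList: &dataprocpb.QueryList{
                Queries: []string{"SELECT COUNT(*) FROM ${table}"},
            },
        },
        // Equivalent to the Hive command `SET table="logs";`.
        ScriptVariables: map[string]string{"table": "logs"},
    }
}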
func (*HiveJob) Descriptor ¶
func (*HiveJob) Descriptor() ([]byte, []int)
func (*HiveJob) GetContinueOnFailure ¶
func (m *HiveJob) GetContinueOnFailure() bool
func (*HiveJob) GetJarFileUris ¶
func (m *HiveJob) GetJarFileUris() []string
func (*HiveJob) GetProperties ¶
func (m *HiveJob) GetProperties() map[string]string
func (*HiveJob) GetQueries ¶
func (m *HiveJob) GetQueries() isHiveJob_Queries
func (*HiveJob) GetQueryFileUri ¶
func (m *HiveJob) GetQueryFileUri() string
func (*HiveJob) GetQueryList ¶
func (m *HiveJob) GetQueryList() *QueryList
func (*HiveJob) GetScriptVariables ¶
func (m *HiveJob) GetScriptVariables() map[string]string
func (*HiveJob) ProtoMessage ¶
func (*HiveJob) ProtoMessage()
type HiveJob_QueryFileUri ¶
type HiveJob_QueryFileUri struct {
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type HiveJob_QueryList ¶
type HiveJob_QueryList struct {
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
type InstanceGroupConfig ¶
type InstanceGroupConfig struct {
    // Optional. The number of VM instances in the instance group.
    // For master instance groups, must be set to 1.
    NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"`
    // Optional. The list of instance names. Cloud Dataproc derives the names from
    // `cluster_name`, `num_instances`, and the instance group if not set by user
    // (recommended practice is to let Cloud Dataproc derive the name).
    InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"`
    // Output-only. The Google Compute Engine image resource used for cluster
    // instances. Inferred from `SoftwareConfig.image_version`.
    ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"`
    // Optional. The Google Compute Engine machine type used for cluster instances.
    //
    // A full URL, partial URI, or short name are valid. Examples:
    //
    // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
    // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
    // * `n1-standard-2`
    MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"`
    // Optional. Disk option config settings.
    DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"`
    // Optional. Specifies that this instance group contains preemptible instances.
    IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"`
    // Output-only. The config for Google Compute Engine Instance Group
    // Manager that manages this group.
    // This is only used for preemptible instance groups.
    ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"`
    // Optional. The Google Compute Engine accelerator configuration for these
    // instances.
    //
    // **Beta Feature**: This feature is still under development. It may be
    // changed before final release.
    Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators" json:"accelerators,omitempty"`
}
Optional. The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.
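A sketch of a two-worker group (placeholder values; the short machine-type name stands in for a full URL):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleWorkerGroup() *dataprocpb.InstanceGroupConfig {
    return &dataprocpb.InstanceGroupConfig{
        NumInstances:   2, // must be 1 for master instance groups
        MachineTypeUri: "n1-standard-2",
        DiskConfig: &dataprocpb.DiskConfig{
            BootDiskSizeGb: 500,
        },
        IsPreemptible: false,
    }
}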
func (*InstanceGroupConfig) Descriptor ¶
func (*InstanceGroupConfig) Descriptor() ([]byte, []int)
func (*InstanceGroupConfig) GetAccelerators ¶
func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig
func (*InstanceGroupConfig) GetDiskConfig ¶
func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig
func (*InstanceGroupConfig) GetImageUri ¶
func (m *InstanceGroupConfig) GetImageUri() string
func (*InstanceGroupConfig) GetInstanceNames ¶
func (m *InstanceGroupConfig) GetInstanceNames() []string
func (*InstanceGroupConfig) GetIsPreemptible ¶
func (m *InstanceGroupConfig) GetIsPreemptible() bool
func (*InstanceGroupConfig) GetMachineTypeUri ¶
func (m *InstanceGroupConfig) GetMachineTypeUri() string
func (*InstanceGroupConfig) GetManagedGroupConfig ¶
func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig
func (*InstanceGroupConfig) GetNumInstances ¶
func (m *InstanceGroupConfig) GetNumInstances() int32
func (*InstanceGroupConfig) ProtoMessage ¶
func (*InstanceGroupConfig) ProtoMessage()
func (*InstanceGroupConfig) Reset ¶
func (m *InstanceGroupConfig) Reset()
func (*InstanceGroupConfig) String ¶
func (m *InstanceGroupConfig) String() string
type InstantiateWorkflowTemplateRequest ¶
type InstantiateWorkflowTemplateRequest struct {
    // Required. The "resource name" of the workflow template, as described
    // in https://cloud.google.com/apis/design/resource_names of the form
    // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
    Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
    // Optional. The version of workflow template to instantiate. If specified,
    // the workflow will be instantiated only if the current version of
    // the workflow template has the supplied version.
    //
    // This option cannot be used to instantiate a previous version of
    // workflow template.
    Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
    // Optional. A tag that prevents multiple concurrent workflow
    // instances with the same tag from running. This mitigates risk of
    // concurrent instances started due to retries.
    //
    // It is recommended to always set this value to a
    // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    //
    // The tag must contain only letters (a-z, A-Z), numbers (0-9),
    // underscores (_), and hyphens (-). The maximum length is 40 characters.
    InstanceId string `protobuf:"bytes,3,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"`
}
A request to instantiate a workflow template.
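A sketch that pins the template version and supplies an instance tag so retries cannot start duplicate concurrent workflows (placeholder name, version, and UUID):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleInstantiateRequest() *dataprocpb.InstantiateWorkflowTemplateRequest {
    return &dataprocpb.InstantiateWorkflowTemplateRequest{
        Name: "projects/example-project/regions/us-central1/workflowTemplates/example-template",
        // Only instantiate if the server-side template is still version 3.
        Version: 3,
        // Per the field comments, a UUID is the recommended tag.
        InstanceId: "f47ac10b-58cc-4372-a567-0e02b2c3d479",
    }
}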
func (*InstantiateWorkflowTemplateRequest) Descriptor ¶
func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
func (*InstantiateWorkflowTemplateRequest) GetInstanceId ¶
func (m *InstantiateWorkflowTemplateRequest) GetInstanceId() string
func (*InstantiateWorkflowTemplateRequest) GetName ¶
func (m *InstantiateWorkflowTemplateRequest) GetName() string
func (*InstantiateWorkflowTemplateRequest) GetVersion ¶
func (m *InstantiateWorkflowTemplateRequest) GetVersion() int32
func (*InstantiateWorkflowTemplateRequest) ProtoMessage ¶
func (*InstantiateWorkflowTemplateRequest) ProtoMessage()
func (*InstantiateWorkflowTemplateRequest) Reset ¶
func (m *InstantiateWorkflowTemplateRequest) Reset()
func (*InstantiateWorkflowTemplateRequest) String ¶
func (m *InstantiateWorkflowTemplateRequest) String() string
type Job ¶
type Job struct {
    // Optional. The fully qualified reference to the job, which can be used to
    // obtain the equivalent REST path of the job resource. If this property
    // is not specified when a job is created, the server generates a
    // <code>job_id</code>.
    Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"`
    // Required. Job information, including how, when, and where to
    // run the job.
    Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"`
    // Required. The application/framework-specific portion of the job.
    //
    // Types that are valid to be assigned to TypeJob:
    // *Job_HadoopJob
    // *Job_SparkJob
    // *Job_PysparkJob
    // *Job_HiveJob
    // *Job_PigJob
    // *Job_SparkSqlJob
    TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
    // Output-only. The job status. Additional application-specific
    // status information may be contained in the <code>type_job</code>
    // and <code>yarn_applications</code> fields.
    Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"`
    // Output-only. The previous job status.
    StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
    // Output-only. The collection of YARN applications spun up by this job.
    //
    // **Beta** Feature: This report is available for testing purposes only. It may
    // be changed before final release.
    YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications" json:"yarn_applications,omitempty"`
    // Output-only. A URI pointing to the location of the stdout of the job's
    // driver program.
    DriverOutputResourceUri string `` /* 128-byte string literal not displayed */
    // Output-only. If present, the location of miscellaneous control files
    // which may be used as part of job setup and handling. If not present,
    // control files may be placed in the same location as `driver_output_uri`.
    DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"`
    // Optional. The labels to associate with this job.
    // Label **keys** must contain 1 to 63 characters, and must conform to
    // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
    // Label **values** may be empty, but, if present, must contain 1 to 63
    // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
    // No more than 32 labels can be associated with a job.
    Labels map[string]string `` /* 133-byte string literal not displayed */
    // Optional. Job scheduling configuration.
    Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling" json:"scheduling,omitempty"`
}
A Cloud Dataproc job resource.
func (*Job) Descriptor ¶
func (*Job) Descriptor() ([]byte, []int)
func (*Job) GetDriverControlFilesUri ¶
func (m *Job) GetDriverControlFilesUri() string
func (*Job) GetDriverOutputResourceUri ¶
func (m *Job) GetDriverOutputResourceUri() string
func (*Job) GetHadoopJob ¶
func (m *Job) GetHadoopJob() *HadoopJob
func (*Job) GetHiveJob ¶
func (m *Job) GetHiveJob() *HiveJob
func (*Job) GetPlacement ¶
func (m *Job) GetPlacement() *JobPlacement
func (*Job) GetPysparkJob ¶
func (m *Job) GetPysparkJob() *PySparkJob
func (*Job) GetReference ¶
func (m *Job) GetReference() *JobReference
func (*Job) GetScheduling ¶
func (m *Job) GetScheduling() *JobScheduling
func (*Job) GetSparkJob ¶
func (m *Job) GetSparkJob() *SparkJob
func (*Job) GetSparkSqlJob ¶
func (m *Job) GetSparkSqlJob() *SparkSqlJob
func (*Job) GetStatusHistory ¶
func (m *Job) GetStatusHistory() []*JobStatus
func (*Job) GetTypeJob ¶
func (m *Job) GetTypeJob() isJob_TypeJob
func (*Job) GetYarnApplications ¶
func (m *Job) GetYarnApplications() []*YarnApplication
func (*Job) ProtoMessage ¶
func (*Job) ProtoMessage()
type JobControllerClient ¶
type JobControllerClient interface {
    // Submits a job to a cluster.
    SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Gets the resource representation for a job in a project.
    GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Lists regions/{region}/jobs in a project.
    ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
    // Updates a job in a project.
    UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Starts a job cancellation request. To access the job resource
    // after cancellation, call
    // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
    // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
    CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
    // Deletes the job from the project. If the job is active, the delete fails,
    // and the response returns `FAILED_PRECONDITION`.
    DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
}
func NewJobControllerClient ¶
func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient
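A sketch of dialing the service and submitting a Hadoop job through this client. The endpoint, identifiers, and TLS-only credential setup are placeholders (a real call also needs OAuth per-RPC credentials, omitted here), and SubmitJobRequest is assumed to carry ProjectId, Region, and Job fields mirroring the other request messages in this package:

package example

import (
    "context"
    "log"

    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

func submitExample(ctx context.Context) {
    conn, err := grpc.Dial("dataproc.googleapis.com:443",
        grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := dataprocpb.NewJobControllerClient(conn)
    job, err := client.SubmitJob(ctx, &dataprocpb.SubmitJobRequest{
        ProjectId: "example-project",
        Region:    "us-central1",
        Job: &dataprocpb.Job{
            Placement: &dataprocpb.JobPlacement{ClusterName: "example-cluster"},
            TypeJob: &dataprocpb.Job_HadoopJob{HadoopJob: &dataprocpb.HadoopJob{
                Driver: &dataprocpb.HadoopJob_MainJarFileUri{
                    MainJarFileUri: "gs://example-bucket/wordcount.jar",
                },
            }},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("submitted job %s", job.GetReference().GetJobId())
}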
type JobControllerServer ¶
type JobControllerServer interface {
    // Submits a job to a cluster.
    SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
    // Gets the resource representation for a job in a project.
    GetJob(context.Context, *GetJobRequest) (*Job, error)
    // Lists regions/{region}/jobs in a project.
    ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
    // Updates a job in a project.
    UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
    // Starts a job cancellation request. To access the job resource
    // after cancellation, call
    // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
    // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
    CancelJob(context.Context, *CancelJobRequest) (*Job, error)
    // Deletes the job from the project. If the job is active, the delete fails,
    // and the response returns `FAILED_PRECONDITION`.
    DeleteJob(context.Context, *DeleteJobRequest) (*google_protobuf2.Empty, error)
}
type JobPlacement ¶
type JobPlacement struct {
    // Required. The name of the cluster where the job will be submitted.
    ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // Output-only. A cluster UUID generated by the Cloud Dataproc service when
    // the job is submitted.
    ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
}
Cloud Dataproc job config.
func (*JobPlacement) Descriptor ¶
func (*JobPlacement) Descriptor() ([]byte, []int)
func (*JobPlacement) GetClusterName ¶
func (m *JobPlacement) GetClusterName() string
func (*JobPlacement) GetClusterUuid ¶
func (m *JobPlacement) GetClusterUuid() string
func (*JobPlacement) ProtoMessage ¶
func (*JobPlacement) ProtoMessage()
func (*JobPlacement) Reset ¶
func (m *JobPlacement) Reset()
func (*JobPlacement) String ¶
func (m *JobPlacement) String() string
type JobReference ¶
type JobReference struct {
    // Required. The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Optional. The job ID, which must be unique within the project. The job ID
    // is generated by the server upon job submission or provided by the user as a
    // means to perform retries without creating duplicate jobs. The ID must
    // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
    // hyphens (-). The maximum length is 100 characters.
    JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}
Encapsulates the full scoping used to reference a job.
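Because a caller-supplied JobId lets a retry target the same job instead of creating a duplicate, a submission sketch can fix the reference up front (placeholder values):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleJobReference() *dataprocpb.JobReference {
    return &dataprocpb.JobReference{
        ProjectId: "example-project",
        // Deterministic, caller-chosen ID: resubmitting after a transient
        // failure reuses this job identity rather than creating a new job.
        JobId: "nightly-etl-2018-01-01",
    }
}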
func (*JobReference) Descriptor ¶
func (*JobReference) Descriptor() ([]byte, []int)
func (*JobReference) GetJobId ¶
func (m *JobReference) GetJobId() string
func (*JobReference) GetProjectId ¶
func (m *JobReference) GetProjectId() string
func (*JobReference) ProtoMessage ¶
func (*JobReference) ProtoMessage()
func (*JobReference) Reset ¶
func (m *JobReference) Reset()
func (*JobReference) String ¶
func (m *JobReference) String() string
type JobScheduling ¶
type JobScheduling struct {
    // Optional. Maximum number of times per hour a driver may be restarted as
    // a result of the driver terminating with a non-zero code before the job
    // is reported failed.
    //
    // A job may be reported as thrashing if the driver exits with a non-zero
    // code four times within a 10-minute window.
    //
    // Maximum value is 10.
    MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour" json:"max_failures_per_hour,omitempty"`
}
Job scheduling options.
**Beta Feature**: These options are available for testing purposes only. They may be changed before final release.
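A minimal sketch (placeholder value within the documented maximum of 10):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleScheduling() *dataprocpb.JobScheduling {
    return &dataprocpb.JobScheduling{
        // Allow up to three driver restarts per hour before the job is
        // reported failed.
        MaxFailuresPerHour: 3,
    }
}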
func (*JobScheduling) Descriptor ¶
func (*JobScheduling) Descriptor() ([]byte, []int)
func (*JobScheduling) GetMaxFailuresPerHour ¶
func (m *JobScheduling) GetMaxFailuresPerHour() int32
func (*JobScheduling) ProtoMessage ¶
func (*JobScheduling) ProtoMessage()
func (*JobScheduling) Reset ¶
func (m *JobScheduling) Reset()
func (*JobScheduling) String ¶
func (m *JobScheduling) String() string
type JobStatus ¶
type JobStatus struct {
    // Output-only. A state message specifying the overall job state.
    State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"`
    // Output-only. Optional job state details, such as an error
    // description if the state is <code>ERROR</code>.
    Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"`
    // Output-only. The time when this state was entered.
    StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
    // Output-only. Additional state information, which includes
    // status reported by the agent.
    Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"`
}
Cloud Dataproc job status.
func (*JobStatus) Descriptor ¶
func (*JobStatus) Descriptor() ([]byte, []int)
func (*JobStatus) GetDetails ¶
func (m *JobStatus) GetDetails() string
func (*JobStatus) GetState ¶
func (m *JobStatus) GetState() JobStatus_State
func (*JobStatus) GetStateStartTime ¶
func (m *JobStatus) GetStateStartTime() *google_protobuf5.Timestamp
func (*JobStatus) GetSubstate ¶
func (m *JobStatus) GetSubstate() JobStatus_Substate
func (*JobStatus) ProtoMessage ¶
func (*JobStatus) ProtoMessage()
type JobStatus_State ¶
type JobStatus_State int32
The job state.
const (
    // The job state is unknown.
    JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
    // The job is pending; it has been submitted, but is not yet running.
    JobStatus_PENDING JobStatus_State = 1
    // Job has been received by the service and completed initial setup;
    // it will soon be submitted to the cluster.
    JobStatus_SETUP_DONE JobStatus_State = 8
    // The job is running on the cluster.
    JobStatus_RUNNING JobStatus_State = 2
    // A CancelJob request has been received, but is pending.
    JobStatus_CANCEL_PENDING JobStatus_State = 3
    // Transient in-flight resources have been canceled, and the request to
    // cancel the running job has been issued to the cluster.
    JobStatus_CANCEL_STARTED JobStatus_State = 7
    // The job cancellation was successful.
    JobStatus_CANCELLED JobStatus_State = 4
    // The job has completed successfully.
    JobStatus_DONE JobStatus_State = 5
    // The job has completed, but encountered an error.
    JobStatus_ERROR JobStatus_State = 6
    // Job attempt has failed. The detail field contains failure details for
    // this attempt.
    //
    // Applies to restartable jobs only.
    JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)
func (JobStatus_State) EnumDescriptor ¶
func (JobStatus_State) EnumDescriptor() ([]byte, []int)
func (JobStatus_State) String ¶
func (x JobStatus_State) String() string
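Per the constants above (and the JobStateMatcher documentation later in this file), CANCELLED, DONE, and ERROR are the terminal states; a small helper sketch for polling loops:

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

// isTerminal reports whether a job has reached a state it cannot leave.
func isTerminal(s dataprocpb.JobStatus_State) bool {
    switch s {
    case dataprocpb.JobStatus_CANCELLED,
        dataprocpb.JobStatus_DONE,
        dataprocpb.JobStatus_ERROR:
        return true
    }
    return false
}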
type JobStatus_Substate ¶
type JobStatus_Substate int32
const (
    JobStatus_UNSPECIFIED JobStatus_Substate = 0
    // The Job is submitted to the agent.
    //
    // Applies to RUNNING state.
    JobStatus_SUBMITTED JobStatus_Substate = 1
    // The Job has been received and is awaiting execution (it may be waiting
    // for a condition to be met). See the "details" field for the reason for
    // the delay.
    //
    // Applies to RUNNING state.
    JobStatus_QUEUED JobStatus_Substate = 2
    // The agent-reported status is out of date, which may be caused by a
    // loss of communication between the agent and Cloud Dataproc. If the
    // agent does not send a timely update, the job will fail.
    //
    // Applies to RUNNING state.
    JobStatus_STALE_STATUS JobStatus_Substate = 3
)
func (JobStatus_Substate) EnumDescriptor ¶
func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)
func (JobStatus_Substate) String ¶
func (x JobStatus_Substate) String() string
type Job_HadoopJob ¶
type Job_HadoopJob struct {
HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,oneof"`
}
type Job_HiveJob ¶
type Job_HiveJob struct {
HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,oneof"`
}
type Job_PigJob ¶
type Job_PigJob struct {
PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,oneof"`
}
type Job_PysparkJob ¶
type Job_PysparkJob struct {
PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,oneof"`
}
type Job_SparkJob ¶
type Job_SparkJob struct {
SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,oneof"`
}
type Job_SparkSqlJob ¶
type Job_SparkSqlJob struct {
SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,oneof"`
}
type LifecycleConfig ¶
type LifecycleConfig struct {
    // Optional. The longest duration that cluster would keep alive while staying
    // idle; passing this threshold will cause cluster to be auto-deleted.
    IdleDeleteTtl *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl" json:"idle_delete_ttl,omitempty"`
    // Types that are valid to be assigned to Ttl:
    // *LifecycleConfig_AutoDeleteTime
    // *LifecycleConfig_AutoDeleteTtl
    Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
}
Specifies the auto-delete schedule configuration for a cluster.
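A sketch combining the idle TTL with the auto-delete oneof. It assumes the `google_protobuf3.Duration` alias above refers to the standard protobuf Duration, imported here from github.com/golang/protobuf/ptypes/duration:

package example

import (
    durpb "github.com/golang/protobuf/ptypes/duration"

    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
)

func exampleLifecycle() *dataprocpb.LifecycleConfig {
    return &dataprocpb.LifecycleConfig{
        // Auto-delete after 30 idle minutes...
        IdleDeleteTtl: &durpb.Duration{Seconds: 30 * 60},
        // ...and in any case no later than 8 hours after creation. Ttl is a
        // oneof: assign either AutoDeleteTtl or AutoDeleteTime, not both.
        Ttl: &dataprocpb.LifecycleConfig_AutoDeleteTtl{
            AutoDeleteTtl: &durpb.Duration{Seconds: 8 * 60 * 60},
        },
    }
}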
func (*LifecycleConfig) Descriptor ¶
func (*LifecycleConfig) Descriptor() ([]byte, []int)
func (*LifecycleConfig) GetAutoDeleteTime ¶
func (m *LifecycleConfig) GetAutoDeleteTime() *google_protobuf5.Timestamp
func (*LifecycleConfig) GetAutoDeleteTtl ¶
func (m *LifecycleConfig) GetAutoDeleteTtl() *google_protobuf3.Duration
func (*LifecycleConfig) GetIdleDeleteTtl ¶
func (m *LifecycleConfig) GetIdleDeleteTtl() *google_protobuf3.Duration
func (*LifecycleConfig) GetTtl ¶
func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl
func (*LifecycleConfig) ProtoMessage ¶
func (*LifecycleConfig) ProtoMessage()
func (*LifecycleConfig) Reset ¶
func (m *LifecycleConfig) Reset()
func (*LifecycleConfig) String ¶
func (m *LifecycleConfig) String() string
func (*LifecycleConfig) XXX_OneofFuncs ¶
func (*LifecycleConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type LifecycleConfig_AutoDeleteTime ¶
type LifecycleConfig_AutoDeleteTime struct {
AutoDeleteTime *google_protobuf5.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,oneof"`
}
type LifecycleConfig_AutoDeleteTtl ¶
type LifecycleConfig_AutoDeleteTtl struct {
AutoDeleteTtl *google_protobuf3.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,oneof"`
}
type ListClustersRequest ¶
type ListClustersRequest struct {
    // Required. The ID of the Google Cloud Platform project that the cluster
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Required. The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"`
    // Optional. A filter constraining the clusters to list. Filters are
    // case-sensitive and have the following syntax:
    //
    // field = value [AND [field = value]] ...
    //
    // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
    // and `[KEY]` is a label key. **value** can be `*` to match all values.
    // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
    // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
    // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
    // contains the `DELETING` and `ERROR` states.
    // `clusterName` is the name of the cluster provided at creation time.
    // Only the logical `AND` operator is supported; space-separated items are
    // treated as having an implicit `AND` operator.
    //
    // Example filter:
    //
    // status.state = ACTIVE AND clusterName = mycluster
    // AND labels.env = staging AND labels.starred = *
    Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"`
    // Optional. The standard List page size.
    PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
    // Optional. The standard List page token.
    PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
A request to list the clusters in a project.
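A pagination sketch using the filter syntax documented above. It assumes ClusterControllerClient exposes a ListClusters method analogous to JobControllerClient.ListJobs; all identifiers are placeholders:

package example

import (
    "context"

    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
)

func listStagingClusters(ctx context.Context, c dataprocpb.ClusterControllerClient) ([]*dataprocpb.Cluster, error) {
    req := &dataprocpb.ListClustersRequest{
        ProjectId: "example-project",
        Region:    "us-central1",
        Filter:    "status.state = ACTIVE AND labels.env = staging",
    }
    var all []*dataprocpb.Cluster
    for {
        resp, err := c.ListClusters(ctx, req)
        if err != nil {
            return nil, err
        }
        all = append(all, resp.GetClusters()...)
        // An empty token means the last page has been fetched.
        if resp.GetNextPageToken() == "" {
            return all, nil
        }
        req.PageToken = resp.GetNextPageToken()
    }
}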
func (*ListClustersRequest) Descriptor ¶
func (*ListClustersRequest) Descriptor() ([]byte, []int)
func (*ListClustersRequest) GetFilter ¶
func (m *ListClustersRequest) GetFilter() string
func (*ListClustersRequest) GetPageSize ¶
func (m *ListClustersRequest) GetPageSize() int32
func (*ListClustersRequest) GetPageToken ¶
func (m *ListClustersRequest) GetPageToken() string
func (*ListClustersRequest) GetProjectId ¶
func (m *ListClustersRequest) GetProjectId() string
func (*ListClustersRequest) GetRegion ¶
func (m *ListClustersRequest) GetRegion() string
func (*ListClustersRequest) ProtoMessage ¶
func (*ListClustersRequest) ProtoMessage()
func (*ListClustersRequest) Reset ¶
func (m *ListClustersRequest) Reset()
func (*ListClustersRequest) String ¶
func (m *ListClustersRequest) String() string
type ListClustersResponse ¶
type ListClustersResponse struct {
    // Output-only. The clusters in the project.
    Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
    // Output-only. This token is included in the response if there are more
    // results to fetch. To fetch additional results, provide this value as the
    // `page_token` in a subsequent <code>ListClustersRequest</code>.
    NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
The list of all clusters in a project.
func (*ListClustersResponse) Descriptor ¶
func (*ListClustersResponse) Descriptor() ([]byte, []int)
func (*ListClustersResponse) GetClusters ¶
func (m *ListClustersResponse) GetClusters() []*Cluster
func (*ListClustersResponse) GetNextPageToken ¶
func (m *ListClustersResponse) GetNextPageToken() string
func (*ListClustersResponse) ProtoMessage ¶
func (*ListClustersResponse) ProtoMessage()
func (*ListClustersResponse) Reset ¶
func (m *ListClustersResponse) Reset()
func (*ListClustersResponse) String ¶
func (m *ListClustersResponse) String() string
type ListJobsRequest ¶
type ListJobsRequest struct {
    // Required. The ID of the Google Cloud Platform project that the job
    // belongs to.
    ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
    // Required. The Cloud Dataproc region in which to handle the request.
    Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"`
    // Optional. The number of results to return in each response.
    PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
    // Optional. The page token, returned by a previous call, to request the
    // next page of results.
    PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
    // Optional. If set, the returned jobs list includes only jobs that were
    // submitted to the named cluster.
    ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // Optional. Specifies enumerated categories of jobs to list.
    // (default = match ALL jobs).
    //
    // If `filter` is provided, `jobStateMatcher` will be ignored.
    JobStateMatcher ListJobsRequest_JobStateMatcher `` /* 169-byte string literal not displayed */
    // Optional. A filter constraining the jobs to list. Filters are
    // case-sensitive and have the following syntax:
    //
    // [field = value] AND [field [= value]] ...
    //
    // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
    // key. **value** can be `*` to match all values.
    // `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
    // Only the logical `AND` operator is supported; space-separated items are
    // treated as having an implicit `AND` operator.
    //
    // Example filter:
    //
    // status.state = ACTIVE AND labels.env = staging AND labels.starred = *
    Filter string `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"`
}
A request to list jobs in a project.
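A sketch restricting the listing to non-terminal jobs on one cluster (placeholder identifiers); note that JobStateMatcher is ignored whenever Filter is set:

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleListJobsRequest() *dataprocpb.ListJobsRequest {
    return &dataprocpb.ListJobsRequest{
        ProjectId:       "example-project",
        Region:          "us-central1",
        ClusterName:     "example-cluster",
        JobStateMatcher: dataprocpb.ListJobsRequest_ACTIVE,
    }
}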
func (*ListJobsRequest) Descriptor ¶
func (*ListJobsRequest) Descriptor() ([]byte, []int)
func (*ListJobsRequest) GetClusterName ¶
func (m *ListJobsRequest) GetClusterName() string
func (*ListJobsRequest) GetFilter ¶
func (m *ListJobsRequest) GetFilter() string
func (*ListJobsRequest) GetJobStateMatcher ¶
func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher
func (*ListJobsRequest) GetPageSize ¶
func (m *ListJobsRequest) GetPageSize() int32
func (*ListJobsRequest) GetPageToken ¶
func (m *ListJobsRequest) GetPageToken() string
func (*ListJobsRequest) GetProjectId ¶
func (m *ListJobsRequest) GetProjectId() string
func (*ListJobsRequest) GetRegion ¶
func (m *ListJobsRequest) GetRegion() string
func (*ListJobsRequest) ProtoMessage ¶
func (*ListJobsRequest) ProtoMessage()
func (*ListJobsRequest) Reset ¶
func (m *ListJobsRequest) Reset()
func (*ListJobsRequest) String ¶
func (m *ListJobsRequest) String() string
type ListJobsRequest_JobStateMatcher ¶
type ListJobsRequest_JobStateMatcher int32
A matcher that specifies categories of job states.
const (
    // Match all jobs, regardless of state.
    ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
    // Only match jobs in non-terminal states: PENDING, RUNNING, or
    // CANCEL_PENDING.
    ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
    // Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
    ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)
func (ListJobsRequest_JobStateMatcher) EnumDescriptor ¶
func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)
func (ListJobsRequest_JobStateMatcher) String ¶
func (x ListJobsRequest_JobStateMatcher) String() string
type ListJobsResponse ¶
type ListJobsResponse struct {
    // Output-only. Jobs list.
    Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"`
    // Optional. This token is included in the response if there are more results
    // to fetch. To fetch additional results, provide this value as the
    // `page_token` in a subsequent <code>ListJobsRequest</code>.
    NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
A list of jobs in a project.
func (*ListJobsResponse) Descriptor ¶
func (*ListJobsResponse) Descriptor() ([]byte, []int)
func (*ListJobsResponse) GetJobs ¶
func (m *ListJobsResponse) GetJobs() []*Job
func (*ListJobsResponse) GetNextPageToken ¶
func (m *ListJobsResponse) GetNextPageToken() string
func (*ListJobsResponse) ProtoMessage ¶
func (*ListJobsResponse) ProtoMessage()
func (*ListJobsResponse) Reset ¶
func (m *ListJobsResponse) Reset()
func (*ListJobsResponse) String ¶
func (m *ListJobsResponse) String() string
type ListWorkflowTemplatesRequest ¶
type ListWorkflowTemplatesRequest struct {
    // Required. The "resource name" of the region, as described
    // in https://cloud.google.com/apis/design/resource_names of the form
    // `projects/{project_id}/regions/{region}`
    Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
    // Optional. The maximum number of results to return in each response.
    PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
    // Optional. The page token, returned by a previous call, to request the
    // next page of results.
    PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
A request to list workflow templates in a project.
func (*ListWorkflowTemplatesRequest) Descriptor ¶
func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)
func (*ListWorkflowTemplatesRequest) GetPageSize ¶
func (m *ListWorkflowTemplatesRequest) GetPageSize() int32
func (*ListWorkflowTemplatesRequest) GetPageToken ¶
func (m *ListWorkflowTemplatesRequest) GetPageToken() string
func (*ListWorkflowTemplatesRequest) GetParent ¶
func (m *ListWorkflowTemplatesRequest) GetParent() string
func (*ListWorkflowTemplatesRequest) ProtoMessage ¶
func (*ListWorkflowTemplatesRequest) ProtoMessage()
func (*ListWorkflowTemplatesRequest) Reset ¶
func (m *ListWorkflowTemplatesRequest) Reset()
func (*ListWorkflowTemplatesRequest) String ¶
func (m *ListWorkflowTemplatesRequest) String() string
type ListWorkflowTemplatesResponse ¶
type ListWorkflowTemplatesResponse struct {
    // Output only. WorkflowTemplates list.
    Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates" json:"templates,omitempty"`
    // Output only. This token is included in the response if there are more results
    // to fetch. To fetch additional results, provide this value as the
    // page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
    NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
A response to a request to list workflow templates in a project.
func (*ListWorkflowTemplatesResponse) Descriptor ¶
func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)
func (*ListWorkflowTemplatesResponse) GetNextPageToken ¶
func (m *ListWorkflowTemplatesResponse) GetNextPageToken() string
func (*ListWorkflowTemplatesResponse) GetTemplates ¶
func (m *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate
func (*ListWorkflowTemplatesResponse) ProtoMessage ¶
func (*ListWorkflowTemplatesResponse) ProtoMessage()
func (*ListWorkflowTemplatesResponse) Reset ¶
func (m *ListWorkflowTemplatesResponse) Reset()
func (*ListWorkflowTemplatesResponse) String ¶
func (m *ListWorkflowTemplatesResponse) String() string
type LoggingConfig ¶
type LoggingConfig struct {
    // The per-package log levels for the driver. This may include
    // "root" package name to configure rootLogger.
    // Examples:
    // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
    DriverLogLevels map[string]LoggingConfig_Level `` /* 231-byte string literal not displayed */
}
The runtime logging config of the job.
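A sketch mirroring the examples in the field comment above:

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleLoggingConfig() *dataprocpb.LoggingConfig {
    return &dataprocpb.LoggingConfig{
        DriverLogLevels: map[string]dataprocpb.LoggingConfig_Level{
            // "root" configures the root logger.
            "root":       dataprocpb.LoggingConfig_INFO,
            "org.apache": dataprocpb.LoggingConfig_DEBUG,
        },
    }
}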
func (*LoggingConfig) Descriptor ¶
func (*LoggingConfig) Descriptor() ([]byte, []int)
func (*LoggingConfig) GetDriverLogLevels ¶
func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level
func (*LoggingConfig) ProtoMessage ¶
func (*LoggingConfig) ProtoMessage()
func (*LoggingConfig) Reset ¶
func (m *LoggingConfig) Reset()
func (*LoggingConfig) String ¶
func (m *LoggingConfig) String() string
type LoggingConfig_Level ¶
type LoggingConfig_Level int32
The Log4j level for job execution. When running an [Apache Hive](http://hive.apache.org/) job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.
const (
    // Level is unspecified. Use default level for log4j.
    LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
    // Use ALL level for log4j.
    LoggingConfig_ALL LoggingConfig_Level = 1
    // Use TRACE level for log4j.
    LoggingConfig_TRACE LoggingConfig_Level = 2
    // Use DEBUG level for log4j.
    LoggingConfig_DEBUG LoggingConfig_Level = 3
    // Use INFO level for log4j.
    LoggingConfig_INFO LoggingConfig_Level = 4
    // Use WARN level for log4j.
    LoggingConfig_WARN LoggingConfig_Level = 5
    // Use ERROR level for log4j.
    LoggingConfig_ERROR LoggingConfig_Level = 6
    // Use FATAL level for log4j.
    LoggingConfig_FATAL LoggingConfig_Level = 7
    // Turn off log4j.
    LoggingConfig_OFF LoggingConfig_Level = 8
)
func (LoggingConfig_Level) EnumDescriptor ¶
func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)
func (LoggingConfig_Level) String ¶
func (x LoggingConfig_Level) String() string
type ManagedCluster ¶
type ManagedCluster struct {
    // Required. The cluster name. Cluster names within a project must be
    // unique. Names from deleted clusters can be reused.
    ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
    // Required. The cluster configuration.
    Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"`
    // Optional. The labels to associate with this cluster.
    //
    // Label keys must be between 1 and 63 characters long, and must conform to
    // the following PCRE regular expression:
    // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
    //
    // Label values must be between 1 and 63 characters long, and must conform to
    // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
    //
    // No more than 64 labels can be associated with a given cluster.
    Labels map[string]string `` /* 132-byte string literal not displayed */
}
Cluster that is managed by the workflow.
func (*ManagedCluster) Descriptor ¶
func (*ManagedCluster) Descriptor() ([]byte, []int)
func (*ManagedCluster) GetClusterName ¶
func (m *ManagedCluster) GetClusterName() string
func (*ManagedCluster) GetConfig ¶
func (m *ManagedCluster) GetConfig() *ClusterConfig
func (*ManagedCluster) GetLabels ¶
func (m *ManagedCluster) GetLabels() map[string]string
func (*ManagedCluster) ProtoMessage ¶
func (*ManagedCluster) ProtoMessage()
func (*ManagedCluster) Reset ¶
func (m *ManagedCluster) Reset()
func (*ManagedCluster) String ¶
func (m *ManagedCluster) String() string
type ManagedGroupConfig ¶
type ManagedGroupConfig struct {
    // Output-only. The name of the Instance Template used for the Managed
    // Instance Group.
    InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"`
    // Output-only. The name of the Instance Group Manager for this group.
    InstanceGroupManagerName string `` /* 130-byte string literal not displayed */
}
Specifies the resources used to actively manage an instance group.
func (*ManagedGroupConfig) Descriptor ¶
func (*ManagedGroupConfig) Descriptor() ([]byte, []int)
func (*ManagedGroupConfig) GetInstanceGroupManagerName ¶
func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string
func (*ManagedGroupConfig) GetInstanceTemplateName ¶
func (m *ManagedGroupConfig) GetInstanceTemplateName() string
func (*ManagedGroupConfig) ProtoMessage ¶
func (*ManagedGroupConfig) ProtoMessage()
func (*ManagedGroupConfig) Reset ¶
func (m *ManagedGroupConfig) Reset()
func (*ManagedGroupConfig) String ¶
func (m *ManagedGroupConfig) String() string
type NodeInitializationAction ¶
type NodeInitializationAction struct {
    // Required. Google Cloud Storage URI of executable file.
    ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"`
    // Optional. Amount of time the executable has to complete. Default is
    // 10 minutes. Cluster creation fails with an explanatory error message (the
    // name of the executable that caused the error and the exceeded timeout
    // period) if the executable has not completed by the end of the timeout
    // period.
    ExecutionTimeout *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout" json:"execution_timeout,omitempty"`
}
Specifies an executable to run on a fully configured node and a timeout period for executable completion.
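A sketch with a tightened timeout (placeholder script URI; same Duration import assumption as the LifecycleConfig sketch above):

package example

import (
    durpb "github.com/golang/protobuf/ptypes/duration"

    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
)

func exampleInitAction() *dataprocpb.NodeInitializationAction {
    return &dataprocpb.NodeInitializationAction{
        ExecutableFile: "gs://example-bucket/bootstrap.sh",
        // Fail cluster creation if the script has not finished within
        // 5 minutes (the default is 10 minutes).
        ExecutionTimeout: &durpb.Duration{Seconds: 300},
    }
}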
func (*NodeInitializationAction) Descriptor ¶
func (*NodeInitializationAction) Descriptor() ([]byte, []int)
func (*NodeInitializationAction) GetExecutableFile ¶
func (m *NodeInitializationAction) GetExecutableFile() string
func (*NodeInitializationAction) GetExecutionTimeout ¶
func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf3.Duration
func (*NodeInitializationAction) ProtoMessage ¶
func (*NodeInitializationAction) ProtoMessage()
func (*NodeInitializationAction) Reset ¶
func (m *NodeInitializationAction) Reset()
func (*NodeInitializationAction) String ¶
func (m *NodeInitializationAction) String() string
type OrderedJob ¶
type OrderedJob struct {
    // Required. The step id. The id must be unique among all jobs
    // within the template.
    //
    // The step id is used as prefix for job id, as job `workflow-step-id` label,
    // and in prerequisite_step_ids field from other steps.
    StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
    // Required. The job definition.
    //
    // Types that are valid to be assigned to JobType:
    // *OrderedJob_HadoopJob
    // *OrderedJob_SparkJob
    // *OrderedJob_PysparkJob
    // *OrderedJob_HiveJob
    // *OrderedJob_PigJob
    // *OrderedJob_SparkSqlJob
    JobType isOrderedJob_JobType `protobuf_oneof:"job_type"`
    // Optional. The labels to associate with this job.
    //
    // Label keys must be between 1 and 63 characters long, and must conform to
    // the following regular expression:
    // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
    //
    // Label values must be between 1 and 63 characters long, and must conform to
    // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
    //
    // No more than 64 labels can be associated with a given job.
    Labels map[string]string `` /* 132-byte string literal not displayed */
    // Optional. Job scheduling configuration.
    Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling" json:"scheduling,omitempty"`
    // Optional. The optional list of prerequisite job step_ids.
    // If not specified, the job will start at the beginning of workflow.
    PrerequisiteStepIds []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds" json:"prerequisite_step_ids,omitempty"`
}
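A sketch of two ordered steps in which the second waits on the first via PrerequisiteStepIds (placeholder URIs; the oneof wrapper types are documented below):

package example

import dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"

func exampleOrderedSteps() []*dataprocpb.OrderedJob {
    ingest := &dataprocpb.OrderedJob{
        StepId: "ingest",
        JobType: &dataprocpb.OrderedJob_HadoopJob{HadoopJob: &dataprocpb.HadoopJob{
            Driver: &dataprocpb.HadoopJob_MainJarFileUri{
                MainJarFileUri: "gs://example-bucket/ingest.jar",
            },
        }},
    }
    report := &dataprocpb.OrderedJob{
        StepId: "report",
        JobType: &dataprocpb.OrderedJob_HiveJob{HiveJob: &dataprocpb.HiveJob{
            Queries: &dataprocpb.HiveJob_QueryFileUri{
                QueryFileUri: "gs://example-bucket/report.hql",
            },
        }},
        // "report" will not start until "ingest" completes.
        PrerequisiteStepIds: []string{"ingest"},
    }
    return []*dataprocpb.OrderedJob{ingest, report}
}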
func (*OrderedJob) Descriptor ¶
func (*OrderedJob) Descriptor() ([]byte, []int)
func (*OrderedJob) GetHadoopJob ¶
func (m *OrderedJob) GetHadoopJob() *HadoopJob
func (*OrderedJob) GetHiveJob ¶
func (m *OrderedJob) GetHiveJob() *HiveJob
func (*OrderedJob) GetJobType ¶
func (m *OrderedJob) GetJobType() isOrderedJob_JobType
func (*OrderedJob) GetLabels ¶
func (m *OrderedJob) GetLabels() map[string]string
func (*OrderedJob) GetPigJob ¶
func (m *OrderedJob) GetPigJob() *PigJob
func (*OrderedJob) GetPrerequisiteStepIds ¶
func (m *OrderedJob) GetPrerequisiteStepIds() []string
func (*OrderedJob) GetPysparkJob ¶
func (m *OrderedJob) GetPysparkJob() *PySparkJob
func (*OrderedJob) GetScheduling ¶
func (m *OrderedJob) GetScheduling() *JobScheduling
func (*OrderedJob) GetSparkJob ¶
func (m *OrderedJob) GetSparkJob() *SparkJob
func (*OrderedJob) GetSparkSqlJob ¶
func (m *OrderedJob) GetSparkSqlJob() *SparkSqlJob
func (*OrderedJob) GetStepId ¶
func (m *OrderedJob) GetStepId() string
func (*OrderedJob) ProtoMessage ¶
func (*OrderedJob) ProtoMessage()
func (*OrderedJob) Reset ¶
func (m *OrderedJob) Reset()
func (*OrderedJob) String ¶
func (m *OrderedJob) String() string
func (*OrderedJob) XXX_OneofFuncs ¶
func (*OrderedJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type OrderedJob_HadoopJob ¶
type OrderedJob_HadoopJob struct {
HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,oneof"`
}
type OrderedJob_HiveJob ¶
type OrderedJob_HiveJob struct {
HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,oneof"`
}
type OrderedJob_PigJob ¶
type OrderedJob_PigJob struct {
PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,oneof"`
}
type OrderedJob_PysparkJob ¶
type OrderedJob_PysparkJob struct {
PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,oneof"`
}
type OrderedJob_SparkJob ¶
type OrderedJob_SparkJob struct {
SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,oneof"`
}
type OrderedJob_SparkSqlJob ¶
type OrderedJob_SparkSqlJob struct {
SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,oneof"`
}
type PigJob ¶
type PigJob struct {
	// Required. The sequence of Pig queries to execute, specified as an HCFS
	// file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PigJob_QueryFileUri
	//	*PigJob_QueryList
	Queries isPigJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when
	// executing independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Pig command: `name=[value]`).
	ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure Pig.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of
	// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.
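A minimal sketch of a Pig job that inlines its queries and binds a script variable (the data path is hypothetical):

pig := &dataproc.PigJob{
	Queries: &dataproc.PigJob_QueryList{
		QueryList: &dataproc.QueryList{
			Queries: []string{"rows = LOAD '$INPUT' USING PigStorage(','); DUMP rows;"},
		},
	},
	// Equivalent to passing `INPUT=gs://my-bucket/data.csv` on the Pig command line.
	ScriptVariables: map[string]string{"INPUT": "gs://my-bucket/data.csv"},
}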
func (*PigJob) Descriptor ¶
func (*PigJob) Descriptor() ([]byte, []int)
func (*PigJob) GetContinueOnFailure ¶
func (m *PigJob) GetContinueOnFailure() bool
func (*PigJob) GetJarFileUris ¶
func (m *PigJob) GetJarFileUris() []string
func (*PigJob) GetLoggingConfig ¶
func (m *PigJob) GetLoggingConfig() *LoggingConfig
func (*PigJob) GetProperties ¶
func (m *PigJob) GetProperties() map[string]string
func (*PigJob) GetQueries ¶
func (m *PigJob) GetQueries() isPigJob_Queries
func (*PigJob) GetQueryFileUri ¶
func (m *PigJob) GetQueryFileUri() string
func (*PigJob) GetQueryList ¶
func (m *PigJob) GetQueryList() *QueryList
func (*PigJob) GetScriptVariables ¶
func (m *PigJob) GetScriptVariables() map[string]string
func (*PigJob) ProtoMessage ¶
func (*PigJob) ProtoMessage()
func (*PigJob) Reset ¶
func (m *PigJob) Reset()
func (*PigJob) String ¶
func (m *PigJob) String() string
type PigJob_QueryFileUri ¶
type PigJob_QueryFileUri struct {
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type PigJob_QueryList ¶
type PigJob_QueryList struct {
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
type PySparkJob ¶
type PySparkJob struct {
	// Required. The HCFS URI of the main Python file to use as the driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Python driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Python drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory.
	// Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure PySpark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.
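A minimal sketch of a PySpark job; note that engine tuning such as executor memory goes in Properties rather than in Args (paths are hypothetical):

pyspark := &dataproc.PySparkJob{
	MainPythonFileUri: "gs://my-bucket/jobs/wordcount.py",
	Args:              []string{"gs://my-bucket/input/", "gs://my-bucket/output/"},
	PythonFileUris:    []string{"gs://my-bucket/libs/helpers.zip"},
	// Spark tuning belongs here, not in Args.
	Properties: map[string]string{"spark.executor.memory": "4g"},
}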
func (*PySparkJob) Descriptor ¶
func (*PySparkJob) Descriptor() ([]byte, []int)
func (*PySparkJob) GetArchiveUris ¶
func (m *PySparkJob) GetArchiveUris() []string
func (*PySparkJob) GetArgs ¶
func (m *PySparkJob) GetArgs() []string
func (*PySparkJob) GetFileUris ¶
func (m *PySparkJob) GetFileUris() []string
func (*PySparkJob) GetJarFileUris ¶
func (m *PySparkJob) GetJarFileUris() []string
func (*PySparkJob) GetLoggingConfig ¶
func (m *PySparkJob) GetLoggingConfig() *LoggingConfig
func (*PySparkJob) GetMainPythonFileUri ¶
func (m *PySparkJob) GetMainPythonFileUri() string
func (*PySparkJob) GetProperties ¶
func (m *PySparkJob) GetProperties() map[string]string
func (*PySparkJob) GetPythonFileUris ¶
func (m *PySparkJob) GetPythonFileUris() []string
func (*PySparkJob) ProtoMessage ¶
func (*PySparkJob) ProtoMessage()
func (*PySparkJob) Reset ¶
func (m *PySparkJob) Reset()
func (*PySparkJob) String ¶
func (m *PySparkJob) String() string
type QueryList ¶
type QueryList struct {
	// Required. The queries to execute. You do not need to terminate a query
	// with a semicolon. Multiple queries can be specified in one string
	// by separating each with a semicolon. Here is an example of a Cloud
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//	"hiveJob": {
	//	  "queryList": {
	//	    "queries": [
	//	      "query1",
	//	      "query2",
	//	      "query3;query4"
	//	    ]
	//	  }
	//	}
	Queries []string `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
}
A list of queries to run on a cluster.
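The Go equivalent of the JSON snippet above assigns the list through the HiveJob queries oneof; a minimal sketch, assuming the HiveJob wrapper is named HiveJob_QueryList following the pattern of the oneof wrappers shown in this listing:

hive := &dataproc.HiveJob{
	Queries: &dataproc.HiveJob_QueryList{
		QueryList: &dataproc.QueryList{
			Queries: []string{"query1", "query2", "query3;query4"},
		},
	},
}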
func (*QueryList) Descriptor ¶
func (*QueryList) Descriptor() ([]byte, []int)
func (*QueryList) GetQueries ¶
func (m *QueryList) GetQueries() []string
func (*QueryList) ProtoMessage ¶
func (*QueryList) ProtoMessage()
func (*QueryList) Reset ¶
func (m *QueryList) Reset()
func (*QueryList) String ¶
func (m *QueryList) String() string
type SoftwareConfig ¶
type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must match the
	// regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the
	// latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, such as
	// `core:fs.defaultFS`. The following are supported prefixes
	// and their mappings:
	//
	//	* capacity-scheduler: `capacity-scheduler.xml`
	//	* core: `core-site.xml`
	//	* distcp: `distcp-default.xml`
	//	* hdfs: `hdfs-site.xml`
	//	* hive: `hive-site.xml`
	//	* mapred: `mapred-site.xml`
	//	* pig: `pig.properties`
	//	* spark: `spark-defaults.conf`
	//	* yarn: `yarn-site.xml`
	//
	// For more information, see
	// [Cluster properties](/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `` /* 140-byte string literal not displayed */
}
Specifies the selection and config of software inside the cluster.
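A minimal sketch showing the `prefix:property` key format (the property values are arbitrary examples):

software := &dataproc.SoftwareConfig{
	// Pin the image track; the latest version is used when unset.
	ImageVersion: "1.2",
	Properties: map[string]string{
		// Written to spark-defaults.conf on cluster nodes.
		"spark:spark.executor.memory": "4g",
		// Written to yarn-site.xml on cluster nodes.
		"yarn:yarn.log-aggregation-enable": "true",
	},
}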
func (*SoftwareConfig) Descriptor ¶
func (*SoftwareConfig) Descriptor() ([]byte, []int)
func (*SoftwareConfig) GetImageVersion ¶
func (m *SoftwareConfig) GetImageVersion() string
func (*SoftwareConfig) GetProperties ¶
func (m *SoftwareConfig) GetProperties() map[string]string
func (*SoftwareConfig) ProtoMessage ¶
func (*SoftwareConfig) ProtoMessage()
func (*SoftwareConfig) Reset ¶
func (m *SoftwareConfig) Reset()
func (*SoftwareConfig) String ¶
func (m *SoftwareConfig) String() string
type SparkJob ¶
type SparkJob struct {
	// Required. The specification of the main method to call to drive the job.
	// Specify either the jar file that contains the main class or the main class
	// name. To pass both a main jar and a main class in that jar, add the jar to
	// `CommonJob.jar_file_uris`, and then specify the main class name in
	// `main_class`.
	//
	// Types that are valid to be assigned to Driver:
	//	*SparkJob_MainJarFileUri
	//	*SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Spark drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Spark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.
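Because the driver is a oneof, exactly one of the two wrapper types shown below the getters is assigned. A minimal sketch of each form (jar paths and class name are hypothetical):

// Form 1: point directly at the jar containing the main class.
byJar := &dataproc.SparkJob{
	Driver: &dataproc.SparkJob_MainJarFileUri{MainJarFileUri: "gs://my-bucket/jars/app.jar"},
}

// Form 2: name the main class and supply the jar on the classpath.
byClass := &dataproc.SparkJob{
	Driver:      &dataproc.SparkJob_MainClass{MainClass: "com.example.App"},
	JarFileUris: []string{"gs://my-bucket/jars/app.jar"},
}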
func (*SparkJob) Descriptor ¶
func (*SparkJob) Descriptor() ([]byte, []int)
func (*SparkJob) GetArchiveUris ¶
func (m *SparkJob) GetArchiveUris() []string
func (*SparkJob) GetArgs ¶
func (m *SparkJob) GetArgs() []string
func (*SparkJob) GetDriver ¶
func (m *SparkJob) GetDriver() isSparkJob_Driver
func (*SparkJob) GetFileUris ¶
func (m *SparkJob) GetFileUris() []string
func (*SparkJob) GetJarFileUris ¶
func (m *SparkJob) GetJarFileUris() []string
func (*SparkJob) GetLoggingConfig ¶
func (m *SparkJob) GetLoggingConfig() *LoggingConfig
func (*SparkJob) GetMainClass ¶
func (m *SparkJob) GetMainClass() string
func (*SparkJob) GetMainJarFileUri ¶
func (m *SparkJob) GetMainJarFileUri() string
func (*SparkJob) GetProperties ¶
func (m *SparkJob) GetProperties() map[string]string
func (*SparkJob) ProtoMessage ¶
func (*SparkJob) ProtoMessage()
func (*SparkJob) Reset ¶
func (m *SparkJob) Reset()
func (*SparkJob) String ¶
func (m *SparkJob) String() string
type SparkJob_MainClass ¶
type SparkJob_MainClass struct {
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}
type SparkJob_MainJarFileUri ¶
type SparkJob_MainJarFileUri struct {
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}
type SparkSqlJob ¶
type SparkSqlJob struct {
	// Required. The sequence of Spark SQL queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*SparkSqlJob_QueryFileUri
	//	*SparkSqlJob_QueryList
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure
	// Spark SQL's SparkConf. Properties that conflict with values set by the
	// Cloud Dataproc API may be overwritten.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}
A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.
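A minimal sketch of a Spark SQL job that substitutes a script variable into an inline query (the table and value are hypothetical):

sparkSQL := &dataproc.SparkSqlJob{
	Queries: &dataproc.SparkSqlJob_QueryList{
		QueryList: &dataproc.QueryList{
			Queries: []string{"SELECT * FROM events WHERE day = '${day}'"},
		},
	},
	// Equivalent to the Spark SQL command: SET day="2018-01-01";
	ScriptVariables: map[string]string{"day": "2018-01-01"},
}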
func (*SparkSqlJob) Descriptor ¶
func (*SparkSqlJob) Descriptor() ([]byte, []int)
func (*SparkSqlJob) GetJarFileUris ¶
func (m *SparkSqlJob) GetJarFileUris() []string
func (*SparkSqlJob) GetLoggingConfig ¶
func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig
func (*SparkSqlJob) GetProperties ¶
func (m *SparkSqlJob) GetProperties() map[string]string
func (*SparkSqlJob) GetQueries ¶
func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries
func (*SparkSqlJob) GetQueryFileUri ¶
func (m *SparkSqlJob) GetQueryFileUri() string
func (*SparkSqlJob) GetQueryList ¶
func (m *SparkSqlJob) GetQueryList() *QueryList
func (*SparkSqlJob) GetScriptVariables ¶
func (m *SparkSqlJob) GetScriptVariables() map[string]string
func (*SparkSqlJob) ProtoMessage ¶
func (*SparkSqlJob) ProtoMessage()
func (*SparkSqlJob) Reset ¶
func (m *SparkSqlJob) Reset()
func (*SparkSqlJob) String ¶
func (m *SparkSqlJob) String() string
func (*SparkSqlJob) XXX_OneofFuncs ¶
func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type SparkSqlJob_QueryFileUri ¶
type SparkSqlJob_QueryFileUri struct {
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}
type SparkSqlJob_QueryList ¶
type SparkSqlJob_QueryList struct {
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}
type SubmitJobRequest ¶
type SubmitJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"`
}
A request to submit a job.
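A minimal sketch of submitting a job over an existing gRPC connection; conn is an assumed authenticated *grpc.ClientConn, ctx an assumed context.Context, and job a previously assembled *Job:

client := dataproc.NewJobControllerClient(conn)
resp, err := client.SubmitJob(ctx, &dataproc.SubmitJobRequest{
	ProjectId: "my-project",
	Region:    "global",
	Job:       job,
})
if err != nil {
	// handle the RPC error
}
_ = resp // the returned *Job carries the server-assigned status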
func (*SubmitJobRequest) Descriptor ¶
func (*SubmitJobRequest) Descriptor() ([]byte, []int)
func (*SubmitJobRequest) GetJob ¶
func (m *SubmitJobRequest) GetJob() *Job
func (*SubmitJobRequest) GetProjectId ¶
func (m *SubmitJobRequest) GetProjectId() string
func (*SubmitJobRequest) GetRegion ¶
func (m *SubmitJobRequest) GetRegion() string
func (*SubmitJobRequest) ProtoMessage ¶
func (*SubmitJobRequest) ProtoMessage()
func (*SubmitJobRequest) Reset ¶
func (m *SubmitJobRequest) Reset()
func (*SubmitJobRequest) String ¶
func (m *SubmitJobRequest) String() string
type UpdateClusterRequest ¶
type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
	// Optional. Timeout for graceful YARN decommissioning. Graceful
	// decommissioning allows removing nodes from the cluster without
	// interrupting jobs in progress. The timeout specifies how long to wait for
	// jobs in progress to finish before forcefully removing nodes (and
	// potentially interrupting jobs). The default timeout is 0 (for forceful
	// decommission), and the maximum allowed timeout is 1 day.
	//
	// Only supported on Dataproc image versions 1.2 and higher.
	GracefulDecommissionTimeout *google_protobuf3.Duration `` /* 137-byte string literal not displayed */
	// Required. Specifies the path, relative to <code>Cluster</code>, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the <code>update_mask</code> parameter would be
	// specified as <code>config.worker_config.num_instances</code>,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//	{
	//	  "config": {
	//	    "workerConfig": {
	//	      "numInstances": "5"
	//	    }
	//	  }
	//	}
	//
	// Similarly, to change the number of preemptible workers in a cluster to 5,
	// the <code>update_mask</code> parameter would be
	// <code>config.secondary_worker_config.num_instances</code>, and the
	// `PATCH` request body would be set as follows:
	//
	//	{
	//	  "config": {
	//	    "secondaryWorkerConfig": {
	//	      "numInstances": "5"
	//	    }
	//	  }
	//	}
	//
	// <strong>Note:</strong> currently only some fields can be updated:
	//
	//	|Mask|Purpose|
	//	|----|-------|
	//	|`labels`|Updates labels|
	//	|`config.worker_config.num_instances`|Resize primary worker group|
	//	|`config.secondary_worker_config.num_instances`|Resize secondary worker group|
	UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
}
A request to update a cluster.
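For example, resizing the primary worker group to 5 instances pairs a sparse Cluster message with a matching update mask. A minimal sketch, assuming the FieldMask type (google_protobuf4 here) is imported as fmpb from google.golang.org/genproto/protobuf/field_mask:

req := &dataproc.UpdateClusterRequest{
	ProjectId:   "my-project",
	Region:      "global",
	ClusterName: "my-cluster",
	// Only the masked field below is read from this message.
	Cluster: &dataproc.Cluster{
		Config: &dataproc.ClusterConfig{
			WorkerConfig: &dataproc.InstanceGroupConfig{NumInstances: 5},
		},
	},
	UpdateMask: &fmpb.FieldMask{Paths: []string{"config.worker_config.num_instances"}},
}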
func (*UpdateClusterRequest) Descriptor ¶
func (*UpdateClusterRequest) Descriptor() ([]byte, []int)
func (*UpdateClusterRequest) GetCluster ¶
func (m *UpdateClusterRequest) GetCluster() *Cluster
func (*UpdateClusterRequest) GetClusterName ¶
func (m *UpdateClusterRequest) GetClusterName() string
func (*UpdateClusterRequest) GetGracefulDecommissionTimeout ¶
func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *google_protobuf3.Duration
func (*UpdateClusterRequest) GetProjectId ¶
func (m *UpdateClusterRequest) GetProjectId() string
func (*UpdateClusterRequest) GetRegion ¶
func (m *UpdateClusterRequest) GetRegion() string
func (*UpdateClusterRequest) GetUpdateMask ¶
func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf4.FieldMask
func (*UpdateClusterRequest) ProtoMessage ¶
func (*UpdateClusterRequest) ProtoMessage()
func (*UpdateClusterRequest) Reset ¶
func (m *UpdateClusterRequest) Reset()
func (*UpdateClusterRequest) String ¶
func (m *UpdateClusterRequest) String() string
type UpdateJobRequest ¶
type UpdateJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job" json:"job,omitempty"`
	// Required. Specifies the path, relative to <code>Job</code>, of
	// the field to update. For example, to update the labels of a Job the
	// <code>update_mask</code> parameter would be specified as
	// <code>labels</code>, and the `PATCH` request body would specify the new
	// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
	// field that can be updated.
	UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
}
A request to update a job.
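Since labels is currently the only updatable field, a request typically looks like the following minimal sketch (same fmpb import assumption as the cluster-update sketch above):

req := &dataproc.UpdateJobRequest{
	ProjectId:  "my-project",
	Region:     "global",
	JobId:      "my-job-id",
	Job:        &dataproc.Job{Labels: map[string]string{"team": "analytics"}},
	UpdateMask: &fmpb.FieldMask{Paths: []string{"labels"}},
}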
func (*UpdateJobRequest) Descriptor ¶
func (*UpdateJobRequest) Descriptor() ([]byte, []int)
func (*UpdateJobRequest) GetJob ¶
func (m *UpdateJobRequest) GetJob() *Job
func (*UpdateJobRequest) GetJobId ¶
func (m *UpdateJobRequest) GetJobId() string
func (*UpdateJobRequest) GetProjectId ¶
func (m *UpdateJobRequest) GetProjectId() string
func (*UpdateJobRequest) GetRegion ¶
func (m *UpdateJobRequest) GetRegion() string
func (*UpdateJobRequest) GetUpdateMask ¶
func (m *UpdateJobRequest) GetUpdateMask() *google_protobuf4.FieldMask
func (*UpdateJobRequest) ProtoMessage ¶
func (*UpdateJobRequest) ProtoMessage()
func (*UpdateJobRequest) Reset ¶
func (m *UpdateJobRequest) Reset()
func (*UpdateJobRequest) String ¶
func (m *UpdateJobRequest) String() string
type UpdateWorkflowTemplateRequest ¶
type UpdateWorkflowTemplateRequest struct {
	// Required. The updated workflow template.
	//
	// The `template.version` field must match the current version.
	Template *WorkflowTemplate `protobuf:"bytes,1,opt,name=template" json:"template,omitempty"`
}
A request to update a workflow template.
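The version check makes updates a read-modify-write cycle. A minimal sketch against a WorkflowTemplateServiceClient named wts; the template name is hypothetical, and it is assumed that GetWorkflowTemplateRequest takes the template's resource name in a Name field:

tmpl, err := wts.GetWorkflowTemplate(ctx, &dataproc.GetWorkflowTemplateRequest{
	Name: "projects/my-project/regions/global/workflowTemplates/my-template",
})
if err != nil {
	// handle the RPC error
}
// tmpl.Version now holds the current server version, satisfying the check.
tmpl.Labels = map[string]string{"env": "prod"}
tmpl, err = wts.UpdateWorkflowTemplate(ctx, &dataproc.UpdateWorkflowTemplateRequest{Template: tmpl})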
func (*UpdateWorkflowTemplateRequest) Descriptor ¶
func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
func (*UpdateWorkflowTemplateRequest) GetTemplate ¶
func (m *UpdateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
func (*UpdateWorkflowTemplateRequest) ProtoMessage ¶
func (*UpdateWorkflowTemplateRequest) ProtoMessage()
func (*UpdateWorkflowTemplateRequest) Reset ¶
func (m *UpdateWorkflowTemplateRequest) Reset()
func (*UpdateWorkflowTemplateRequest) String ¶
func (m *UpdateWorkflowTemplateRequest) String() string
type WorkflowGraph ¶
type WorkflowGraph struct {
	// Output only. The workflow nodes.
	Nodes []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"`
}
The workflow graph.
func (*WorkflowGraph) Descriptor ¶
func (*WorkflowGraph) Descriptor() ([]byte, []int)
func (*WorkflowGraph) GetNodes ¶
func (m *WorkflowGraph) GetNodes() []*WorkflowNode
func (*WorkflowGraph) ProtoMessage ¶
func (*WorkflowGraph) ProtoMessage()
func (*WorkflowGraph) Reset ¶
func (m *WorkflowGraph) Reset()
func (*WorkflowGraph) String ¶
func (m *WorkflowGraph) String() string
type WorkflowMetadata ¶
type WorkflowMetadata struct {
	// Output only. The "resource name" of the template.
	Template string `protobuf:"bytes,1,opt,name=template" json:"template,omitempty"`
	// Output only. The version of the template at the time of
	// workflow instantiation.
	Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
	// Output only. The create cluster operation metadata.
	CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster" json:"create_cluster,omitempty"`
	// Output only. The workflow graph.
	Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph" json:"graph,omitempty"`
	// Output only. The delete cluster operation metadata.
	DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster" json:"delete_cluster,omitempty"`
	// Output only. The workflow state.
	State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,enum=google.cloud.dataproc.v1beta2.WorkflowMetadata_State" json:"state,omitempty"`
	// Output only. The name of the managed cluster.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}
Output-only metadata describing the progress of a Cloud Dataproc workflow instantiation.
func (*WorkflowMetadata) Descriptor ¶
func (*WorkflowMetadata) Descriptor() ([]byte, []int)
func (*WorkflowMetadata) GetClusterName ¶
func (m *WorkflowMetadata) GetClusterName() string
func (*WorkflowMetadata) GetCreateCluster ¶
func (m *WorkflowMetadata) GetCreateCluster() *ClusterOperation
func (*WorkflowMetadata) GetDeleteCluster ¶
func (m *WorkflowMetadata) GetDeleteCluster() *ClusterOperation
func (*WorkflowMetadata) GetGraph ¶
func (m *WorkflowMetadata) GetGraph() *WorkflowGraph
func (*WorkflowMetadata) GetState ¶
func (m *WorkflowMetadata) GetState() WorkflowMetadata_State
func (*WorkflowMetadata) GetTemplate ¶
func (m *WorkflowMetadata) GetTemplate() string
func (*WorkflowMetadata) GetVersion ¶
func (m *WorkflowMetadata) GetVersion() int32
func (*WorkflowMetadata) ProtoMessage ¶
func (*WorkflowMetadata) ProtoMessage()
func (*WorkflowMetadata) Reset ¶
func (m *WorkflowMetadata) Reset()
func (*WorkflowMetadata) String ¶
func (m *WorkflowMetadata) String() string
type WorkflowMetadata_State ¶
type WorkflowMetadata_State int32
The operation state.
const (
	// Unused.
	WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0
	// The operation has been created.
	WorkflowMetadata_PENDING WorkflowMetadata_State = 1
	// The operation is running.
	WorkflowMetadata_RUNNING WorkflowMetadata_State = 2
	// The operation is done; either cancelled or completed.
	WorkflowMetadata_DONE WorkflowMetadata_State = 3
)
func (WorkflowMetadata_State) EnumDescriptor ¶
func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int)
func (WorkflowMetadata_State) String ¶
func (x WorkflowMetadata_State) String() string
type WorkflowNode ¶
type WorkflowNode struct {
	// Output only. The name of the node.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
	// Output only. The node's prerequisite nodes.
	PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds" json:"prerequisite_step_ids,omitempty"`
	// Output only. The job id; populated after the node enters the RUNNING state.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
	// Output only. The node state.
	State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,enum=google.cloud.dataproc.v1beta2.WorkflowNode_NodeState" json:"state,omitempty"`
	// Output only. The error detail.
	Error string `protobuf:"bytes,6,opt,name=error" json:"error,omitempty"`
}
The workflow node.
func (*WorkflowNode) Descriptor ¶
func (*WorkflowNode) Descriptor() ([]byte, []int)
func (*WorkflowNode) GetError ¶
func (m *WorkflowNode) GetError() string
func (*WorkflowNode) GetJobId ¶
func (m *WorkflowNode) GetJobId() string
func (*WorkflowNode) GetPrerequisiteStepIds ¶
func (m *WorkflowNode) GetPrerequisiteStepIds() []string
func (*WorkflowNode) GetState ¶
func (m *WorkflowNode) GetState() WorkflowNode_NodeState
func (*WorkflowNode) GetStepId ¶
func (m *WorkflowNode) GetStepId() string
func (*WorkflowNode) ProtoMessage ¶
func (*WorkflowNode) ProtoMessage()
func (*WorkflowNode) Reset ¶
func (m *WorkflowNode) Reset()
func (*WorkflowNode) String ¶
func (m *WorkflowNode) String() string
type WorkflowNode_NodeState ¶
type WorkflowNode_NodeState int32
const (
	WorkflowNode_NODE_STATUS_UNSPECIFIED WorkflowNode_NodeState = 0
	// The node is awaiting its prerequisite nodes to finish.
	WorkflowNode_BLOCKED WorkflowNode_NodeState = 1
	// The node is runnable but not running.
	WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2
	// The node is running.
	WorkflowNode_RUNNING WorkflowNode_NodeState = 3
	// The node completed successfully.
	WorkflowNode_COMPLETED WorkflowNode_NodeState = 4
	// The node failed. A node can be marked FAILED because
	// its ancestor or peer failed.
	WorkflowNode_FAILED WorkflowNode_NodeState = 5
)
func (WorkflowNode_NodeState) EnumDescriptor ¶
func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int)
func (WorkflowNode_NodeState) String ¶
func (x WorkflowNode_NodeState) String() string
type WorkflowTemplate ¶
type WorkflowTemplate struct {
	// Required. The template id.
	Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
	// Output only. The "resource name" of the template, as described
	// in https://cloud.google.com/apis/design/resource_names, of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Optional. Used to perform a consistent read-modify-write.
	//
	// This field should be left blank for a `CreateWorkflowTemplate` request. It
	// is required for an `UpdateWorkflowTemplate` request, and must match the
	// current server version. A typical update template flow would fetch the
	// current template with a `GetWorkflowTemplate` request, which will return
	// the current template with the `version` field filled in with the
	// current server version. The user updates other fields in the template,
	// then returns it as part of the `UpdateWorkflowTemplate` request.
	Version int32 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
	// Output only. The time the template was created.
	CreateTime *google_protobuf5.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"`
	// Output only. The time the template was last updated.
	UpdateTime *google_protobuf5.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"`
	// Optional. The labels to associate with this template. These labels
	// will be propagated to all jobs and clusters created by the workflow
	// instance.
	//
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// No more than 32 labels can be associated with a template.
	Labels map[string]string `` /* 132-byte string literal not displayed */
	// Required. WorkflowTemplate scheduling information.
	Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement" json:"placement,omitempty"`
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs []*OrderedJob `protobuf:"bytes,8,rep,name=jobs" json:"jobs,omitempty"`
}
A Cloud Dataproc workflow template resource.
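Pulling the pieces together, a template is essentially an id, a placement, and an ordered-job DAG. A minimal sketch reusing the prep and report steps sketched under OrderedJob (the template id and labels are hypothetical, and ClusterSelector is assumed to match clusters by its cluster labels map):

tmpl := &dataproc.WorkflowTemplate{
	Id: "nightly-report",
	Placement: &dataproc.WorkflowTemplatePlacement{
		Placement: &dataproc.WorkflowTemplatePlacement_ClusterSelector{
			ClusterSelector: &dataproc.ClusterSelector{
				ClusterLabels: map[string]string{"env": "prod"},
			},
		},
	},
	Jobs: []*dataproc.OrderedJob{prep, report},
}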
func (*WorkflowTemplate) Descriptor ¶
func (*WorkflowTemplate) Descriptor() ([]byte, []int)
func (*WorkflowTemplate) GetCreateTime ¶
func (m *WorkflowTemplate) GetCreateTime() *google_protobuf5.Timestamp
func (*WorkflowTemplate) GetId ¶
func (m *WorkflowTemplate) GetId() string
func (*WorkflowTemplate) GetJobs ¶
func (m *WorkflowTemplate) GetJobs() []*OrderedJob
func (*WorkflowTemplate) GetLabels ¶
func (m *WorkflowTemplate) GetLabels() map[string]string
func (*WorkflowTemplate) GetName ¶
func (m *WorkflowTemplate) GetName() string
func (*WorkflowTemplate) GetPlacement ¶
func (m *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement
func (*WorkflowTemplate) GetUpdateTime ¶
func (m *WorkflowTemplate) GetUpdateTime() *google_protobuf5.Timestamp
func (*WorkflowTemplate) GetVersion ¶
func (m *WorkflowTemplate) GetVersion() int32
func (*WorkflowTemplate) ProtoMessage ¶
func (*WorkflowTemplate) ProtoMessage()
func (*WorkflowTemplate) Reset ¶
func (m *WorkflowTemplate) Reset()
func (*WorkflowTemplate) String ¶
func (m *WorkflowTemplate) String() string
type WorkflowTemplatePlacement ¶
type WorkflowTemplatePlacement struct {
	// Types that are valid to be assigned to Placement:
	//	*WorkflowTemplatePlacement_ManagedCluster
	//	*WorkflowTemplatePlacement_ClusterSelector
	Placement isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"`
}
Specifies workflow execution target.
Either `managed_cluster` or `cluster_selector` is required.
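The cluster_selector form appears in the WorkflowTemplate sketch above; the managed_cluster alternative runs the workflow on an ephemeral cluster that the workflow creates and deletes. A minimal sketch, assuming ManagedCluster carries a cluster name plus a ClusterConfig:

managed := &dataproc.WorkflowTemplatePlacement{
	Placement: &dataproc.WorkflowTemplatePlacement_ManagedCluster{
		ManagedCluster: &dataproc.ManagedCluster{
			ClusterName: "ephemeral-workflow-cluster",
			Config:      &dataproc.ClusterConfig{},
		},
	},
}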
func (*WorkflowTemplatePlacement) Descriptor ¶
func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)
func (*WorkflowTemplatePlacement) GetClusterSelector ¶
func (m *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector
func (*WorkflowTemplatePlacement) GetManagedCluster ¶
func (m *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster
func (*WorkflowTemplatePlacement) GetPlacement ¶
func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement
func (*WorkflowTemplatePlacement) ProtoMessage ¶
func (*WorkflowTemplatePlacement) ProtoMessage()
func (*WorkflowTemplatePlacement) Reset ¶
func (m *WorkflowTemplatePlacement) Reset()
func (*WorkflowTemplatePlacement) String ¶
func (m *WorkflowTemplatePlacement) String() string
func (*WorkflowTemplatePlacement) XXX_OneofFuncs ¶
func (*WorkflowTemplatePlacement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type WorkflowTemplatePlacement_ClusterSelector ¶
type WorkflowTemplatePlacement_ClusterSelector struct {
ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,oneof"`
}
type WorkflowTemplatePlacement_ManagedCluster ¶
type WorkflowTemplatePlacement_ManagedCluster struct {
ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,oneof"`
}
type WorkflowTemplateServiceClient ¶
type WorkflowTemplateServiceClient interface {
	// Creates a new workflow template.
	CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve a previously instantiated template by specifying an optional
	// version parameter.
	GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [google.cloud.dataproc.v1beta2.OperationService.GetOperation][].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [google.cloud.dataproc.v1beta2.OperationService.CancelOperation][].
	//
	// The [google.cloud.dataproc.v1beta2.Operation.metadata][] will always be
	// [google.cloud.dataproc.v1beta2.WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// The [google.cloud.dataproc.v1beta2.Operation.result][] will always be
	// [google.protobuf.Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
}
func NewWorkflowTemplateServiceClient ¶
func NewWorkflowTemplateServiceClient(cc *grpc.ClientConn) WorkflowTemplateServiceClient
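A minimal sketch of wiring the client and kicking off a workflow; conn is an assumed authenticated *grpc.ClientConn, ctx an assumed context.Context, and the request is assumed to take the template's resource name in a Name field:

wts := dataproc.NewWorkflowTemplateServiceClient(conn)
op, err := wts.InstantiateWorkflowTemplate(ctx, &dataproc.InstantiateWorkflowTemplateRequest{
	Name: "projects/my-project/regions/global/workflowTemplates/nightly-report",
})
if err != nil {
	// handle the RPC error
}
// Poll op (a *google_longrunning.Operation) until the workflow finishes.
_ = op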
type WorkflowTemplateServiceServer ¶
type WorkflowTemplateServiceServer interface {
	// Creates a new workflow template.
	CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve a previously instantiated template by specifying an optional
	// version parameter.
	GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of the
	// workflow by polling
	// [google.cloud.dataproc.v1beta2.OperationService.GetOperation][].
	// The Operation will complete when the entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [google.cloud.dataproc.v1beta2.OperationService.CancelOperation][].
	//
	// The [google.cloud.dataproc.v1beta2.Operation.metadata][] will always be
	// [google.cloud.dataproc.v1beta2.WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// The [google.cloud.dataproc.v1beta2.Operation.result][] will always be
	// [google.protobuf.Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*google_longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*google_protobuf2.Empty, error)
}
type YarnApplication ¶
type YarnApplication struct {
	// Required. The application name.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Required. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"`
	// Required. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress" json:"progress,omitempty"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL
	// uses the internal hostname, and requires a proxy server for resolution
	// and, possibly, access.
	TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl" json:"tracking_url,omitempty"`
}
A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
func (*YarnApplication) Descriptor ¶
func (*YarnApplication) Descriptor() ([]byte, []int)
func (*YarnApplication) GetName ¶
func (m *YarnApplication) GetName() string
func (*YarnApplication) GetProgress ¶
func (m *YarnApplication) GetProgress() float32
func (*YarnApplication) GetState ¶
func (m *YarnApplication) GetState() YarnApplication_State
func (*YarnApplication) GetTrackingUrl ¶
func (m *YarnApplication) GetTrackingUrl() string
func (*YarnApplication) ProtoMessage ¶
func (*YarnApplication) ProtoMessage()
func (*YarnApplication) Reset ¶
func (m *YarnApplication) Reset()
func (*YarnApplication) String ¶
func (m *YarnApplication) String() string
type YarnApplication_State ¶
type YarnApplication_State int32
The application state, corresponding to <code>YarnProtos.YarnApplicationStateProto</code>.
const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)
func (YarnApplication_State) EnumDescriptor ¶
func (YarnApplication_State) EnumDescriptor() ([]byte, []int)
func (YarnApplication_State) String ¶
func (x YarnApplication_State) String() string