plugins

package
v1.10.7-b1 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jan 18, 2024 License: Apache-2.0 Imports: 5 Imported by: 12

Documentation

Index

Constants

This section is empty.

Variables

View Source
var SparkApplication_Type_name = map[int32]string{
	0: "PYTHON",
	1: "JAVA",
	2: "SCALA",
	3: "R",
}
View Source
var SparkApplication_Type_value = map[string]int32{
	"PYTHON": 0,
	"JAVA":   1,
	"SCALA":  2,
	"R":      3,
}

Functions

This section is empty.

Types

type ArrayJob

type ArrayJob struct {
	// Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an
	// optimistic restriction and that, due to network partitioning or other failures, the actual number of currently
	// running instances might be more. This has to be a positive number if assigned. Default value is size.
	Parallelism int64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"`
	// Defines the number of instances to launch at most. This number should match the size of the input if the job
	// requires processing of all input data. This has to be a positive number.
	// In the case this is not defined, the back-end will determine the size at run-time by reading the inputs.
	Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
	// Types that are valid to be assigned to SuccessCriteria:
	//	*ArrayJob_MinSuccesses
	//	*ArrayJob_MinSuccessRatio
	SuccessCriteria      isArrayJob_SuccessCriteria `protobuf_oneof:"success_criteria"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component will be executed concurrently.

func (*ArrayJob) Descriptor

func (*ArrayJob) Descriptor() ([]byte, []int)

func (*ArrayJob) GetMinSuccessRatio

func (m *ArrayJob) GetMinSuccessRatio() float32

func (*ArrayJob) GetMinSuccesses

func (m *ArrayJob) GetMinSuccesses() int64

func (*ArrayJob) GetParallelism

func (m *ArrayJob) GetParallelism() int64

func (*ArrayJob) GetSize

func (m *ArrayJob) GetSize() int64

func (*ArrayJob) GetSuccessCriteria

func (m *ArrayJob) GetSuccessCriteria() isArrayJob_SuccessCriteria

func (*ArrayJob) ProtoMessage

func (*ArrayJob) ProtoMessage()

func (*ArrayJob) Reset

func (m *ArrayJob) Reset()

func (*ArrayJob) String

func (m *ArrayJob) String() string

func (*ArrayJob) XXX_DiscardUnknown

func (m *ArrayJob) XXX_DiscardUnknown()

func (*ArrayJob) XXX_Marshal

func (m *ArrayJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ArrayJob) XXX_Merge

func (m *ArrayJob) XXX_Merge(src proto.Message)

func (*ArrayJob) XXX_OneofWrappers

func (*ArrayJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*ArrayJob) XXX_Size

func (m *ArrayJob) XXX_Size() int

func (*ArrayJob) XXX_Unmarshal

func (m *ArrayJob) XXX_Unmarshal(b []byte) error

type ArrayJob_MinSuccessRatio

type ArrayJob_MinSuccessRatio struct {
	MinSuccessRatio float32 `protobuf:"fixed32,4,opt,name=min_success_ratio,json=minSuccessRatio,proto3,oneof"`
}

type ArrayJob_MinSuccesses

type ArrayJob_MinSuccesses struct {
	MinSuccesses int64 `protobuf:"varint,3,opt,name=min_successes,json=minSuccesses,proto3,oneof"`
}

type DaskJob

type DaskJob struct {
	// Spec for the scheduler pod.
	Scheduler *DaskScheduler `protobuf:"bytes,1,opt,name=scheduler,proto3" json:"scheduler,omitempty"`
	// Spec of the default worker group.
	Workers              *DaskWorkerGroup `protobuf:"bytes,2,opt,name=workers,proto3" json:"workers,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

Custom Proto for Dask Plugin.

func (*DaskJob) Descriptor

func (*DaskJob) Descriptor() ([]byte, []int)

func (*DaskJob) GetScheduler

func (m *DaskJob) GetScheduler() *DaskScheduler

func (*DaskJob) GetWorkers

func (m *DaskJob) GetWorkers() *DaskWorkerGroup

func (*DaskJob) ProtoMessage

func (*DaskJob) ProtoMessage()

func (*DaskJob) Reset

func (m *DaskJob) Reset()

func (*DaskJob) String

func (m *DaskJob) String() string

func (*DaskJob) XXX_DiscardUnknown

func (m *DaskJob) XXX_DiscardUnknown()

func (*DaskJob) XXX_Marshal

func (m *DaskJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DaskJob) XXX_Merge

func (m *DaskJob) XXX_Merge(src proto.Message)

func (*DaskJob) XXX_Size

func (m *DaskJob) XXX_Size() int

func (*DaskJob) XXX_Unmarshal

func (m *DaskJob) XXX_Unmarshal(b []byte) error

type DaskScheduler

type DaskScheduler struct {
	// Optional image to use. If unset, will use the default image.
	Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
	// Resources assigned to the scheduler pod.
	Resources            *core.Resources `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

Specification for the scheduler pod.

func (*DaskScheduler) Descriptor

func (*DaskScheduler) Descriptor() ([]byte, []int)

func (*DaskScheduler) GetImage

func (m *DaskScheduler) GetImage() string

func (*DaskScheduler) GetResources

func (m *DaskScheduler) GetResources() *core.Resources

func (*DaskScheduler) ProtoMessage

func (*DaskScheduler) ProtoMessage()

func (*DaskScheduler) Reset

func (m *DaskScheduler) Reset()

func (*DaskScheduler) String

func (m *DaskScheduler) String() string

func (*DaskScheduler) XXX_DiscardUnknown

func (m *DaskScheduler) XXX_DiscardUnknown()

func (*DaskScheduler) XXX_Marshal

func (m *DaskScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DaskScheduler) XXX_Merge

func (m *DaskScheduler) XXX_Merge(src proto.Message)

func (*DaskScheduler) XXX_Size

func (m *DaskScheduler) XXX_Size() int

func (*DaskScheduler) XXX_Unmarshal

func (m *DaskScheduler) XXX_Unmarshal(b []byte) error

type DaskWorkerGroup

type DaskWorkerGroup struct {
	// Number of workers in the group.
	NumberOfWorkers uint32 `protobuf:"varint,1,opt,name=number_of_workers,json=numberOfWorkers,proto3" json:"number_of_workers,omitempty"`
	// Optional image to use for the pods of the worker group. If unset, will use the default image.
	Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
	// Resources assigned to all pods of the worker group.
	// As per https://kubernetes.dask.org/en/latest/kubecluster.html?highlight=limit#best-practices
	// it is advised to only set limits. If requests are not explicitly set, the plugin will make
	// sure to set requests==limits.
	// The plugin sets `--memory-limit` as well as `--nthreads` for the workers according to the limit.
	Resources            *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

func (*DaskWorkerGroup) Descriptor

func (*DaskWorkerGroup) Descriptor() ([]byte, []int)

func (*DaskWorkerGroup) GetImage

func (m *DaskWorkerGroup) GetImage() string

func (*DaskWorkerGroup) GetNumberOfWorkers

func (m *DaskWorkerGroup) GetNumberOfWorkers() uint32

func (*DaskWorkerGroup) GetResources

func (m *DaskWorkerGroup) GetResources() *core.Resources

func (*DaskWorkerGroup) ProtoMessage

func (*DaskWorkerGroup) ProtoMessage()

func (*DaskWorkerGroup) Reset

func (m *DaskWorkerGroup) Reset()

func (*DaskWorkerGroup) String

func (m *DaskWorkerGroup) String() string

func (*DaskWorkerGroup) XXX_DiscardUnknown

func (m *DaskWorkerGroup) XXX_DiscardUnknown()

func (*DaskWorkerGroup) XXX_Marshal

func (m *DaskWorkerGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DaskWorkerGroup) XXX_Merge

func (m *DaskWorkerGroup) XXX_Merge(src proto.Message)

func (*DaskWorkerGroup) XXX_Size

func (m *DaskWorkerGroup) XXX_Size() int

func (*DaskWorkerGroup) XXX_Unmarshal

func (m *DaskWorkerGroup) XXX_Unmarshal(b []byte) error

type DistributedMPITrainingTask

type DistributedMPITrainingTask struct {
	// number of worker spawned in the cluster for this job
	NumWorkers int32 `protobuf:"varint,1,opt,name=num_workers,json=numWorkers,proto3" json:"num_workers,omitempty"`
	// number of launcher replicas spawned in the cluster for this job
	// The launcher pod invokes mpirun and communicates with worker pods through MPI.
	NumLauncherReplicas int32 `protobuf:"varint,2,opt,name=num_launcher_replicas,json=numLauncherReplicas,proto3" json:"num_launcher_replicas,omitempty"`
	// number of slots per worker used in hostfile.
	// The available slots (GPUs) in each pod.
	Slots                int32    `protobuf:"varint,3,opt,name=slots,proto3" json:"slots,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator

func (*DistributedMPITrainingTask) Descriptor

func (*DistributedMPITrainingTask) Descriptor() ([]byte, []int)

func (*DistributedMPITrainingTask) GetNumLauncherReplicas

func (m *DistributedMPITrainingTask) GetNumLauncherReplicas() int32

func (*DistributedMPITrainingTask) GetNumWorkers

func (m *DistributedMPITrainingTask) GetNumWorkers() int32

func (*DistributedMPITrainingTask) GetSlots

func (m *DistributedMPITrainingTask) GetSlots() int32

func (*DistributedMPITrainingTask) ProtoMessage

func (*DistributedMPITrainingTask) ProtoMessage()

func (*DistributedMPITrainingTask) Reset

func (m *DistributedMPITrainingTask) Reset()

func (*DistributedMPITrainingTask) String

func (m *DistributedMPITrainingTask) String() string

func (*DistributedMPITrainingTask) XXX_DiscardUnknown

func (m *DistributedMPITrainingTask) XXX_DiscardUnknown()

func (*DistributedMPITrainingTask) XXX_Marshal

func (m *DistributedMPITrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistributedMPITrainingTask) XXX_Merge

func (m *DistributedMPITrainingTask) XXX_Merge(src proto.Message)

func (*DistributedMPITrainingTask) XXX_Size

func (m *DistributedMPITrainingTask) XXX_Size() int

func (*DistributedMPITrainingTask) XXX_Unmarshal

func (m *DistributedMPITrainingTask) XXX_Unmarshal(b []byte) error

type DistributedPyTorchTrainingTask

type DistributedPyTorchTrainingTask struct {
	// number of worker replicas spawned in the cluster for this job
	Workers int32 `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"`
	// config for an elastic pytorch job
	//
	ElasticConfig        *ElasticConfig `protobuf:"bytes,2,opt,name=elastic_config,json=elasticConfig,proto3" json:"elastic_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator

func (*DistributedPyTorchTrainingTask) Descriptor

func (*DistributedPyTorchTrainingTask) Descriptor() ([]byte, []int)

func (*DistributedPyTorchTrainingTask) GetElasticConfig

func (m *DistributedPyTorchTrainingTask) GetElasticConfig() *ElasticConfig

func (*DistributedPyTorchTrainingTask) GetWorkers

func (m *DistributedPyTorchTrainingTask) GetWorkers() int32

func (*DistributedPyTorchTrainingTask) ProtoMessage

func (*DistributedPyTorchTrainingTask) ProtoMessage()

func (*DistributedPyTorchTrainingTask) Reset

func (m *DistributedPyTorchTrainingTask) Reset()

func (*DistributedPyTorchTrainingTask) String

func (*DistributedPyTorchTrainingTask) XXX_DiscardUnknown

func (m *DistributedPyTorchTrainingTask) XXX_DiscardUnknown()

func (*DistributedPyTorchTrainingTask) XXX_Marshal

func (m *DistributedPyTorchTrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistributedPyTorchTrainingTask) XXX_Merge

func (m *DistributedPyTorchTrainingTask) XXX_Merge(src proto.Message)

func (*DistributedPyTorchTrainingTask) XXX_Size

func (m *DistributedPyTorchTrainingTask) XXX_Size() int

func (*DistributedPyTorchTrainingTask) XXX_Unmarshal

func (m *DistributedPyTorchTrainingTask) XXX_Unmarshal(b []byte) error

type DistributedTensorflowTrainingTask

type DistributedTensorflowTrainingTask struct {
	// number of worker replicas spawned in the cluster for this job
	Workers int32 `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"`
	// PS -> Parameter server
	// number of ps replicas spawned in the cluster for this job
	PsReplicas int32 `protobuf:"varint,2,opt,name=ps_replicas,json=psReplicas,proto3" json:"ps_replicas,omitempty"`
	// number of chief replicas spawned in the cluster for this job
	ChiefReplicas int32 `protobuf:"varint,3,opt,name=chief_replicas,json=chiefReplicas,proto3" json:"chief_replicas,omitempty"`
	// number of evaluator replicas spawned in the cluster for this job
	EvaluatorReplicas    int32    `protobuf:"varint,4,opt,name=evaluator_replicas,json=evaluatorReplicas,proto3" json:"evaluator_replicas,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator

func (*DistributedTensorflowTrainingTask) Descriptor

func (*DistributedTensorflowTrainingTask) Descriptor() ([]byte, []int)

func (*DistributedTensorflowTrainingTask) GetChiefReplicas

func (m *DistributedTensorflowTrainingTask) GetChiefReplicas() int32

func (*DistributedTensorflowTrainingTask) GetEvaluatorReplicas added in v1.9.16

func (m *DistributedTensorflowTrainingTask) GetEvaluatorReplicas() int32

func (*DistributedTensorflowTrainingTask) GetPsReplicas

func (m *DistributedTensorflowTrainingTask) GetPsReplicas() int32

func (*DistributedTensorflowTrainingTask) GetWorkers

func (m *DistributedTensorflowTrainingTask) GetWorkers() int32

func (*DistributedTensorflowTrainingTask) ProtoMessage

func (*DistributedTensorflowTrainingTask) ProtoMessage()

func (*DistributedTensorflowTrainingTask) Reset

func (*DistributedTensorflowTrainingTask) String

func (*DistributedTensorflowTrainingTask) XXX_DiscardUnknown

func (m *DistributedTensorflowTrainingTask) XXX_DiscardUnknown()

func (*DistributedTensorflowTrainingTask) XXX_Marshal

func (m *DistributedTensorflowTrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistributedTensorflowTrainingTask) XXX_Merge

func (*DistributedTensorflowTrainingTask) XXX_Size

func (m *DistributedTensorflowTrainingTask) XXX_Size() int

func (*DistributedTensorflowTrainingTask) XXX_Unmarshal

func (m *DistributedTensorflowTrainingTask) XXX_Unmarshal(b []byte) error

type ElasticConfig

type ElasticConfig struct {
	RdzvBackend          string   `protobuf:"bytes,1,opt,name=rdzv_backend,json=rdzvBackend,proto3" json:"rdzv_backend,omitempty"`
	MinReplicas          int32    `protobuf:"varint,2,opt,name=min_replicas,json=minReplicas,proto3" json:"min_replicas,omitempty"`
	MaxReplicas          int32    `protobuf:"varint,3,opt,name=max_replicas,json=maxReplicas,proto3" json:"max_replicas,omitempty"`
	NprocPerNode         int32    `protobuf:"varint,4,opt,name=nproc_per_node,json=nprocPerNode,proto3" json:"nproc_per_node,omitempty"`
	MaxRestarts          int32    `protobuf:"varint,5,opt,name=max_restarts,json=maxRestarts,proto3" json:"max_restarts,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Custom proto for torch elastic config for distributed training using https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go

func (*ElasticConfig) Descriptor

func (*ElasticConfig) Descriptor() ([]byte, []int)

func (*ElasticConfig) GetMaxReplicas

func (m *ElasticConfig) GetMaxReplicas() int32

func (*ElasticConfig) GetMaxRestarts

func (m *ElasticConfig) GetMaxRestarts() int32

func (*ElasticConfig) GetMinReplicas

func (m *ElasticConfig) GetMinReplicas() int32

func (*ElasticConfig) GetNprocPerNode

func (m *ElasticConfig) GetNprocPerNode() int32

func (*ElasticConfig) GetRdzvBackend

func (m *ElasticConfig) GetRdzvBackend() string

func (*ElasticConfig) ProtoMessage

func (*ElasticConfig) ProtoMessage()

func (*ElasticConfig) Reset

func (m *ElasticConfig) Reset()

func (*ElasticConfig) String

func (m *ElasticConfig) String() string

func (*ElasticConfig) XXX_DiscardUnknown

func (m *ElasticConfig) XXX_DiscardUnknown()

func (*ElasticConfig) XXX_Marshal

func (m *ElasticConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ElasticConfig) XXX_Merge

func (m *ElasticConfig) XXX_Merge(src proto.Message)

func (*ElasticConfig) XXX_Size

func (m *ElasticConfig) XXX_Size() int

func (*ElasticConfig) XXX_Unmarshal

func (m *ElasticConfig) XXX_Unmarshal(b []byte) error

type HeadGroupSpec

type HeadGroupSpec struct {
	// Optional. RayStartParams are the params of the start command: address, object-store-memory.
	// Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start
	RayStartParams       map[string]string `` /* 193-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

HeadGroupSpec is the spec for the head pod

func (*HeadGroupSpec) Descriptor

func (*HeadGroupSpec) Descriptor() ([]byte, []int)

func (*HeadGroupSpec) GetRayStartParams

func (m *HeadGroupSpec) GetRayStartParams() map[string]string

func (*HeadGroupSpec) ProtoMessage

func (*HeadGroupSpec) ProtoMessage()

func (*HeadGroupSpec) Reset

func (m *HeadGroupSpec) Reset()

func (*HeadGroupSpec) String

func (m *HeadGroupSpec) String() string

func (*HeadGroupSpec) XXX_DiscardUnknown

func (m *HeadGroupSpec) XXX_DiscardUnknown()

func (*HeadGroupSpec) XXX_Marshal

func (m *HeadGroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HeadGroupSpec) XXX_Merge

func (m *HeadGroupSpec) XXX_Merge(src proto.Message)

func (*HeadGroupSpec) XXX_Size

func (m *HeadGroupSpec) XXX_Size() int

func (*HeadGroupSpec) XXX_Unmarshal

func (m *HeadGroupSpec) XXX_Unmarshal(b []byte) error

type HiveQuery

type HiveQuery struct {
	Query                string   `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
	TimeoutSec           uint32   `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"`
	RetryCount           uint32   `protobuf:"varint,3,opt,name=retryCount,proto3" json:"retryCount,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Defines a query to execute on a hive cluster.

func (*HiveQuery) Descriptor

func (*HiveQuery) Descriptor() ([]byte, []int)

func (*HiveQuery) GetQuery

func (m *HiveQuery) GetQuery() string

func (*HiveQuery) GetRetryCount

func (m *HiveQuery) GetRetryCount() uint32

func (*HiveQuery) GetTimeoutSec

func (m *HiveQuery) GetTimeoutSec() uint32

func (*HiveQuery) ProtoMessage

func (*HiveQuery) ProtoMessage()

func (*HiveQuery) Reset

func (m *HiveQuery) Reset()

func (*HiveQuery) String

func (m *HiveQuery) String() string

func (*HiveQuery) XXX_DiscardUnknown

func (m *HiveQuery) XXX_DiscardUnknown()

func (*HiveQuery) XXX_Marshal

func (m *HiveQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HiveQuery) XXX_Merge

func (m *HiveQuery) XXX_Merge(src proto.Message)

func (*HiveQuery) XXX_Size

func (m *HiveQuery) XXX_Size() int

func (*HiveQuery) XXX_Unmarshal

func (m *HiveQuery) XXX_Unmarshal(b []byte) error

type HiveQueryCollection

type HiveQueryCollection struct {
	Queries              []*HiveQuery `protobuf:"bytes,2,rep,name=queries,proto3" json:"queries,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

Defines a collection of hive queries.

func (*HiveQueryCollection) Descriptor

func (*HiveQueryCollection) Descriptor() ([]byte, []int)

func (*HiveQueryCollection) GetQueries

func (m *HiveQueryCollection) GetQueries() []*HiveQuery

func (*HiveQueryCollection) ProtoMessage

func (*HiveQueryCollection) ProtoMessage()

func (*HiveQueryCollection) Reset

func (m *HiveQueryCollection) Reset()

func (*HiveQueryCollection) String

func (m *HiveQueryCollection) String() string

func (*HiveQueryCollection) XXX_DiscardUnknown

func (m *HiveQueryCollection) XXX_DiscardUnknown()

func (*HiveQueryCollection) XXX_Marshal

func (m *HiveQueryCollection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HiveQueryCollection) XXX_Merge

func (m *HiveQueryCollection) XXX_Merge(src proto.Message)

func (*HiveQueryCollection) XXX_Size

func (m *HiveQueryCollection) XXX_Size() int

func (*HiveQueryCollection) XXX_Unmarshal

func (m *HiveQueryCollection) XXX_Unmarshal(b []byte) error

type PrestoQuery

type PrestoQuery struct {
	RoutingGroup         string   `protobuf:"bytes,1,opt,name=routing_group,json=routingGroup,proto3" json:"routing_group,omitempty"`
	Catalog              string   `protobuf:"bytes,2,opt,name=catalog,proto3" json:"catalog,omitempty"`
	Schema               string   `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"`
	Statement            string   `protobuf:"bytes,4,opt,name=statement,proto3" json:"statement,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field of a Presto task's TaskTemplate

func (*PrestoQuery) Descriptor

func (*PrestoQuery) Descriptor() ([]byte, []int)

func (*PrestoQuery) GetCatalog

func (m *PrestoQuery) GetCatalog() string

func (*PrestoQuery) GetRoutingGroup

func (m *PrestoQuery) GetRoutingGroup() string

func (*PrestoQuery) GetSchema

func (m *PrestoQuery) GetSchema() string

func (*PrestoQuery) GetStatement

func (m *PrestoQuery) GetStatement() string

func (*PrestoQuery) ProtoMessage

func (*PrestoQuery) ProtoMessage()

func (*PrestoQuery) Reset

func (m *PrestoQuery) Reset()

func (*PrestoQuery) String

func (m *PrestoQuery) String() string

func (*PrestoQuery) XXX_DiscardUnknown

func (m *PrestoQuery) XXX_DiscardUnknown()

func (*PrestoQuery) XXX_Marshal

func (m *PrestoQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PrestoQuery) XXX_Merge

func (m *PrestoQuery) XXX_Merge(src proto.Message)

func (*PrestoQuery) XXX_Size

func (m *PrestoQuery) XXX_Size() int

func (*PrestoQuery) XXX_Unmarshal

func (m *PrestoQuery) XXX_Unmarshal(b []byte) error

type QuboleHiveJob

type QuboleHiveJob struct {
	ClusterLabel         string               `protobuf:"bytes,1,opt,name=cluster_label,json=clusterLabel,proto3" json:"cluster_label,omitempty"`
	QueryCollection      *HiveQueryCollection `protobuf:"bytes,2,opt,name=query_collection,json=queryCollection,proto3" json:"query_collection,omitempty"` // Deprecated: Do not use.
	Tags                 []string             `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"`
	Query                *HiveQuery           `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field of a hive task's TaskTemplate

func (*QuboleHiveJob) Descriptor

func (*QuboleHiveJob) Descriptor() ([]byte, []int)

func (*QuboleHiveJob) GetClusterLabel

func (m *QuboleHiveJob) GetClusterLabel() string

func (*QuboleHiveJob) GetQuery

func (m *QuboleHiveJob) GetQuery() *HiveQuery

func (*QuboleHiveJob) GetQueryCollection deprecated

func (m *QuboleHiveJob) GetQueryCollection() *HiveQueryCollection

Deprecated: Do not use.

func (*QuboleHiveJob) GetTags

func (m *QuboleHiveJob) GetTags() []string

func (*QuboleHiveJob) ProtoMessage

func (*QuboleHiveJob) ProtoMessage()

func (*QuboleHiveJob) Reset

func (m *QuboleHiveJob) Reset()

func (*QuboleHiveJob) String

func (m *QuboleHiveJob) String() string

func (*QuboleHiveJob) XXX_DiscardUnknown

func (m *QuboleHiveJob) XXX_DiscardUnknown()

func (*QuboleHiveJob) XXX_Marshal

func (m *QuboleHiveJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*QuboleHiveJob) XXX_Merge

func (m *QuboleHiveJob) XXX_Merge(src proto.Message)

func (*QuboleHiveJob) XXX_Size

func (m *QuboleHiveJob) XXX_Size() int

func (*QuboleHiveJob) XXX_Unmarshal

func (m *QuboleHiveJob) XXX_Unmarshal(b []byte) error

type RayCluster

type RayCluster struct {
	// HeadGroupSpec is the spec for the head pod
	HeadGroupSpec *HeadGroupSpec `protobuf:"bytes,1,opt,name=head_group_spec,json=headGroupSpec,proto3" json:"head_group_spec,omitempty"`
	// WorkerGroupSpecs are the specs for the worker pods
	WorkerGroupSpec []*WorkerGroupSpec `protobuf:"bytes,2,rep,name=worker_group_spec,json=workerGroupSpec,proto3" json:"worker_group_spec,omitempty"`
	// Whether to enable autoscaling.
	EnableAutoscaling    bool     `protobuf:"varint,3,opt,name=enable_autoscaling,json=enableAutoscaling,proto3" json:"enable_autoscaling,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

RayCluster defines the desired state of a Ray cluster

func (*RayCluster) Descriptor

func (*RayCluster) Descriptor() ([]byte, []int)

func (*RayCluster) GetEnableAutoscaling added in v1.10.7

func (m *RayCluster) GetEnableAutoscaling() bool

func (*RayCluster) GetHeadGroupSpec

func (m *RayCluster) GetHeadGroupSpec() *HeadGroupSpec

func (*RayCluster) GetWorkerGroupSpec

func (m *RayCluster) GetWorkerGroupSpec() []*WorkerGroupSpec

func (*RayCluster) ProtoMessage

func (*RayCluster) ProtoMessage()

func (*RayCluster) Reset

func (m *RayCluster) Reset()

func (*RayCluster) String

func (m *RayCluster) String() string

func (*RayCluster) XXX_DiscardUnknown

func (m *RayCluster) XXX_DiscardUnknown()

func (*RayCluster) XXX_Marshal

func (m *RayCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RayCluster) XXX_Merge

func (m *RayCluster) XXX_Merge(src proto.Message)

func (*RayCluster) XXX_Size

func (m *RayCluster) XXX_Size() int

func (*RayCluster) XXX_Unmarshal

func (m *RayCluster) XXX_Unmarshal(b []byte) error

type RayJob

type RayJob struct {
	// RayClusterSpec is the cluster template to run the job
	RayCluster *RayCluster `protobuf:"bytes,1,opt,name=ray_cluster,json=rayCluster,proto3" json:"ray_cluster,omitempty"`
	// runtime_env is base64 encoded.
	// Ray runtime environments: https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments
	RuntimeEnv string `protobuf:"bytes,2,opt,name=runtime_env,json=runtimeEnv,proto3" json:"runtime_env,omitempty"`
	// shutdown_after_job_finishes specifies whether the RayCluster should be deleted after the RayJob finishes.
	ShutdownAfterJobFinishes bool `` /* 138-byte string literal not displayed */
	// ttl_seconds_after_finished specifies the number of seconds after which the RayCluster will be deleted after the RayJob finishes.
	TtlSecondsAfterFinished int32    `` /* 135-byte string literal not displayed */
	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
	XXX_unrecognized        []byte   `json:"-"`
	XXX_sizecache           int32    `json:"-"`
}

RayJobSpec defines the desired state of RayJob

func (*RayJob) Descriptor

func (*RayJob) Descriptor() ([]byte, []int)

func (*RayJob) GetRayCluster

func (m *RayJob) GetRayCluster() *RayCluster

func (*RayJob) GetRuntimeEnv

func (m *RayJob) GetRuntimeEnv() string

func (*RayJob) GetShutdownAfterJobFinishes added in v1.10.7

func (m *RayJob) GetShutdownAfterJobFinishes() bool

func (*RayJob) GetTtlSecondsAfterFinished added in v1.10.7

func (m *RayJob) GetTtlSecondsAfterFinished() int32

func (*RayJob) ProtoMessage

func (*RayJob) ProtoMessage()

func (*RayJob) Reset

func (m *RayJob) Reset()

func (*RayJob) String

func (m *RayJob) String() string

func (*RayJob) XXX_DiscardUnknown

func (m *RayJob) XXX_DiscardUnknown()

func (*RayJob) XXX_Marshal

func (m *RayJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RayJob) XXX_Merge

func (m *RayJob) XXX_Merge(src proto.Message)

func (*RayJob) XXX_Size

func (m *RayJob) XXX_Size() int

func (*RayJob) XXX_Unmarshal

func (m *RayJob) XXX_Unmarshal(b []byte) error

type SparkApplication

type SparkApplication struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (*SparkApplication) Descriptor

func (*SparkApplication) Descriptor() ([]byte, []int)

func (*SparkApplication) ProtoMessage

func (*SparkApplication) ProtoMessage()

func (*SparkApplication) Reset

func (m *SparkApplication) Reset()

func (*SparkApplication) String

func (m *SparkApplication) String() string

func (*SparkApplication) XXX_DiscardUnknown

func (m *SparkApplication) XXX_DiscardUnknown()

func (*SparkApplication) XXX_Marshal

func (m *SparkApplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkApplication) XXX_Merge

func (m *SparkApplication) XXX_Merge(src proto.Message)

func (*SparkApplication) XXX_Size

func (m *SparkApplication) XXX_Size() int

func (*SparkApplication) XXX_Unmarshal

func (m *SparkApplication) XXX_Unmarshal(b []byte) error

type SparkApplication_Type

type SparkApplication_Type int32
const (
	SparkApplication_PYTHON SparkApplication_Type = 0
	SparkApplication_JAVA   SparkApplication_Type = 1
	SparkApplication_SCALA  SparkApplication_Type = 2
	SparkApplication_R      SparkApplication_Type = 3
)

func (SparkApplication_Type) EnumDescriptor

func (SparkApplication_Type) EnumDescriptor() ([]byte, []int)

func (SparkApplication_Type) String

func (x SparkApplication_Type) String() string

type SparkJob

type SparkJob struct {
	ApplicationType     SparkApplication_Type `` /* 128-byte string literal not displayed */
	MainApplicationFile string                `protobuf:"bytes,2,opt,name=mainApplicationFile,proto3" json:"mainApplicationFile,omitempty"`
	MainClass           string                `protobuf:"bytes,3,opt,name=mainClass,proto3" json:"mainClass,omitempty"`
	SparkConf           map[string]string     `` /* 159-byte string literal not displayed */
	HadoopConf          map[string]string     `` /* 161-byte string literal not displayed */
	ExecutorPath        string                `protobuf:"bytes,6,opt,name=executorPath,proto3" json:"executorPath,omitempty"`
	// Databricks job configuration.
	// Config structure can be found here. https://docs.databricks.com/dev-tools/api/2.0/jobs.html#request-structure.
	DatabricksConf *_struct.Struct `protobuf:"bytes,7,opt,name=databricksConf,proto3" json:"databricksConf,omitempty"`
	// Databricks access token. https://docs.databricks.com/dev-tools/api/latest/authentication.html
	// This token can be set in either flytepropeller or flytekit.
	DatabricksToken string `protobuf:"bytes,8,opt,name=databricksToken,proto3" json:"databricksToken,omitempty"`
	// Domain name of your deployment. Use the form <account>.cloud.databricks.com.
	// This instance name can be set in either flytepropeller or flytekit.
	DatabricksInstance   string   `protobuf:"bytes,9,opt,name=databricksInstance,proto3" json:"databricksInstance,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Custom Proto for Spark Plugin.

func (*SparkJob) Descriptor

func (*SparkJob) Descriptor() ([]byte, []int)

func (*SparkJob) GetApplicationType

func (m *SparkJob) GetApplicationType() SparkApplication_Type

func (*SparkJob) GetDatabricksConf

func (m *SparkJob) GetDatabricksConf() *_struct.Struct

func (*SparkJob) GetDatabricksInstance

func (m *SparkJob) GetDatabricksInstance() string

func (*SparkJob) GetDatabricksToken

func (m *SparkJob) GetDatabricksToken() string

func (*SparkJob) GetExecutorPath

func (m *SparkJob) GetExecutorPath() string

func (*SparkJob) GetHadoopConf

func (m *SparkJob) GetHadoopConf() map[string]string

func (*SparkJob) GetMainApplicationFile

func (m *SparkJob) GetMainApplicationFile() string

func (*SparkJob) GetMainClass

func (m *SparkJob) GetMainClass() string

func (*SparkJob) GetSparkConf

func (m *SparkJob) GetSparkConf() map[string]string

func (*SparkJob) ProtoMessage

func (*SparkJob) ProtoMessage()

func (*SparkJob) Reset

func (m *SparkJob) Reset()

func (*SparkJob) String

func (m *SparkJob) String() string

func (*SparkJob) XXX_DiscardUnknown

func (m *SparkJob) XXX_DiscardUnknown()

func (*SparkJob) XXX_Marshal

func (m *SparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkJob) XXX_Merge

func (m *SparkJob) XXX_Merge(src proto.Message)

func (*SparkJob) XXX_Size

func (m *SparkJob) XXX_Size() int

func (*SparkJob) XXX_Unmarshal

func (m *SparkJob) XXX_Unmarshal(b []byte) error

type Waitable

type Waitable struct {
	WfExecId             *core.WorkflowExecutionIdentifier `protobuf:"bytes,1,opt,name=wf_exec_id,json=wfExecId,proto3" json:"wf_exec_id,omitempty"`
	Phase                core.WorkflowExecution_Phase      `protobuf:"varint,2,opt,name=phase,proto3,enum=flyteidl.core.WorkflowExecution_Phase" json:"phase,omitempty"`
	WorkflowId           string                            `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                          `json:"-"`
	XXX_unrecognized     []byte                            `json:"-"`
	XXX_sizecache        int32                             `json:"-"`
}

Represents an Execution that was launched and could be waited on.

func (*Waitable) Descriptor

func (*Waitable) Descriptor() ([]byte, []int)

func (*Waitable) GetPhase

func (m *Waitable) GetPhase() core.WorkflowExecution_Phase

func (*Waitable) GetWfExecId

func (m *Waitable) GetWfExecId() *core.WorkflowExecutionIdentifier

func (*Waitable) GetWorkflowId

func (m *Waitable) GetWorkflowId() string

func (*Waitable) ProtoMessage

func (*Waitable) ProtoMessage()

func (*Waitable) Reset

func (m *Waitable) Reset()

func (*Waitable) String

func (m *Waitable) String() string

func (*Waitable) XXX_DiscardUnknown

func (m *Waitable) XXX_DiscardUnknown()

func (*Waitable) XXX_Marshal

func (m *Waitable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Waitable) XXX_Merge

func (m *Waitable) XXX_Merge(src proto.Message)

func (*Waitable) XXX_Size

func (m *Waitable) XXX_Size() int

func (*Waitable) XXX_Unmarshal

func (m *Waitable) XXX_Unmarshal(b []byte) error

type WorkerGroupSpec

type WorkerGroupSpec struct {
	// Required. RayCluster can have multiple worker groups, and it distinguishes them by name
	GroupName string `protobuf:"bytes,1,opt,name=group_name,json=groupName,proto3" json:"group_name,omitempty"`
	// Required. Desired replicas of the worker group. Defaults to 1.
	Replicas int32 `protobuf:"varint,2,opt,name=replicas,proto3" json:"replicas,omitempty"`
	// Optional. Min replicas of the worker group. MinReplicas defaults to 1.
	MinReplicas int32 `protobuf:"varint,3,opt,name=min_replicas,json=minReplicas,proto3" json:"min_replicas,omitempty"`
	// Optional. Max replicas of the worker group. MaxReplicas defaults to maxInt32.
	MaxReplicas int32 `protobuf:"varint,4,opt,name=max_replicas,json=maxReplicas,proto3" json:"max_replicas,omitempty"`
	// Optional. RayStartParams are the params of the start command: address, object-store-memory.
	// Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start
	RayStartParams       map[string]string `` /* 193-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

WorkerGroupSpec are the specs for the worker pods

func (*WorkerGroupSpec) Descriptor

func (*WorkerGroupSpec) Descriptor() ([]byte, []int)

func (*WorkerGroupSpec) GetGroupName

func (m *WorkerGroupSpec) GetGroupName() string

func (*WorkerGroupSpec) GetMaxReplicas

func (m *WorkerGroupSpec) GetMaxReplicas() int32

func (*WorkerGroupSpec) GetMinReplicas

func (m *WorkerGroupSpec) GetMinReplicas() int32

func (*WorkerGroupSpec) GetRayStartParams

func (m *WorkerGroupSpec) GetRayStartParams() map[string]string

func (*WorkerGroupSpec) GetReplicas

func (m *WorkerGroupSpec) GetReplicas() int32

func (*WorkerGroupSpec) ProtoMessage

func (*WorkerGroupSpec) ProtoMessage()

func (*WorkerGroupSpec) Reset

func (m *WorkerGroupSpec) Reset()

func (*WorkerGroupSpec) String

func (m *WorkerGroupSpec) String() string

func (*WorkerGroupSpec) XXX_DiscardUnknown

func (m *WorkerGroupSpec) XXX_DiscardUnknown()

func (*WorkerGroupSpec) XXX_Marshal

func (m *WorkerGroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkerGroupSpec) XXX_Merge

func (m *WorkerGroupSpec) XXX_Merge(src proto.Message)

func (*WorkerGroupSpec) XXX_Size

func (m *WorkerGroupSpec) XXX_Size() int

func (*WorkerGroupSpec) XXX_Unmarshal

func (m *WorkerGroupSpec) XXX_Unmarshal(b []byte) error

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL