Documentation ¶
Overview ¶
Package v1alpha1 contains API Schema definitions for the kaito v1alpha1 API group +kubebuilder:object:generate=true +k8s:defaulter-gen=TypeMeta +groupName=kaito.sh
Index ¶
- Constants
- Variables
- func ValidateDNSSubdomain(name string) bool
- type AdapterSpec
- type ConditionType
- type Config
- type DataDestination
- type DataSource
- type GPUConfig
- type InferenceSpec
- type ModelImageAccessMode
- type ModelName
- type PresetMeta
- type PresetOptions
- type PresetSpec
- type ResourceSpec
- type TrainingConfig
- type TuningMethod
- type TuningSpec
- type Workspace
- func (in *Workspace) DeepCopy() *Workspace
- func (in *Workspace) DeepCopyInto(out *Workspace)
- func (in *Workspace) DeepCopyObject() runtime.Object
- func (w *Workspace) SetDefaults(_ context.Context)
- func (w *Workspace) SupportedVerbs() []admissionregistrationv1.OperationType
- func (w *Workspace) Validate(ctx context.Context) (errs *apis.FieldError)
- type WorkspaceList
- type WorkspaceStatus
Constants ¶
const ( // WorkspaceConditionTypeMachineStatus is the state when checking machine status. WorkspaceConditionTypeMachineStatus = ConditionType("MachineReady") // WorkspaceConditionTypeNodeClaimStatus is the state when checking nodeClaim status. WorkspaceConditionTypeNodeClaimStatus = ConditionType("NodeClaimReady") // WorkspaceConditionTypeResourceStatus is the state when Resource has been created. WorkspaceConditionTypeResourceStatus = ConditionType("ResourceReady") // WorkspaceConditionTypeInferenceStatus is the state when the Inference service is ready. WorkspaceConditionTypeInferenceStatus = ConditionType("InferenceReady") // WorkspaceConditionTypeTuningJobStatus is the state when the tuning job starts normally. WorkspaceConditionTypeTuningJobStatus ConditionType = ConditionType("JobStarted") // WorkspaceConditionTypeDeleting is the Workspace state when it starts to get deleted. WorkspaceConditionTypeDeleting = ConditionType("WorkspaceDeleting") // WorkspaceConditionTypeSucceeded is the Workspace state that summarizes all operations' states. // For inference, the "True" condition means the inference service is ready to serve requests. // For fine tuning, the "True" condition means the tuning job completes successfully. WorkspaceConditionTypeSucceeded ConditionType = ConditionType("WorkspaceSucceeded") )
const ( // KAITOPrefix Kubernetes Data Mining prefix. KAITOPrefix = "kaito.sh/" // AnnotationEnableLB determines whether kaito creates LoadBalancer type service for testing. AnnotationEnableLB = KAITOPrefix + "enablelb" // LabelWorkspaceName is the label for workspace name. LabelWorkspaceName = KAITOPrefix + "workspace" // LabelWorkspaceNamespace is the label for workspace namespace. LabelWorkspaceNamespace = KAITOPrefix + "workspacenamespace" // WorkspaceRevisionAnnotation is the annotation for the revision number. WorkspaceRevisionAnnotation = "workspace.kaito.io/revision" )
const ( N_SERIES_PREFIX = "Standard_N" D_SERIES_PREFIX = "Standard_D" DefaultLoraConfigMapTemplate = "lora-params-template" DefaultQloraConfigMapTemplate = "qlora-params-template" MaxAdaptersNumber = 10 )
Variables ¶
var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "kaito.sh", Version: "v1alpha1"} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} )
var SupportedGPUConfigs = map[string]GPUConfig{ "Standard_NC6": {SKU: "Standard_NC6", GPUCount: 1, GPUMem: 12, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia470CudaDriver"}, "Standard_NC12": {SKU: "Standard_NC12", GPUCount: 2, GPUMem: 24, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia470CudaDriver"}, "Standard_NC24": {SKU: "Standard_NC24", GPUCount: 4, GPUMem: 48, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia470CudaDriver"}, "Standard_NC24r": {SKU: "Standard_NC24r", GPUCount: 4, GPUMem: 48, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia470CudaDriver"}, "Standard_NV6": {SKU: "Standard_NV6", GPUCount: 1, GPUMem: 8, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV12": {SKU: "Standard_NV12", GPUCount: 2, GPUMem: 16, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV24": {SKU: "Standard_NV24", GPUCount: 4, GPUMem: 32, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV12s_v3": {SKU: "Standard_NV12s_v3", GPUCount: 1, GPUMem: 8, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV24s_v3": {SKU: "Standard_NV24s_v3", GPUCount: 2, GPUMem: 16, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV48s_v3": {SKU: "Standard_NV48s_v3", GPUCount: 4, GPUMem: 32, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_ND6s": {SKU: "Standard_ND6s", GPUCount: 1, GPUMem: 24, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND12s": {SKU: "Standard_ND12s", GPUCount: 2, GPUMem: 48, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND24s": {SKU: "Standard_ND24s", GPUCount: 4, GPUMem: 96, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND24rs": {SKU: "Standard_ND24rs", GPUCount: 4, GPUMem: 96, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC6s_v2": {SKU: "Standard_NC6s_v2", GPUCount: 1, 
GPUMem: 16, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC12s_v2": {SKU: "Standard_NC12s_v2", GPUCount: 2, GPUMem: 32, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC24s_v2": {SKU: "Standard_NC24s_v2", GPUCount: 4, GPUMem: 64, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC24rs_v2": {SKU: "Standard_NC24rs_v2", GPUCount: 4, GPUMem: 64, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC6s_v3": {SKU: "Standard_NC6s_v3", GPUCount: 1, GPUMem: 16, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC12s_v3": {SKU: "Standard_NC12s_v3", GPUCount: 2, GPUMem: 32, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC24s_v3": {SKU: "Standard_NC24s_v3", GPUCount: 4, GPUMem: 64, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC24rs_v3": {SKU: "Standard_NC24rs_v3", GPUCount: 4, GPUMem: 64, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND40rs_v2": {SKU: "Standard_ND40rs_v2", GPUCount: 8, GPUMem: 256, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC4as_T4_v3": {SKU: "Standard_NC4as_T4_v3", GPUCount: 1, GPUMem: 16, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC8as_T4_v3": {SKU: "Standard_NC8as_T4_v3", GPUCount: 1, GPUMem: 16, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC16as_T4_v3": {SKU: "Standard_NC16as_T4_v3", GPUCount: 1, GPUMem: 16, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC64as_T4_v3": {SKU: "Standard_NC64as_T4_v3", GPUCount: 4, GPUMem: 64, SupportedOS: []string{"Mariner", "Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND96asr_v4": {SKU: "Standard_ND96asr_v4", GPUCount: 8, GPUMem: 320, SupportedOS: 
[]string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_ND96amsr_A100_v4": {SKU: "Standard_ND96amsr_A100_v4", GPUCount: 8, GPUMem: 640, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC24ads_A100_v4": {SKU: "Standard_NC24ads_A100_v4", GPUCount: 1, GPUMem: 80, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC48ads_A100_v4": {SKU: "Standard_NC48ads_A100_v4", GPUCount: 2, GPUMem: 160, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NC96ads_A100_v4": {SKU: "Standard_NC96ads_A100_v4", GPUCount: 4, GPUMem: 320, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia525CudaDriver"}, "Standard_NV6ads_A10_v5": {SKU: "Standard_NV6ads_A10_v5", GPUCount: 1, GPUMem: 4, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV12ads_A10_v5": {SKU: "Standard_NV12ads_A10_v5", GPUCount: 1, GPUMem: 8, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV18ads_A10_v5": {SKU: "Standard_NV18ads_A10_v5", GPUCount: 1, GPUMem: 12, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV36ads_A10_v5": {SKU: "Standard_NV36ads_A10_v5", GPUCount: 1, GPUMem: 24, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV36adms_A10_v5": {SKU: "Standard_NV36adms_A10_v5", GPUCount: 1, GPUMem: 24, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, "Standard_NV72ads_A10_v5": {SKU: "Standard_NV72ads_A10_v5", GPUCount: 2, GPUMem: 48, SupportedOS: []string{"Ubuntu"}, GPUDriver: "Nvidia510GridDriver"}, }
Functions ¶
func ValidateDNSSubdomain ¶ added in v0.3.0
Types ¶
type AdapterSpec ¶ added in v0.3.0
type AdapterSpec struct { // Source describes where to obtain the adapter data. // +optional Source *DataSource `json:"source,omitempty"` // Strength specifies the default multiplier for applying the adapter weights to the raw model weights. // It is usually a float number between 0 and 1. It is defined as a string type to be language agnostic. // +optional Strength *string `json:"strength,omitempty"` }
func (*AdapterSpec) DeepCopy ¶ added in v0.3.0
func (in *AdapterSpec) DeepCopy() *AdapterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdapterSpec.
func (*AdapterSpec) DeepCopyInto ¶ added in v0.3.0
func (in *AdapterSpec) DeepCopyInto(out *AdapterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Config ¶ added in v0.3.0
type Config struct {
TrainingConfig TrainingConfig `yaml:"training_config"`
}
func UnmarshalTrainingConfig ¶ added in v0.3.0
func UnmarshalTrainingConfig(cm *corev1.ConfigMap) (*Config, *apis.FieldError)
func (*Config) DeepCopy ¶ added in v0.3.0
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (*Config) DeepCopyInto ¶ added in v0.3.0
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataDestination ¶ added in v0.3.0
type DataDestination struct { // The mounted volume that is used to save the output data. // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Schemaless // +optional Volume *v1.VolumeSource `json:"volumeSource,omitempty"` // Name of the image where the output data is pushed to. // +optional Image string `json:"image,omitempty"` // ImagePushSecret is the name of the secret in the same namespace that contains the authentication // information that is needed for running `docker push`. // +optional ImagePushSecret string `json:"imagePushSecret,omitempty"` }
func (*DataDestination) DeepCopy ¶ added in v0.3.0
func (in *DataDestination) DeepCopy() *DataDestination
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDestination.
func (*DataDestination) DeepCopyInto ¶ added in v0.3.0
func (in *DataDestination) DeepCopyInto(out *DataDestination)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataSource ¶ added in v0.3.0
type DataSource struct { // The name of the dataset. The same name will be used as a container name. // It must be a valid DNS subdomain value. Name string `json:"name,omitempty"` // URLs specifies the links to the public data sources. E.g., files in a public github repository. // +optional URLs []string `json:"urls,omitempty"` // The mounted volume that contains the data. // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Schemaless // +optional Volume *v1.VolumeSource `json:"volumeSource,omitempty"` // The name of the image that contains the source data. The assumption is that the source data is located in the // `data` directory in the image. // +optional Image string `json:"image,omitempty"` // ImagePullSecrets is a list of secret names in the same namespace used for pulling the data image. // +optional ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` }
func (*DataSource) DeepCopy ¶ added in v0.3.0
func (in *DataSource) DeepCopy() *DataSource
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
func (*DataSource) DeepCopyInto ¶ added in v0.3.0
func (in *DataSource) DeepCopyInto(out *DataSource)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type GPUConfig ¶ added in v0.1.0
func (*GPUConfig) DeepCopy ¶ added in v0.1.0
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUConfig.
func (*GPUConfig) DeepCopyInto ¶ added in v0.1.0
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type InferenceSpec ¶
type InferenceSpec struct { // Preset describes the base model that will be deployed with preset configurations. // +optional Preset *PresetSpec `json:"preset,omitempty"` // Template specifies the Pod template used to run the inference service. Users can specify custom Pod settings // if the preset configurations cannot meet the requirements. Note that if Preset is specified, Template should not // be specified and vice versa. // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Schemaless // +optional Template *v1.PodTemplateSpec `json:"template,omitempty"` // Adapters are integrated into the base model for inference. // Users can specify multiple adapters for the model and the respective weight of using each of them. // +optional Adapters []AdapterSpec `json:"adapters,omitempty"` }
func (*InferenceSpec) DeepCopy ¶
func (in *InferenceSpec) DeepCopy() *InferenceSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InferenceSpec.
func (*InferenceSpec) DeepCopyInto ¶
func (in *InferenceSpec) DeepCopyInto(out *InferenceSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ModelImageAccessMode ¶
type ModelImageAccessMode string
+kubebuilder:validation:Enum=public;private
const ( ModelImageAccessModePublic ModelImageAccessMode = "public" ModelImageAccessModePrivate ModelImageAccessMode = "private" )
type PresetMeta ¶
type PresetMeta struct { // Name of the supported models with preset configurations. Name ModelName `json:"name"` // AccessMode specifies whether the containerized model image is accessible via public registry // or private registry. This field defaults to "public" if not specified. // If this field is "private", user needs to provide the private image information in PresetOptions. // +kubebuilder:default:="public" // +optional AccessMode ModelImageAccessMode `json:"accessMode,omitempty"` }
func (*PresetMeta) DeepCopy ¶
func (in *PresetMeta) DeepCopy() *PresetMeta
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetMeta.
func (*PresetMeta) DeepCopyInto ¶
func (in *PresetMeta) DeepCopyInto(out *PresetMeta)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PresetOptions ¶
type PresetOptions struct { // Image is the name of the containerized model image. // +optional Image string `json:"image,omitempty"` // ImagePullSecrets is a list of secret names in the same namespace used for pulling the model image. // +optional ImagePullSecrets []string `json:"imagePullSecrets,omitempty"` }
func (*PresetOptions) DeepCopy ¶
func (in *PresetOptions) DeepCopy() *PresetOptions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetOptions.
func (*PresetOptions) DeepCopyInto ¶
func (in *PresetOptions) DeepCopyInto(out *PresetOptions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PresetSpec ¶
type PresetSpec struct { PresetMeta `json:",inline"` // +optional PresetOptions `json:"presetOptions,omitempty"` }
PresetSpec provides the information for rendering preset configurations to run the model inference service.
func (*PresetSpec) DeepCopy ¶
func (in *PresetSpec) DeepCopy() *PresetSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetSpec.
func (*PresetSpec) DeepCopyInto ¶
func (in *PresetSpec) DeepCopyInto(out *PresetSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ResourceSpec ¶
type ResourceSpec struct { // Count is the required number of GPU nodes. // +optional // +kubebuilder:default:=1 Count *int `json:"count,omitempty"` // InstanceType specifies the GPU node SKU. // This field defaults to "Standard_NC12s_v3" if not specified. // +optional // +kubebuilder:default:="Standard_NC12s_v3" InstanceType string `json:"instanceType,omitempty"` // LabelSelector specifies the required labels for the GPU nodes. LabelSelector *metav1.LabelSelector `json:"labelSelector"` // PreferredNodes is an optional node list specified by the user. // If a node in the list does not have the required labels, it will be ignored. // +optional PreferredNodes []string `json:"preferredNodes,omitempty"` }
ResourceSpec describes the resource requirement of running the workload. If the number of nodes in the cluster that meet the InstanceType and LabelSelector requirements is smaller than the Count, the controller will provision new nodes before deploying the workload. The final list of nodes used to run the workload is presented in workspace Status.
func (*ResourceSpec) DeepCopy ¶
func (in *ResourceSpec) DeepCopy() *ResourceSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
func (*ResourceSpec) DeepCopyInto ¶
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type TrainingConfig ¶ added in v0.3.0
type TrainingConfig struct { ModelConfig map[string]runtime.RawExtension `yaml:"ModelConfig"` QuantizationConfig map[string]runtime.RawExtension `yaml:"QuantizationConfig"` LoraConfig map[string]runtime.RawExtension `yaml:"LoraConfig"` TrainingArguments map[string]runtime.RawExtension `yaml:"TrainingArguments"` DatasetConfig map[string]runtime.RawExtension `yaml:"DatasetConfig"` DataCollator map[string]runtime.RawExtension `yaml:"DataCollator"` }
func (*TrainingConfig) DeepCopy ¶ added in v0.3.0
func (in *TrainingConfig) DeepCopy() *TrainingConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingConfig.
func (*TrainingConfig) DeepCopyInto ¶ added in v0.3.0
func (in *TrainingConfig) DeepCopyInto(out *TrainingConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*TrainingConfig) UnmarshalYAML ¶ added in v0.3.0
func (t *TrainingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
UnmarshalYAML custom method
type TuningMethod ¶ added in v0.3.0
type TuningMethod string
const ( TuningMethodLora TuningMethod = "lora" TuningMethodQLora TuningMethod = "qlora" )
type TuningSpec ¶ added in v0.3.0
type TuningSpec struct { // Preset describes which model to load for tuning. // +optional Preset *PresetSpec `json:"preset,omitempty"` // Method specifies the Parameter-Efficient Fine-Tuning(PEFT) method, such as lora, qlora, used for the tuning. // +optional Method TuningMethod `json:"method,omitempty"` // Config specifies the name of a custom ConfigMap that contains tuning arguments. // If specified, the ConfigMap must be in the same namespace as the Workspace custom resource. // If not specified, a default Config is used based on the specified tuning method. // +optional Config string `json:"config,omitempty"` // Input describes the input used by the tuning method. Input *DataSource `json:"input"` // Output specifies where to store the tuning output. Output *DataDestination `json:"output"` }
func (*TuningSpec) DeepCopy ¶ added in v0.3.0
func (in *TuningSpec) DeepCopy() *TuningSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TuningSpec.
func (*TuningSpec) DeepCopyInto ¶ added in v0.3.0
func (in *TuningSpec) DeepCopyInto(out *TuningSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Workspace ¶
type Workspace struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Resource ResourceSpec `json:"resource,omitempty"` Inference *InferenceSpec `json:"inference,omitempty"` Tuning *TuningSpec `json:"tuning,omitempty"` Status WorkspaceStatus `json:"status,omitempty"` }
Workspace is the Schema for the workspaces API +kubebuilder:object:root=true +kubebuilder:subresource:status +kubebuilder:resource:path=workspaces,scope=Namespaced,categories=workspace,shortName={wk,wks} +kubebuilder:storageversion +kubebuilder:printcolumn:name="Instance",type="string",JSONPath=".resource.instanceType",description="" +kubebuilder:printcolumn:name="ResourceReady",type="string",JSONPath=".status.conditions[?(@.type==\"ResourceReady\")].status",description="" +kubebuilder:printcolumn:name="InferenceReady",type="string",JSONPath=".status.conditions[?(@.type==\"InferenceReady\")].status",description="" +kubebuilder:printcolumn:name="JobStarted",type="string",JSONPath=".status.conditions[?(@.type==\"JobStarted\")].status",description="" +kubebuilder:printcolumn:name="WorkspaceSucceeded",type="string",JSONPath=".status.conditions[?(@.type==\"WorkspaceSucceeded\")].status",description="" +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
func (*Workspace) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace.
func (*Workspace) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Workspace) DeepCopyObject ¶
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*Workspace) SetDefaults ¶
SetDefaults for the Workspace
func (*Workspace) SupportedVerbs ¶
func (w *Workspace) SupportedVerbs() []admissionregistrationv1.OperationType
type WorkspaceList ¶
type WorkspaceList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []Workspace `json:"items"` }
WorkspaceList contains a list of Workspace +kubebuilder:object:root=true
func (*WorkspaceList) DeepCopy ¶
func (in *WorkspaceList) DeepCopy() *WorkspaceList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList.
func (*WorkspaceList) DeepCopyInto ¶
func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*WorkspaceList) DeepCopyObject ¶
func (in *WorkspaceList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type WorkspaceStatus ¶
type WorkspaceStatus struct { // WorkerNodes is the list of nodes chosen to run the workload based on the workspace resource requirement. // +optional WorkerNodes []string `json:"workerNodes,omitempty"` // Conditions report the current conditions of the workspace. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` }
WorkspaceStatus defines the observed state of Workspace
func (*WorkspaceStatus) DeepCopy ¶
func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus.
func (*WorkspaceStatus) DeepCopyInto ¶
func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.