Documentation ¶
Index ¶
- Constants
- func ExceedsCompare(a string) string
- func FieldString(str string) float64
- func NewGpuResource(name v1.ResourceName, rl *v1.ResourceList) *resource.Quantity
- func NodeCapacity(node *v1.Node) v1.ResourceList
- func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList, err error)
- type CPUResources
- type CpuResource
- type GPUResources
- type KubeClient
- func (k *KubeClient) GetActivePodByNodename(ctx context.Context, node corev1.Node) (*corev1.PodList, error)
- func (k *KubeClient) GetNodeMetricsFromMetricsAPI(ctx context.Context, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error)
- func (k *KubeClient) GetNodeResources(ctx context.Context, resourceName string, resourceType []string, sortBy string, ...) ([][]string, error)
- func (k *KubeClient) GetNodes(ctx context.Context, resourceName string, selector labels.Selector) (map[string]corev1.Node, error)
- func (k *KubeClient) GetPodByPodname(ctx context.Context, podName string, namespace string) (*corev1.Pod, error)
- func (k *KubeClient) GetPodMetricsFromMetricsAPI(ctx context.Context, namespace, resourceName string, allNamespaces bool, ...) (*metricsapi.PodMetricsList, error)
- func (k *KubeClient) GetPodResources(ctx context.Context, podmetrics []metricsapi.PodMetrics, namespace string, ...) ([][]string, error)
- func (k *KubeClient) PodMetricses(ctx context.Context) (*metricsV1beta1api.PodMetricsList, error)
- type MemoryResource
- type MemoryResources
- type NodeAllocatedResources
- type PodAllocatedResources
- type PodResources
Constants ¶
const (
	// nvidia.com/gpu, number
	ResourceNvidiaGpuCounts v1.ResourceName = "nvidia.com/gpu"
	// aliyun.com/gpu-count, number
	ResourceAliyunGpuCounts v1.ResourceName = "aliyun.com/gpu-count"
	// aliyun.com/gpu-mem, number
	ResourceAliyunGpuMem v1.ResourceName = "aliyun.com/gpu-mem"
)
Variables ¶
This section is empty.
Functions ¶
func NewGpuResource ¶ added in v0.2.0
func NewGpuResource(name v1.ResourceName, rl *v1.ResourceList) *resource.Quantity
NewGpuResource returns the quantity of the named GPU resource from the given resource list.
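A minimal sketch of calling NewGpuResource against a hand-built resource list. The import path and alias are placeholders (this page does not show the module path), and the lookup behavior is assumed from the signature.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

func main() {
	// A hypothetical allocatable list for a node with two NVIDIA GPUs.
	allocatable := v1.ResourceList{
		kube.ResourceNvidiaGpuCounts: resource.MustParse("2"),
	}

	// Assumed behavior: look up the nvidia.com/gpu quantity in the list.
	gpus := kube.NewGpuResource(kube.ResourceNvidiaGpuCounts, &allocatable)
	fmt.Println(gpus.Value()) // 2
}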
func PodRequestsAndLimits ¶
func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList, err error)
PodRequestsAndLimits returns a dictionary of all defined resources summed up for all containers of the pod. If pod overhead is non-nil, the pod overhead is added to the total container resource requests and to the total container limits which have a non-zero quantity.
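For illustration, a hedged sketch that sums one pod's requests and limits; the package import path is a placeholder.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

func main() {
	// A single-container pod with explicit requests and limits.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "app",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("250m"),
							v1.ResourceMemory: resource.MustParse("64Mi"),
						},
						Limits: v1.ResourceList{
							v1.ResourceCPU:    resource.MustParse("500m"),
							v1.ResourceMemory: resource.MustParse("128Mi"),
						},
					},
				},
			},
		},
	}

	reqs, limits, err := kube.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpuReq := reqs[v1.ResourceCPU]
	cpuLim := limits[v1.ResourceCPU]
	fmt.Println(cpuReq.String(), cpuLim.String()) // 250m 500m
}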
Types ¶
type CPUResources ¶
type CPUResources struct {
	// CPUUsages is the current CPU usage in millicores.
	CPUUsages *CpuResource
	// CPURequests is the number of requested millicores.
	CPURequests *CpuResource
	// CPURequestsFraction is the fraction of CPU capacity that is requested.
	CPURequestsFraction float64 `json:"cpuRequestsFraction"`
	// CPULimits is the defined CPU limit.
	CPULimits *CpuResource
	// CPULimitsFraction is the fraction of the defined CPU limit; can be over 100%, i.e.
	// overcommitted.
	CPULimitsFraction float64 `json:"cpuLimitsFraction"`
	// CPUCapacity is the node CPU capacity in millicores.
	CPUCapacity *CpuResource
}
CPUResources describes a node's allocated CPU resources.
type CpuResource ¶
type GPUResources ¶ added in v0.2.0
type GPUResources struct {
	// NvidiaGpuCountsRequests is the number of requested NVIDIA GPUs.
	NvidiaGpuCountsRequests int64
	// NvidiaGpuCountsRequestsFraction is the fraction of NVIDIA GPU capacity that is requested.
	NvidiaGpuCountsRequestsFraction float64 `json:"NvidiaGpuCountsRequestsFraction"`
	// NvidiaGpuCountsLimits is the defined NVIDIA GPU limit.
	NvidiaGpuCountsLimits int64
	// NvidiaGpuCountsLimitsFraction is the fraction of the defined NVIDIA GPU limit; can be over 100%, i.e.
	// overcommitted.
	NvidiaGpuCountsLimitsFraction float64 `json:"NvidiaGpuCountsLimitsFraction"`
	// NvidiaGpuCountsCapacity is the node's NVIDIA GPU capacity.
	NvidiaGpuCountsCapacity int64 `json:"nvidiaGpuCountsCapacity"`
	// AliyunGpuMemRequests is the amount of requested Aliyun GPU memory.
	AliyunGpuMemRequests int64
	// AliyunGpuMemRequestsFraction is the fraction of Aliyun GPU memory capacity that is requested.
	AliyunGpuMemRequestsFraction float64 `json:"aliyunGpuMemsRequestsFraction"`
	// AliyunGpuMemLimits is the defined Aliyun GPU memory limit.
	AliyunGpuMemLimits int64
	// AliyunGpuMemLimitsFraction is the fraction of the defined Aliyun GPU memory limit; can be over 100%, i.e.
	// overcommitted.
	AliyunGpuMemLimitsFraction float64 `json:"aliyunGpuMemLimitsFraction"`
	// AliyunGpuMemCapacity is the node's Aliyun GPU memory capacity.
	AliyunGpuMemCapacity int64 `json:"aliyunGpuMemCapacity"`
}
GPUResources describes a node's allocated GPU resources.
type KubeClient ¶
type KubeClient struct {
// contains filtered or unexported fields
}
KubeClient provides methods to get all required metrics from Kubernetes
func NewClient ¶
func NewClient(config *rest.Config) (*KubeClient, error)
NewClient creates a new client to get data from the Kubernetes API server.
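A minimal sketch of constructing the client from a local kubeconfig file; the package import path is a placeholder.

package main

import (
	"log"
	"path/filepath"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

func main() {
	// Load a rest.Config from the default kubeconfig location.
	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}

	client, err := kube.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use client.GetNodes, client.PodMetricses, etc.
}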
func (*KubeClient) GetActivePodByNodename ¶
func (k *KubeClient) GetActivePodByNodename(ctx context.Context, node corev1.Node) (*corev1.PodList, error)
GetActivePodByNodename returns the active pods scheduled on the given node.
func (*KubeClient) GetNodeMetricsFromMetricsAPI ¶
func (k *KubeClient) GetNodeMetricsFromMetricsAPI(ctx context.Context, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error)
GetNodeMetricsFromMetricsAPI retrieves node metrics from the Metrics API for nodes matching the given selector.
func (*KubeClient) GetNodeResources ¶
func (k *KubeClient) GetNodeResources(ctx context.Context, resourceName string, resourceType []string, sortBy string, selector labels.Selector) ([][]string, error)
GetNodeResources returns per-node resource usage rows for the given resource types, filtered by the selector and sorted by sortBy.
func (*KubeClient) GetNodes ¶
func (k *KubeClient) GetNodes(ctx context.Context, resourceName string, selector labels.Selector) (map[string]corev1.Node, error)
GetNodes returns the nodes matching the given selector as a map keyed by node name.
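As a hedged sketch, a helper that lists nodes matching a label selector; it is meant to be called from a main like the NewClient sketch above. The import path is a placeholder, and passing an empty resourceName to mean "all matching nodes" is an assumption.

package main

import (
	"context"
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/labels"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

// printWorkerNodes lists nodes carrying a worker label and prints each
// node's allocatable CPU. The empty resourceName argument is assumed to
// mean "do not filter to a single node".
func printWorkerNodes(client *kube.KubeClient) {
	selector := labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""})

	nodes, err := client.GetNodes(context.TODO(), "", selector)
	if err != nil {
		log.Fatal(err)
	}
	for name, node := range nodes {
		fmt.Println(name, node.Status.Allocatable.Cpu().String())
	}
}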
func (*KubeClient) GetPodByPodname ¶
func (k *KubeClient) GetPodByPodname(ctx context.Context, podName string, namespace string) (*corev1.Pod, error)
GetPodByPodname returns the pod with the given name in the given namespace.
func (*KubeClient) GetPodMetricsFromMetricsAPI ¶
func (k *KubeClient) GetPodMetricsFromMetricsAPI(ctx context.Context, namespace, resourceName string, allNamespaces bool, labelSelector labels.Selector, fieldSelector fields.Selector) (*metricsapi.PodMetricsList, error)
GetPodMetricsFromMetricsAPI retrieves pod metrics from the Metrics API, filtered by namespace, label selector, and field selector, or across all namespaces.
func (*KubeClient) GetPodResources ¶
func (*KubeClient) PodMetricses ¶
func (k *KubeClient) PodMetricses(ctx context.Context) (*metricsV1beta1api.PodMetricsList, error)
PodMetricses returns all pods' usage metrics
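A sketch of printing per-container usage from the returned metrics list; it assumes metrics-server is installed in the cluster, uses a placeholder import path, and is intended to be called from a main that has already built the client.

package main

import (
	"context"
	"fmt"
	"log"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

// printPodUsage dumps per-container CPU and memory usage.
func printPodUsage(client *kube.KubeClient) {
	metrics, err := client.PodMetricses(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, pm := range metrics.Items {
		for _, c := range pm.Containers {
			cpu := c.Usage.Cpu()
			mem := c.Usage.Memory()
			fmt.Printf("%s/%s %s: cpu=%dm mem=%dMi\n",
				pm.Namespace, pm.Name, c.Name,
				cpu.MilliValue(), mem.Value()/(1024*1024))
		}
	}
}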
type MemoryResource ¶
func (*MemoryResource) String ¶
func (r *MemoryResource) String() string
func (*MemoryResource) ToQuantity ¶
func (r *MemoryResource) ToQuantity() *resource.Quantity
ToQuantity converts the memory resource to a resource.Quantity.
type MemoryResources ¶
type MemoryResources struct {
	// MemoryUsages is the current memory usage.
	MemoryUsages *MemoryResource
	// MemoryRequests is the amount of requested memory.
	MemoryRequests *MemoryResource
	// MemoryRequestsFraction is the fraction of memory capacity that is requested.
	MemoryRequestsFraction float64 `json:"memoryRequestsFraction"`
	// MemoryLimits is the defined memory limit.
	MemoryLimits *MemoryResource
	// MemoryLimitsFraction is the fraction of the defined memory limit; can be over 100%, i.e.
	// overcommitted.
	MemoryLimitsFraction float64 `json:"memoryLimitsFraction"`
	// MemoryCapacity is the node memory capacity in bytes.
	MemoryCapacity *MemoryResource
}
MemoryResources describes a node's allocated memory resources.
type NodeAllocatedResources ¶
type NodeAllocatedResources struct {
	CPUResources
	MemoryResources
	GPUResources
	PodResources
}
NodeAllocatedResources describes node allocated resources.
type PodAllocatedResources ¶
type PodAllocatedResources struct {
	// CPUUsages is the current CPU usage in millicores.
	CPUUsages *CpuResource
	// CPUUsagesFraction is the fraction of CPU that is currently used.
	CPUUsagesFraction float64 `json:"cpuUsagesFraction"`
	// CPURequests is the number of requested millicores.
	CPURequests *CpuResource
	// CPULimits is the defined CPU limit.
	CPULimits *CpuResource
	// MemoryUsages is the current memory usage.
	MemoryUsages *MemoryResource
	// MemoryUsagesFraction is the fraction of memory that is currently used.
	MemoryUsagesFraction float64 `json:"memoryUsagesFraction"`
	// MemoryRequests is the amount of requested memory.
	MemoryRequests *MemoryResource
	// MemoryLimits is the defined memory limit.
	MemoryLimits *MemoryResource
	// NvidiaGpuCountsRequests is the number of requested NVIDIA GPUs.
	NvidiaGpuCountsRequests int64
	// NvidiaGpuCountsLimits is the defined NVIDIA GPU limit.
	NvidiaGpuCountsLimits int64
	// AliyunGpuMemRequests is the amount of requested Aliyun GPU memory.
	AliyunGpuMemRequests int64
	// AliyunGpuMemLimits is the defined Aliyun GPU memory limit.
	AliyunGpuMemLimits int64
}
PodAllocatedResources describes a pod's allocated resources.
type PodResources ¶
type PodResources struct {
	// AllocatedPods is the number of pods currently allocated on the node.
	AllocatedPods int `json:"allocatedPods"`
	// PodCapacity is the maximum number of pods that can be allocated on the node.
	PodCapacity int64 `json:"podCapacity"`
	// PodFraction is the fraction of the pod capacity that is allocated on the node.
	PodFraction float64 `json:"podFraction"`
}
PodResources describes a node's pod allocation and capacity.
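For intuition, a small hedged sketch of how the fields likely relate, assuming PodFraction is the allocated-to-capacity ratio expressed as a percentage; the import path is a placeholder.

package main

import (
	"fmt"

	kube "example.com/your/module/pkg/kube" // placeholder import path (assumption)
)

func main() {
	pr := kube.PodResources{
		AllocatedPods: 55,
		PodCapacity:   110,
	}
	// Assumption: PodFraction is allocated pods over pod capacity, as a percentage.
	pr.PodFraction = float64(pr.AllocatedPods) / float64(pr.PodCapacity) * 100
	fmt.Printf("%.0f%% of pod capacity allocated\n", pr.PodFraction) // 50%
}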