Documentation
Constants
This section is empty.
Variables
var AzAwareTightlyPack = SparkBinPackFunction(func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (string, []string, bool) {
	driverNode, executorNodes, hasCapacity := SingleAZTightlyPack(ctx, driverResources, executorResources, executorCount, driverNodePriorityOrder, executorNodePriorityOrder, nodesSchedulingMetadata)
	if hasCapacity {
		return driverNode, executorNodes, hasCapacity
	}
	return SparkBinPack(ctx, driverResources, executorResources, executorCount, driverNodePriorityOrder, executorNodePriorityOrder, nodesSchedulingMetadata, tightlyPackExecutors)
})
AzAwareTightlyPack is a SparkBinPackFunction that tries to place the driver pod on the highest-priority nodes possible before tightly packing executors, while also trying to fit everything into a single AZ. If the application cannot fit into a single AZ, it falls back to the TightlyPack SparkBinPackFunction.
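For illustration, the sketch below shows how a caller might invoke AzAwareTightlyPack once it has computed the resource requests, node priority orders, and scheduling metadata. The scheduleApplication helper, the package clause, and the resources import path are placeholders for this example, not part of this package.

package binpack // assumed package name, for illustration only

import (
	"context"
	"fmt"

	// Placeholder import path for the resources helper package used by this package.
	"example.com/scheduler/resources"
)

// scheduleApplication is a hypothetical helper showing how a caller might use
// AzAwareTightlyPack. The resource requests and scheduling metadata are
// assumed to be built elsewhere.
func scheduleApplication(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata,
) error {
	driverNode, executorNodes, hasCapacity := AzAwareTightlyPack(
		ctx,
		driverResources, executorResources,
		executorCount,
		driverNodePriorityOrder, executorNodePriorityOrder,
		nodesSchedulingMetadata)
	if !hasCapacity {
		// Neither the single-AZ attempt nor the tightly-packed fallback fit.
		return fmt.Errorf("not enough capacity for driver and %d executors", executorCount)
	}
	// driverNode is the chosen driver node; executorNodes has one entry per executor.
	fmt.Printf("driver -> %s, executors -> %v\n", driverNode, executorNodes)
	return nil
}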
var DistributeEvenly = SparkBinPackFunction(func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (string, []string, bool) {
	return SparkBinPack(ctx, driverResources, executorResources, executorCount, driverNodePriorityOrder, executorNodePriorityOrder, nodesSchedulingMetadata, distributeExecutorsEvenly)
})
DistributeEvenly is a SparkBinPackFunction that tries to place the driver pod on the highest-priority nodes possible before distributing executors evenly across nodes.
var SingleAZTightlyPack = SparkBinPackFunction(func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (string, []string, bool) {
	driverNodePriorityOrderByZone := groupNodesByZone(driverNodePriorityOrder, nodesSchedulingMetadata)
	executorNodePriorityOrderByZone := groupNodesByZone(executorNodePriorityOrder, nodesSchedulingMetadata)
	for zone, driverNodePriorityOrderForZone := range driverNodePriorityOrderByZone {
		executorNodePriorityOrderForZone, ok := executorNodePriorityOrderByZone[zone]
		if !ok {
			continue
		}
		driverNode, executorNodes, hasCapacity := SparkBinPack(ctx, driverResources, executorResources, executorCount, driverNodePriorityOrderForZone, executorNodePriorityOrderForZone, nodesSchedulingMetadata, tightlyPackExecutors)
		if hasCapacity {
			return driverNode, executorNodes, hasCapacity
		}
	}
	return "", nil, false
})
SingleAZTightlyPack is a SparkBinPackFunction that tries to place the driver pod on the highest-priority nodes possible before tightly packing executors, while also ensuring that everything fits in a single AZ. If the application cannot fit into a single AZ, binpacking fails.
var TightlyPack = SparkBinPackFunction(func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (string, []string, bool) {
	return SparkBinPack(ctx, driverResources, executorResources, executorCount, driverNodePriorityOrder, executorNodePriorityOrder, nodesSchedulingMetadata, tightlyPackExecutors)
})
TightlyPack is a SparkBinPackFunction that tries to place the driver pod on the highest-priority nodes possible before tightly packing executors.
Functions
func SparkBinPack
func SparkBinPack(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata,
	distributeExecutors GenericBinPackFunction) (driverNode string, executorNodes []string, hasCapacity bool)
SparkBinPack places the driver first, then calls the distributeExecutors function to place the executors.
Types
type GenericBinPackFunction
type GenericBinPackFunction func(
	ctx context.Context,
	itemResources *resources.Resources,
	itemCount int,
	nodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata,
	reservedResources resources.NodeGroupResources) (nodes []string, hasCapacity bool)
GenericBinPackFunction is a function type for assigning nodes to a batch of equivalent pods.
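To illustrate the shape of this type, the sketch below defines a hypothetical round-robin GenericBinPackFunction. It simply spreads pods across the node priority order and deliberately omits the capacity checks against nodesSchedulingMetadata and reservedResources that a real implementation would perform; the package clause and the resources import path are placeholders.

package binpack // assumed package name, for illustration only

import (
	"context"

	// Placeholder import path for the resources helper package used by this package.
	"example.com/scheduler/resources"
)

// roundRobinExecutors is a hypothetical GenericBinPackFunction that assigns
// itemCount pods across the node priority order in round-robin fashion.
// NOTE: it skips the capacity checks a real implementation would perform
// against nodesSchedulingMetadata and reservedResources.
var roundRobinExecutors GenericBinPackFunction = func(
	ctx context.Context,
	itemResources *resources.Resources,
	itemCount int,
	nodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata,
	reservedResources resources.NodeGroupResources) ([]string, bool) {
	if itemCount == 0 {
		return []string{}, true
	}
	if len(nodePriorityOrder) == 0 {
		return nil, false
	}
	nodes := make([]string, 0, itemCount)
	for i := 0; i < itemCount; i++ {
		// Cycle through the priority order, highest-priority nodes first.
		nodes = append(nodes, nodePriorityOrder[i%len(nodePriorityOrder)])
	}
	return nodes, true
}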
type SparkBinPackFunction
type SparkBinPackFunction func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (driverNode string, executorNodes []string, hasCapacity bool)
SparkBinPackFunction is a function type for assigning nodes to Spark drivers and executors.
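Custom strategies can be assembled the same way the exported variables above are: pair SparkBinPack with a GenericBinPackFunction and wrap the result in a SparkBinPackFunction. The sketch below continues the previous placeholder file and reuses the hypothetical roundRobinExecutors function from the GenericBinPackFunction example.

// RoundRobinPack is a hypothetical SparkBinPackFunction built the same way as
// TightlyPack and DistributeEvenly: SparkBinPack places the driver, and
// executor placement is delegated to roundRobinExecutors (sketched above).
var RoundRobinPack = SparkBinPackFunction(func(
	ctx context.Context,
	driverResources, executorResources *resources.Resources,
	executorCount int,
	driverNodePriorityOrder, executorNodePriorityOrder []string,
	nodesSchedulingMetadata resources.NodeGroupSchedulingMetadata) (string, []string, bool) {
	return SparkBinPack(ctx, driverResources, executorResources, executorCount,
		driverNodePriorityOrder, executorNodePriorityOrder, nodesSchedulingMetadata,
		roundRobinExecutors)
})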