package queueing

v1.32.1
Published: Jan 15, 2025 License: Apache-2.0 Imports: 22 Imported by: 0

Documentation
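
Overview

The variable CoreResourceEnqueueTestCases drives the scheduler's requeueing integration tests. Each case creates the initial Nodes, Pods, PVs, and PVCs, submits the Pods under test (which a scheduling plugin is expected to reject), fires one or more cluster events via TriggerFn, and then asserts which Pods the scheduling queue moves back to activeQ. The following sketch of the test-case shape is inferred from the literal below; the field types are assumptions based on usage, not the package's actual declaration:

	type CoreResourceEnqueueTestCase struct {
		// Name describes the scenario under test.
		Name string
		// Initial objects created before the Pods under test are submitted.
		InitialNodes []*v1.Node
		InitialPods  []*v1.Pod
		InitialPVs   []*v1.PersistentVolume
		InitialPVCs  []*v1.PersistentVolumeClaim
		// Pods are submitted for scheduling and expected to be rejected at first.
		Pods []*v1.Pod
		// TriggerFn fires the cluster event(s) that should requeue some Pods and
		// returns the expected event counts.
		TriggerFn func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error)
		// WantRequeuedPods names the Pods expected to be moved back to activeQ.
		WantRequeuedPods sets.Set[string]
		// EnableSchedulingQueueHint restricts the case to runs with the
		// scheduling queue hints feature enabled (true) or disabled (false).
		EnableSchedulingQueueHint sets.Set[bool]
	}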

Index

Constants

This section is empty.

Variables

View Source
var CoreResourceEnqueueTestCases = []*CoreResourceEnqueueTestCase{
	{
		Name:         "Pod without a required toleration to a node isn't requeued to activeQ",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
		Pods: []*v1.Pod{
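			// pod1 lacks a toleration for the Node's NoSchedule taint; pod2 tolerates it
			// but requests 4 CPUs while the Node only has 2, so both start out unschedulable.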

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
			st.MakePod().Name("pod2").Toleration(v1.TaintNodeNotReady).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
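			// Raising the Node's CPU capacity to 4 fires UpdateNodeAllocatable: pod2 now fits
			// and is requeued, while pod1 still lacks the required toleration.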

			if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod2"),
	},
	{
		Name:         "Pod rejected by the PodAffinity plugin is requeued when a new Node is created and turned to ready",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Label("anti", "anti").Name("pod1").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Label("anti", "anti").Name("pod2").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
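			// Create a new tainted Node, then strip the taint: the Node add and taint-removal
			// events open a second topology domain where pod2 no longer conflicts with pod1.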

			node := st.MakeNode().Name("fake-node2").Label("node", "fake-node2").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}

			node.Spec.Taints = nil
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to remove taints off the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{
				{Resource: framework.Node, ActionType: framework.Add}:             1,
				{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod2"),
	},
	{
		Name:         "Pod rejected by the NodeAffinity plugin is requeued when a Node's label is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Label("group", "a").Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),

			st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"c"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
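			// Relabeling the Node to group=b satisfies pod1's node affinity only;
			// pod2 (group=c) stays unschedulable.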

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node1").Label("group", "b").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name: "Pod rejected by the NodeAffinity plugin is not requeued when an updated Node haven't changed the 'match' verdict",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("node1").Label("group", "a").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
			st.MakeNode().Name("node2").Label("group", "b").Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"a"}, st.NodeSelectorTypeMatchExpressions).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
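			// The update only adds an unrelated label; the NodeAffinity 'match' verdict for
			// pod1 is unchanged, so with QueueingHints enabled nothing is requeued.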

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("node1").Label("group", "a").Label("node", "fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the NodeAffinity plugin is requeued when a Node is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Label("group", "a").Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),

			st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"c"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, st.MakeNode().Name("fake-node2").Label("group", "b").Obj(), metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod updated with toleration requeued to activeQ",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Container("image").Obj(),
			st.MakePod().Name("pod2").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
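			// Patching pod1 with the matching toleration is an UpdatePodTolerations event on
			// the unschedulable Pod itself, so only pod1 is requeued.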

			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Container("image").Toleration("taint-key").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodTolerations}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the TaintToleration plugin is requeued when the Node's taint is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Toleration("taint-key").Container("image").Obj(),
			st.MakePod().Name("pod2").Toleration("taint-key2").Container("image").Obj(),
			st.MakePod().Name("pod3").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
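			// Swapping the Node's taint to taint-key makes it tolerable for pod1 only;
			// pod2 and pod3 still lack a matching toleration.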

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the Node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the TaintToleration plugin is requeued when a Node that has the correspoding taint is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Toleration("taint-key").Container("image").Obj(),
			st.MakePod().Name("pod2").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, st.MakeNode().Name("fake-node2").Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create the Node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the NodeResourcesFit plugin is requeued when the Pod is updated to scale down",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
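			// Resizing pod1's CPU request down from 4 to 2 (via the resize subresource)
			// fires UpdatePodScaleDown; the Pod now fits the 2-CPU Node and is requeued.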

			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).UpdateResize(testCtx.Ctx, "pod1", st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Container("image").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to resize the pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodScaleDown}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the NodeResourcesFit plugin is requeued when a Pod is deleted",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
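			// Deleting the running pod1 frees its 2 CPUs, so pod2 is requeued by the
			// assigned-Pod delete event.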

			if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
				return nil, fmt.Errorf("failed to delete pod1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
		},
		WantRequeuedPods: sets.New("pod2"),
	},
	{
		Name:         "Pod rejected by the NodeResourcesFit plugin is requeued when a Node is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),

			st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "5"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
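			// The new 4-CPU Node fits pod1's request (4) but not pod2's (5),
			// so only pod1 is requeued.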

			node := st.MakeNode().Name("fake-node2").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the NodeResourcesFit plugin is requeued when a Node is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),

			st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "5"}).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update fake-node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name: "Pod rejected by the NodeResourcesFit plugin isn't requeued when a Node is updated without increase in the requested resources",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
			st.MakeNode().Name("fake-node2").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Label("group", "b").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
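			// fake-node1 gains memory, but pod1 only requests CPU and that capacity stays at 4,
			// so the NodeResourcesFit QueueingHint filters the event out.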

			if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4", v1.ResourceMemory: "4000"}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update fake-node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pod rejected by the NodeResourcesFit plugin is requeued when a Node is updated with increase in the requested resources",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
			st.MakeNode().Name("fake-node2").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Label("group", "b").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "5"}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update fake-node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pod rejected by the NodeResourcesFit plugin is requeued when a Node is updated with increase in the allowed pods number",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "2"}).Obj(),
			st.MakeNode().Name("fake-node2").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "1"}).Label("group", "b").Obj(),
		},
		InitialPods: []*v1.Pod{

			st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
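			// Raising fake-node1's Pod capacity from 2 to 3 counts as an allocatable increase
			// for NodeResourcesFit, so pod2 is requeued; whether it actually fits is decided
			// in the next scheduling cycle.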

			if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "3"}).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update fake-node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Updating pod label doesn't retry scheduling if the Pod was rejected by TaintToleration",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
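			// A label update on the unschedulable Pod can't change the TaintToleration verdict,
			// so with QueueingHints enabled nothing is requeued.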
			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("key", "val").Container("image").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodLabel}: 1}, nil
		},
		WantRequeuedPods: sets.Set[string]{},

		EnableSchedulingQueueHint: sets.New(true),
	},
	{

		Name: "Pod rejected by the PreFilter of NodeAffinity plugin and Filter of NodeResourcesFit is requeued based on both plugins",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
			st.MakeNode().Name("fake-node2").Label("node", "fake-node2").Label("zone", "zone1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Label("unscheduled", "plugins").Name("pod1").NodeAffinityIn("metadata.name", []string{"fake-node"}, st.NodeSelectorTypeMatchFields).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
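			// No cluster event is triggered here; the case only inspects the queue and verifies
			// that pod1 was rejected by exactly the NodeAffinity and NodeResourcesFit plugins.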

			pInfo, ok := testCtx.Scheduler.SchedulingQueue.GetPod("pod1", testCtx.NS.Name)
			if !ok || pInfo == nil {
				return nil, fmt.Errorf("pod1 is not found in the scheduling queue")
			}

			if pInfo.Pod.Name != "pod1" {
				return nil, fmt.Errorf("unexpected pod info: %#v", pInfo)
			}

			if pInfo.UnschedulablePlugins.Difference(sets.New(names.NodeAffinity, names.NodeResourcesFit)).Len() != 0 {
				return nil, fmt.Errorf("unexpected unschedulable plugin(s) is registered in pod1: %v", pInfo.UnschedulablePlugins.UnsortedList())
			}

			return nil, nil
		},
	},
	{
		Name:         "Pod rejected by the PodAffinity plugin is requeued when deleting the existed pod's label to make it match the podAntiAffinity",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("anti1", "anti1").Label("anti2", "anti2").Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod2").Label("anti1", "anti1").PodAntiAffinityExists("anti1", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
			st.MakePod().Name("pod3").Label("anti2", "anti2").PodAntiAffinityExists("anti2", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
		},

		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
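			// Rebuilding pod1 with only the anti2 label drops anti1, clearing the anti-affinity
			// conflict for pod2; pod3 still conflicts with pod1 on anti2.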

			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("anti2", "anti2").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pod1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: assignedPod, ActionType: framework.UpdatePodLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the PodAffinity plugin is requeued when updating the existed pod's label to make it match the pod's podAffinity",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod2").PodAffinityExists("aaa", "node", st.PodAffinityWithRequiredReq).Container("image").Obj(),
			st.MakePod().Name("pod3").PodAffinityExists("bbb", "node", st.PodAffinityWithRequiredReq).Container("image").Obj(),
		},

		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
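			// Labeling pod1 with the "aaa" key satisfies pod2's required podAffinity;
			// pod3's "bbb" key is still missing, so only pod2 is requeued.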

			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("aaa", "bbb").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pod1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: assignedPod, ActionType: framework.UpdatePodLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},

	{
		Name:         "Pod rejected by the PodAffinity plugin is requeued when updating the label of the node to make it match the pod affinity",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod1").PodAffinityExists("bbb", "zone", st.PodAffinityWithRequiredReq).Container("image").Obj(),
			st.MakePod().Name("pod2").PodAffinityExists("ccc", "region", st.PodAffinityWithRequiredReq).Container("image").Obj(),
		},

		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
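			// Adding the zone label gives the Node the topology key that pod1's affinity term
			// refers to, so pod1 is requeued; pod2's region key is still absent.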

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Label("zone", "zone1").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pod1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with hostport by the NodePorts plugin is requeued when pod with common hostport is deleted",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Node("fake-node").Obj(),
			st.MakePod().Name("pod2").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8081}}).Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod3").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Obj(),
			st.MakePod().Name("pod4").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8081}}).Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
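			// Deleting pod1 frees hostPort 8080, requeueing pod3; pod4 still collides
			// with pod2 on hostPort 8081.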

			if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
				return nil, fmt.Errorf("failed to delete Pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with hostport by the NodePorts plugin is requeued when new node is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").ContainerPort([]v1.ContainerPort{{ContainerPort: 8080, HostPort: 8080}}).Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			node := st.MakeNode().Name("fake-node2").Label("node", "fake-node2").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the NodeUnschedulable plugin is requeued when the node is turned to ready",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Unschedulable(true).Obj()},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
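			// Flipping .spec.unschedulable back to false is surfaced as an UpdateNodeTaint event
			// (the flag is handled like the unschedulable taint), requeueing pod1.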

			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Unschedulable(false).Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the NodeUnschedulable plugin is requeued when a new node is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Unschedulable(true).Obj()},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			node := st.MakeNode().Name("fake-node2").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod1"),
	},
	{
		Name:         "Pod rejected by the NodeUnschedulable plugin isn't requeued when another unschedulable node is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Unschedulable(true).Obj()},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			node := st.MakeNode().Name("fake-node2").Unschedulable(true).Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods: sets.Set[string]{},

		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pods with PodTopologySpread should be requeued when a Pod with matching label is scheduled",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
			st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
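			// The newly scheduled pod5 carries key1, which matches pod3's spread selector but
			// not pod4's (key2), so only pod3 is requeued.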

			pod := st.MakePod().Name("pod5").Label("key1", "val").Node("fake-node").Container("image").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Create(testCtx.Ctx, pod, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create Pod %q: %w", pod.Name, err)
			}

			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodAdd: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pods with PodTopologySpread should be requeued when a scheduled Pod label is updated to match the selector",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
			st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
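			// The update replaces pod1's key1 label with key3; pod3's key1 selector is affected
			// by the change, so pod3 is requeued, while pod4's key2 selector is not.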

			if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("key3", "val").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update the pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodUpdate: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pods with PodTopologySpread should be requeued when a scheduled Pod with matching label is deleted",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node").Obj(),
			st.MakePod().Name("pod2").Label("key2", "val").Container("image").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key2", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key2").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
				return nil, fmt.Errorf("failed to delete Pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pods with PodTopologySpread should be requeued when a Node with topology label is created",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
			st.MakeNode().Name("fake-node2").Label("zone", "fake-zone").Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").Container("image").Node("fake-node1").Obj(),
			st.MakePod().Name("pod2").Label("key1", "val").Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(2)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
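			// The new Node carries only the "node" topology label, so just pod3 (node-keyed
			// constraint) is requeued; pod4's zone constraint is unaffected.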

			node := st.MakeNode().Name("fake-node3").Label("node", "fake-node").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create a new node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pods with PodTopologySpread should be requeued when a Node is updated to have the topology label",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Label("node", "fake-node").Label("region", "fake-node").Label("service", "service-a").Obj(),
			st.MakeNode().Name("fake-node2").Label("node", "fake-node").Label("region", "fake-node").Label("service", "service-a").Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
			st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "region", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
			st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "service", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod5").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod6").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod7").Label("key1", "val").SpreadConstraint(1, "region", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod8").Label("key1", "val").SpreadConstraint(1, "other", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod9").Label("key1", "val").SpreadConstraint(1, "service", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
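			// The update removes fake-node2's node label, adds zone, and changes service,
			// requeueing pod5, pod6, and pod9; region keeps its value (pod7) and the
			// "other" key was never present (pod8).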

			node := st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Label("region", "fake-node").Label("service", "service-b").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod5", "pod6", "pod9"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pods with PodTopologySpread should be requeued when a Node with a topology label is deleted (QHint: enabled)",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
			st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
			st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
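			// Deleting fake-node2 removes a "zone" topology domain; with QueueingHints enabled
			// only pod4 (zone-keyed constraint) is requeued.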

			if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Delete}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod4"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name: "Pods with PodTopologySpread should be requeued when a Node with a topology label is deleted (QHint: disabled)",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
			st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
			st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
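			// Without QueueingHints, a Node delete requeues every Pod rejected by
			// PodTopologySpread, so both pod3 and pod4 go back to activeQ.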

			if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Delete}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod3", "pod4"),
		EnableSchedulingQueueHint: sets.New(false),
	},
	{
		Name: "Pods with PodTopologySpread should be requeued when a NodeTaint of a Node with a topology label has been updated",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node1").Label("node", "fake-node").Obj(),
			st.MakeNode().Name("fake-node2").Label("zone", "fake-node").Obj(),
			st.MakeNode().Name("fake-node3").Label("zone", "fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node1").Obj(),
			st.MakePod().Name("pod2").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), nil, nil, nil, nil).Container("image").Node("fake-node2").Obj(),
		},
		Pods: []*v1.Pod{

			st.MakePod().Name("pod3").Label("key1", "val").SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Obj(),
			st.MakePod().Name("pod4").Label("key1", "val").SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("key1").Obj(), ptr.To(int32(3)), nil, nil, nil).Container("image").Toleration("aaa").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
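			// fake-node3's taint changes from not-ready to "aaa", which pod4 tolerates,
			// so pod4 (zone-keyed constraint) is requeued.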

			node := st.MakeNode().Name("fake-node3").Label("zone", "fake-node").Taints([]v1.Taint{{Key: "aaa", Value: "bbb", Effect: v1.TaintEffectNoSchedule}}).Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
		},
		WantRequeuedPods: sets.New("pod4"),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is requeued when the PV is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
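			// pvc2 points at pv2, which doesn't exist yet; creating pv2 in the node's zone
			// (us-west1-a) fires a PersistentVolume add event and requeues pod2.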
			pv2 := st.MakePersistentVolume().Name("pv2").Label(v1.LabelTopologyZone, "us-west1-a").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Create(testCtx.Ctx, pv2, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create pv2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is requeued when the PV is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
			st.MakePersistentVolume().
				Name("pv2").
				Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pv2 := st.MakePersistentVolume().Name("pv2").Label(v1.LabelTopologyZone, "us-west1-a").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pv2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is requeued when the PVC bound to the pod is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
			st.MakePersistentVolume().
				Name("pv2").
				Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
			st.MakePod().Name("pod3").Container("image").PVC("pvc3").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pvc2 := st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc2, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to add pvc2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is requeued when the PVC bound to the pod is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
			st.MakePersistentVolume().
				Name("pv2").
				Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
			st.MakePod().Name("pod3").Container("image").PVC("pvc3").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pvc2 := st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Update(testCtx.Ctx, pvc2, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pvc2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is requeued when the Storage class is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
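			// Creating a WaitForFirstConsumer StorageClass fires a StorageClass add event,
			// which requeues pod2.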
			sc1 := st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				Obj()
			if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create sc1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeZone plugin is not requeued when the PV is updated but the topology is same",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
			st.MakePersistentVolume().
				Name("pv2").
				Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
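			// pv2 only gains an unrelated label and keeps its us-east1 zone, so the
			// QueueingHint rejects the update and nothing is requeued.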
			pv2 := st.MakePersistentVolume().Name("pv2").
				Labels(map[string]string{v1.LabelTopologyZone: "us-east1", "unrelated": "unrelated"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pv2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the VolumeRestriction plugin is requeued when the PVC bound to the pod is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pvc1 := st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to add pvc1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
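	// In the next case, pod1 holds pvc1, whose access mode is ReadWriteOncePod.
	// Deleting pod1 releases pvc1, so pod2, which references the same claim,
	// is requeued; pod3 stays unschedulable because pvc2 does not exist.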
	{
		Name:         "Pod rejected by the VolumeRestriction plugin is requeued when the pod is deleted",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc1").Obj(),
			st.MakePod().Name("pod3").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
				return nil, fmt.Errorf("failed to delete pod1: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
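	// In the next case, pv1 is reachable only from zone us-east-1a, which no
	// initial Node carries, so pod1 stays unschedulable until a Node in that
	// zone is created. This case runs with the queue hints both on and off.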
	{
		Name: "Pod rejected with node by the VolumeBinding plugin is requeued when the Node is created",
		InitialNodes: []*v1.Node{
			st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-east-1b").Obj(),
		},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				NodeAffinityIn(v1.LabelTopologyZone, []string{"us-east-1a"}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			node := st.MakeNode().Name("fake-node2").Label(v1.LabelTopologyZone, "us-east-1a").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name: "Pod rejected with node by the VolumeBinding plugin is requeued when the Node is updated",
		InitialNodes: []*v1.Node{
			st.MakeNode().
				Name("fake-node").
				Label("node", "fake-node").
				Label("aaa", "bbb").
				Obj(),
		},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				NodeAffinityIn("aaa", []string{"ccc"}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			node := st.MakeNode().Name("fake-node").Label("node", "fake-node").Label("aaa", "ccc").Obj()
			if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update node: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the PV is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label("aaa", "bbb").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				NodeAffinityIn("aaa", []string{"ccc"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				VolumeName("pv1").
				Annotation(volume.AnnBindCompleted, "true").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			if err := testCtx.ClientSet.CoreV1().PersistentVolumes().Delete(testCtx.Ctx, "pv1", metav1.DeleteOptions{}); err != nil {
				return nil, fmt.Errorf("failed to delete pv1: %w", err)
			}
			pv1 := st.MakePersistentVolume().
				Name("pv1").
				NodeAffinityIn("aaa", []string{"bbb"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Create(testCtx.Ctx, pv1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create pv: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
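	// In the next case, pv1 initially pins to the deprecated beta zone label
	// (v1.LabelFailureDomainBetaZone), which fake-node does not carry; updating
	// the PV to the GA v1.LabelTopologyZone label makes it reachable from the node.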
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the PV is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-east-1a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				NodeAffinityIn(v1.LabelFailureDomainBetaZone, []string{"us-east-1a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				VolumeName("pv1").
				Annotation(volume.AnnBindCompleted, "true").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pv1 := st.MakePersistentVolume().
				Name("pv1").
				NodeAffinityIn(v1.LabelTopologyZone, []string{"us-east-1a"}).
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv1, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pv: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the PVC is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			if err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Delete(testCtx.Ctx, "pvc1", metav1.DeleteOptions{}); err != nil {
				return nil, fmt.Errorf("failed to delete pvc1: %w", err)
			}
			pvc1 := st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create pvc: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the PVC is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			pvc1 := st.MakePersistentVolumeClaim().
				Name("pvc1").
				VolumeName("pv1").
				Annotation(volume.AnnBindCompleted, "true").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()

			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Update(testCtx.Ctx, pvc1, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update pvc: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the StorageClass is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			sc1 := st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				Obj()
			if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create storageclass: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the StorageClass's AllowedTopologies is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west-1a").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		InitialStorageClasses: []*storagev1.StorageClass{
			st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				AllowedTopologies([]v1.TopologySelectorTerm{
					{
						MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
							{Key: v1.LabelTopologyZone, Values: []string{"us-west-1c"}},
						},
					},
				}).Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			sc1 := st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				AllowedTopologies([]v1.TopologySelectorTerm{
					{
						MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
							{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
						},
					},
				}).
				Obj()
			if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Update(testCtx.Ctx, sc1, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update storageclass: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
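	// The negative counterpart: the update in the next case only changes a
	// label on the StorageClass while AllowedTopologies stays the same, so the
	// queue hint must ignore the event and no Pod is requeued.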
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is not requeued when the StorageClass is updated but the AllowedTopologies is the same",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west-1c").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		InitialStorageClasses: []*storagev1.StorageClass{
			st.MakeStorageClass().
				Name("sc1").
				Label("key", "value").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				AllowedTopologies([]v1.TopologySelectorTerm{
					{
						MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
							{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
						},
					},
				}).Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			sc1 := st.MakeStorageClass().
				Name("sc1").
				Label("key", "updated").
				VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
				Provisioner("p").
				AllowedTopologies([]v1.TopologySelectorTerm{
					{
						MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
							{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
						},
					},
				}).
				Obj()
			if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Update(testCtx.Ctx, sc1, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update storageclass: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the CSINode is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().
				Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			csinode1 := st.MakeCSINode().Name("fake-node").Obj()

			if _, err := testCtx.ClientSet.StorageV1().CSINodes().Create(testCtx.Ctx, csinode1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create CSINode: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSINode, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the CSINode's MigratedPluginsAnnotation is updated",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		InitialCSINodes: []*storagev1.CSINode{
			st.MakeCSINode().Name("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			csinode, err := testCtx.ClientSet.StorageV1().CSINodes().Get(testCtx.Ctx, "fake-node", metav1.GetOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to get CSINode: %w", err)
			}

			metav1.SetMetaDataAnnotation(&csinode.ObjectMeta, v1.MigratedPluginsAnnotationKey, "value")
			if _, err := testCtx.ClientSet.StorageV1().CSINodes().Update(testCtx.Ctx, csinode, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update CSINode: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSINode, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIDriver's StorageCapacity gets disabled",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialCSIDrivers: []*storagev1.CSIDriver{
			st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(true)).Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Volume(
				v1.Volume{
					Name: "volume",
					VolumeSource: v1.VolumeSource{
						CSI: &v1.CSIVolumeSource{
							Driver: "csidriver",
						},
					},
				},
			).Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			csidriver, err := testCtx.ClientSet.StorageV1().CSIDrivers().Get(testCtx.Ctx, "csidriver", metav1.GetOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to get CSIDriver: %w", err)
			}
			csidriver.Spec.StorageCapacity = ptr.To(false)

			if _, err := testCtx.ClientSet.StorageV1().CSIDrivers().Update(testCtx.Ctx, csidriver, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update CSIDriver: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSIDriver, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
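	// In the next case, the CSIDriver's StorageCapacity flips from false to
	// true. The Pod was not rejected for lack of capacity, so enabling the
	// capacity check cannot help it, and no requeueing is expected.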
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is not requeued when the CSIDriver is updated but the storage capacity was originally disabled",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialCSIDrivers: []*storagev1.CSIDriver{
			st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(false)).Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Volume(
				v1.Volume{
					Name: "volume",
					VolumeSource: v1.VolumeSource{
						CSI: &v1.CSIVolumeSource{
							Driver: "csidriver",
						},
					},
				},
			).Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			csidriver, err := testCtx.ClientSet.StorageV1().CSIDrivers().Get(testCtx.Ctx, "csidriver", metav1.GetOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to get CSIDriver: %w", err)
			}
			csidriver.Spec.StorageCapacity = ptr.To(true)

			if _, err := testCtx.ClientSet.StorageV1().CSIDrivers().Update(testCtx.Ctx, csidriver, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update CSIDriver: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSIDriver, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIStorageCapacity is created",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		InitialStorageClasses: []*storagev1.StorageClass{
			st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingImmediate).
				Provisioner("p").
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			csc := st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj()
			if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Create(testCtx.Ctx, csc, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create CSIStorageCapacity: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIStorageCapacity is increased",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				StorageClassName("sc1").
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				StorageClassName(ptr.To("sc1")).
				Obj(),
		},
		InitialStorageCapacities: []*storagev1.CSIStorageCapacity{
			st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj(),
		},
		InitialStorageClasses: []*storagev1.StorageClass{
			st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingImmediate).
				Provisioner("p").
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			csc, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Get(testCtx.Ctx, "csc", metav1.GetOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to get CSIStorageCapacity: %w", err)
			}
			csc.Capacity = resource.NewQuantity(20, resource.BinarySI)

			if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Update(testCtx.Ctx, csc, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update CSIStorageCapacity: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true, false),
	},
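	// The negative counterpart: the capacity in the next case shrinks from 10
	// to 5, which can never make the rejected Pod schedulable, so it must not
	// be requeued.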
	{
		Name:         "Pod rejected with node by the VolumeBinding plugin is not requeued when the CSIStorageCapacity is updated but the capacity is not increased",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialStorageCapacities: []*storagev1.CSIStorageCapacity{
			st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj(),
		},
		InitialStorageClasses: []*storagev1.StorageClass{
			st.MakeStorageClass().
				Name("sc1").
				VolumeBindingMode(storagev1.VolumeBindingImmediate).
				Provisioner("p").
				Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {

			csc, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Get(testCtx.Ctx, "csc", metav1.GetOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to get CSIStorageCapacity: %w", err)
			}
			csc.Capacity = resource.NewQuantity(5, resource.BinarySI)

			if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Update(testCtx.Ctx, csc, metav1.UpdateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to update CSIStorageCapacity: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Update}: 1}, nil
		},
		WantRequeuedPods:          sets.Set[string]{},
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected by the CSI plugin is requeued when the CSINode is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle"}}).
				Obj()},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialCSIDrivers: []*storagev1.CSIDriver{
			st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(true)).Obj(),
		},
		InitialCSINodes: []*storagev1.CSINode{
			st.MakeCSINode().Name("fake-node").Driver(storagev1.CSINodeDriver{Name: "csidriver", NodeID: "fake-node", Allocatable: &storagev1.VolumeNodeResources{
				Count: ptr.To(int32(0)),
			}}).Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			csinode1 := st.MakeCSINode().Name("csinode").Obj()
			if _, err := testCtx.ClientSet.StorageV1().CSINodes().Create(testCtx.Ctx, csinode1, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to create CSINode: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.CSINode, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod1"),
		EnableSchedulingQueueHint: sets.New(true),
	},
	{
		Name:         "Pod rejected with PVC by the CSI plugin is requeued when the pod having the related PVC is deleted",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle1"}}).
				Obj(),
			st.MakePersistentVolume().Name("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle2"}}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialCSIDrivers: []*storagev1.CSIDriver{
			st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(true)).Obj(),
		},
		InitialCSINodes: []*storagev1.CSINode{
			st.MakeCSINode().Name("fake-node").Driver(storagev1.CSINodeDriver{Name: "csidriver", NodeID: "fake-node", Allocatable: &storagev1.VolumeNodeResources{
				Count: ptr.To(int32(1)),
			}}).Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
				return nil, fmt.Errorf("failed to delete Pod: %w", err)
			}
			return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
		},
		WantRequeuedPods: sets.New("pod2"),
	},
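	// In the next case, re-creating pvc2 requeues only pod2, which references
	// it; pod3 references the untouched pvc3 and stays in the unschedulable pool.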
	{
		Name:         "Pod rejected with PVC by the CSI plugin is requeued when the related PVC is added",
		InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
		InitialPVs: []*v1.PersistentVolume{
			st.MakePersistentVolume().Name("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle1"}}).
				Obj(),
			st.MakePersistentVolume().Name("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle2"}}).
				Obj(),
			st.MakePersistentVolume().Name("pv3").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
				Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
				PersistentVolumeSource(v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "csidriver", VolumeHandle: "volumehandle3"}}).
				Obj(),
		},
		InitialPVCs: []*v1.PersistentVolumeClaim{
			st.MakePersistentVolumeClaim().
				Name("pvc1").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv1").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),

			st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
			st.MakePersistentVolumeClaim().
				Name("pvc3").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv3").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj(),
		},
		InitialCSIDrivers: []*storagev1.CSIDriver{
			st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(true)).Obj(),
		},
		InitialCSINodes: []*storagev1.CSINode{
			st.MakeCSINode().Name("fake-node").Driver(storagev1.CSINodeDriver{Name: "csidriver", NodeID: "fake-node", Allocatable: &storagev1.VolumeNodeResources{
				Count: ptr.To(int32(1)),
			}}).Obj(),
		},
		InitialPods: []*v1.Pod{
			st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
		},
		Pods: []*v1.Pod{
			st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
			st.MakePod().Name("pod3").Container("image").PVC("pvc3").Obj(),
		},
		TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
			if err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Delete(testCtx.Ctx, "pvc2", metav1.DeleteOptions{}); err != nil {
				return nil, fmt.Errorf("failed to delete pvc2: %w", err)
			}

			pvc := st.MakePersistentVolumeClaim().
				Name("pvc2").
				Annotation(volume.AnnBindCompleted, "true").
				VolumeName("pv2").
				AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
				Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
				Obj()
			if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc, metav1.CreateOptions{}); err != nil {
				return nil, fmt.Errorf("failed to add pvc2: %w", err)
			}
			return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
		},
		WantRequeuedPods:          sets.New("pod2"),
		EnableSchedulingQueueHint: sets.New(true),
	},
}

We define all the test cases here in one place, and they are run by either the ./former or the ./queueinghint tests, depending on EnableSchedulingQueueHint. The split is necessary because running every test case in a single job exceeded the test timeout.
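To split the shared table by feature-gate value, a runner only needs to filter on EnableSchedulingQueueHint. The helper below is a minimal sketch of that filtering, not a function exported by this package; it assumes the same sets package used in the table above:

func casesFor(queueHintEnabled bool) []*CoreResourceEnqueueTestCase {
	var filtered []*CoreResourceEnqueueTestCase
	for _, tt := range CoreResourceEnqueueTestCases {
		gates := tt.EnableSchedulingQueueHint
		if gates.Len() == 0 {
			// The field's documented default is {true, false}.
			gates = sets.New(true, false)
		}
		if gates.Has(queueHintEnabled) {
			filtered = append(filtered, tt)
		}
	}
	return filtered
}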

Functions

func RunTestCoreResourceEnqueue

func RunTestCoreResourceEnqueue(t *testing.T, tt *CoreResourceEnqueueTestCase)

RunTestCoreResourceEnqueue verifies that Pods rejected by the in-tree default plugins are moved properly upon their registered events.
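A typical caller iterates over the shared table and runs each case as a subtest. The sketch below shows the expected usage under that assumption; it is not code from this package:

func TestCoreResourceEnqueue(t *testing.T) {
	for _, tt := range CoreResourceEnqueueTestCases {
		t.Run(tt.Name, func(t *testing.T) {
			RunTestCoreResourceEnqueue(t, tt)
		})
	}
}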

Types

type CoreResourceEnqueueTestCase

type CoreResourceEnqueueTestCase struct {
	Name string
	// InitialNodes is the list of Nodes to be created at first.
	InitialNodes []*v1.Node
	// InitialPods is the list of Pods to be created at first if it's not empty.
	// Note that the scheduler won't schedule these Pods; they are expected to be
	// already assigned, i.e., each of them should have .spec.nodeName set.
	InitialPods []*v1.Pod
	// InitialPVCs is the list of PersistentVolumeClaims to be created at first.
	// Note that the PVs are created automatically following the PVCs.
	// Also, the namespace of each PVC is filled in automatically.
	InitialPVCs []*v1.PersistentVolumeClaim
	// InitialPVs is the list of PersistentVolumes to be created at first.
	InitialPVs []*v1.PersistentVolume
	// InitialStorageClasses is the list of StorageClasses to be created at first.
	InitialStorageClasses []*storagev1.StorageClass
	// InitialCSINodes is the list of CSINodes to be created at first.
	InitialCSINodes []*storagev1.CSINode
	// InitialCSIDrivers is the list of CSIDrivers to be created at first.
	InitialCSIDrivers []*storagev1.CSIDriver
	// InitialStorageCapacities is the list of CSIStorageCapacities to be created at first.
	InitialStorageCapacities []*storagev1.CSIStorageCapacity
	// Pods are the list of Pods to be created.
	// All of them are expected to be unschedulable at first.
	Pods []*v1.Pod
	// TriggerFn is the function that triggers the event to move Pods.
	// It returns a map keyed by the ClusterEvents that the function triggers,
	// valued with the number of times each event is triggered.
	TriggerFn func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error)
	// WantRequeuedPods is the set of Pods that are expected to be requeued after TriggerFn is called.
	WantRequeuedPods sets.Set[string]
	// EnableSchedulingQueueHint indicates which feature gate value(s) the test case should run with.
	// By default, it's {true, false}.
	EnableSchedulingQueueHint sets.Set[bool]
}
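Putting the fields together, the entry below is a condensed restatement of one of the VolumeBinding cases above; it shows the typical shape of a case and is illustrative only:

&CoreResourceEnqueueTestCase{
	Name:         "Pod rejected by the VolumeBinding plugin is requeued when the StorageClass is created",
	InitialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Obj()},
	InitialPVs: []*v1.PersistentVolume{
		st.MakePersistentVolume().Name("pv1").
			AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
			Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
			HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
			StorageClassName("sc1").
			Obj(),
	},
	InitialPVCs: []*v1.PersistentVolumeClaim{
		st.MakePersistentVolumeClaim().Name("pvc1").
			AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
			Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
			StorageClassName(ptr.To("sc1")).
			Obj(),
	},
	Pods: []*v1.Pod{
		st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
	},
	TriggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
		// Creating the missing StorageClass should emit one StorageClass/Add event.
		sc := st.MakeStorageClass().Name("sc1").VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).Provisioner("p").Obj()
		if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc, metav1.CreateOptions{}); err != nil {
			return nil, fmt.Errorf("failed to create sc1: %w", err)
		}
		return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Add}: 1}, nil
	},
	WantRequeuedPods:          sets.New("pod1"),
	EnableSchedulingQueueHint: sets.New(true),
}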
