diff --git a/test/e2e/topology_updater.go b/test/e2e/topology_updater.go
index 043983a99..05e2b65f2 100644
--- a/test/e2e/topology_updater.go
+++ b/test/e2e/topology_updater.go
@@ -19,6 +19,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -175,7 +176,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 			By("getting the initial topology information")
 			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
 			By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
-			sleeperPod := testpod.GuaranteedSleeper("500m")
+			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+				corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("500m"),
+					// any random reasonable amount is fine
+					corev1.ResourceMemory: resource.MustParse("100Mi"),
+				}))
 
 			podMap := make(map[string]*corev1.Pod)
 			pod := f.PodClient().CreateSync(sleeperPod)
@@ -221,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 			By("getting the initial topology information")
 			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
 			By("creating a pod consuming exclusive CPUs")
-			sleeperPod := testpod.GuaranteedSleeper("1000m")
+			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+				corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1000m"),
+					// any random reasonable amount is fine
+					corev1.ResourceMemory: resource.MustParse("100Mi"),
+				}))
 			// in case there is more than a single node in the cluster
 			// we need to set the node name, so we'll have certainty about
 			// which node we need to examine
diff --git a/test/e2e/utils/pod/pod.go b/test/e2e/utils/pod/pod.go
index 75d39aeaa..698d94b4d 100644
--- a/test/e2e/utils/pod/pod.go
+++ b/test/e2e/utils/pod/pod.go
@@ -25,7 +25,6 @@ import (
 	"github.com/onsi/ginkgo/v2"
 
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -45,8 +44,8 @@ const (
 )
 
-// GuaranteedSleeper makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
-func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
-	return &corev1.Pod{
+// GuaranteedSleeper makes a Guaranteed QoS class Pod object which sleeps forever; resource limits (e.g. exclusive CPUs) are set via opts such as WithLimits.
+func GuaranteedSleeper(opts ...func(pod *corev1.Pod)) *corev1.Pod {
+	p := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "sleeper-gu-pod",
 		},
@@ -54,20 +53,24 @@ func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
 			RestartPolicy: corev1.RestartPolicyNever,
 			Containers: []corev1.Container{
 				{
-					Name:  "sleeper-gu-cnt",
-					Image: PauseImage,
-					Resources: corev1.ResourceRequirements{
-						Limits: corev1.ResourceList{
-							// we use 1 core because that's the minimal meaningful quantity
-							corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit),
-							// any random reasonable amount is fine
-							corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
-						},
-					},
+					Name:      "sleeper-gu-cnt",
+					Image:     PauseImage,
+					Resources: corev1.ResourceRequirements{},
 				},
 			},
 		},
 	}
+	for _, o := range opts {
+		o(p)
+	}
+	return p
+}
+
+// WithLimits sets the resource limits of the pod's first (and only) container.
+func WithLimits(list corev1.ResourceList) func(p *corev1.Pod) {
+	return func(p *corev1.Pod) {
+		p.Spec.Containers[0].Resources.Limits = list
+	}
 }
 
 // BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough