
e2e: simplify sleeper pod

Make it more flexible by allowing both CPU and memory
values to be modified, using functional options.

Signed-off-by: Talor Itzhak <titzhak@redhat.com>
Talor Itzhak 2022-11-28 20:22:56 +02:00
parent 9c725c378f
commit 0a06562930
2 changed files with 28 additions and 15 deletions
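
For context, the commit message refers to the functional-options pattern. Below is a minimal, self-contained Go sketch of that pattern as it applies here; the type and option names (sleeperSpec, option, withCPU, withMemory, newSleeper) are illustrative stand-ins, not the repository's actual API:

package main

import "fmt"

// sleeperSpec stands in for the pod under construction; illustrative only.
type sleeperSpec struct {
	cpu, memory string
}

// option mutates a sleeperSpec; callers compose any number of them.
type option func(*sleeperSpec)

func withCPU(cpu string) option       { return func(s *sleeperSpec) { s.cpu = cpu } }
func withMemory(memory string) option { return func(s *sleeperSpec) { s.memory = memory } }

// newSleeper builds the spec with defaults, then applies each option in
// order, so later options override earlier ones.
func newSleeper(opts ...option) *sleeperSpec {
	s := &sleeperSpec{cpu: "500m", memory: "100Mi"}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	fmt.Println(newSleeper(withCPU("1000m"))) // prints &{1000m 100Mi}
}

The same shape appears in the diff below: GuaranteedSleeper builds a default pod and then applies each `func(pod *corev1.Pod)` option in turn.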
test/e2e


@@ -19,6 +19,7 @@ package e2e
 import (
     "context"
     "fmt"
+    "k8s.io/apimachinery/pkg/api/resource"
     "time"

     . "github.com/onsi/ginkgo/v2"
@@ -175,7 +176,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
         By("getting the initial topology information")
         initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
         By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
-        sleeperPod := testpod.GuaranteedSleeper("500m")
+        sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+            corev1.ResourceList{
+                corev1.ResourceCPU: resource.MustParse("500m"),
+                // any random reasonable amount is fine
+                corev1.ResourceMemory: resource.MustParse("100Mi"),
+            }))

         podMap := make(map[string]*corev1.Pod)
         pod := f.PodClient().CreateSync(sleeperPod)
@@ -221,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
         By("getting the initial topology information")
         initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
         By("creating a pod consuming exclusive CPUs")
-        sleeperPod := testpod.GuaranteedSleeper("1000m")
+        sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+            corev1.ResourceList{
+                corev1.ResourceCPU: resource.MustParse("1000m"),
+                // any random reasonable amount is fine
+                corev1.ResourceMemory: resource.MustParse("100Mi"),
+            }))
         // in case there is more than a single node in the cluster
         // we need to set the node name, so we'll have certainty about
         // which node we need to examine


@@ -25,7 +25,6 @@ import (
     "github.com/onsi/ginkgo/v2"

     corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/uuid"
@@ -45,8 +44,8 @@ const (
 )

 // GuaranteedSleeper makes a Guaranteed QoS class Pod object which sleeps forever; resource limits can be set via functional options such as WithLimits.
-func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
-    return &corev1.Pod{
+func GuaranteedSleeper(opts ...func(pod *corev1.Pod)) *corev1.Pod {
+    p := &corev1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: "sleeper-gu-pod",
         },
@@ -54,20 +53,23 @@ func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
             RestartPolicy: corev1.RestartPolicyNever,
             Containers: []corev1.Container{
                 {
-                    Name:  "sleeper-gu-cnt",
-                    Image: PauseImage,
-                    Resources: corev1.ResourceRequirements{
-                        Limits: corev1.ResourceList{
-                            // we use 1 core because that's the minimal meaningful quantity
-                            corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit),
-                            // any random reasonable amount is fine
-                            corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
-                        },
-                    },
+                    Name:      "sleeper-gu-cnt",
+                    Image:     PauseImage,
+                    Resources: corev1.ResourceRequirements{},
                 },
             },
         },
     }
+
+    for _, o := range opts {
+        o(p)
+    }
+    return p
 }

+func WithLimits(list corev1.ResourceList) func(p *corev1.Pod) {
+    return func(p *corev1.Pod) {
+        p.Spec.Containers[0].Resources.Limits = list
+    }
+}
+
 // BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough
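
A design note on the new WithLimits option: although the callers above set only Limits, the pod keeps the Guaranteed QoS class the tests rely on, because Kubernetes defaults a container's Requests to its Limits when Requests are unset. The pattern also extends naturally to other pod fields; as a purely hypothetical example (not part of this commit), an option in the same testpod package could pin the sleeper to a node, addressing the multi-node comment in the first file:

// Hypothetical extension, not in this commit: pin the sleeper pod to a
// specific node so the test knows which node to examine.
func WithNodeName(name string) func(p *corev1.Pod) {
	return func(p *corev1.Pod) {
		p.Spec.NodeName = name
	}
}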