Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git, synced 2025-03-31
e2e: simplify sleeper pod

Make it more flexible by allowing modification of both CPU and memory values, using functional options.

Signed-off-by: Talor Itzhak <titzhak@redhat.com>
commit 0a06562930 (parent 9c725c378f)
2 changed files with 28 additions and 15 deletions
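The refactor uses the functional options pattern: GuaranteedSleeper now takes variadic func(*corev1.Pod) options instead of a single cpuLimit string, so callers can set CPU, memory, or any other pod field without the signature growing. A minimal, self-contained sketch of the pattern (illustrative names, not the NFD code):

package main

import "fmt"

// option mutates the object under construction; callers pass any subset.
type option func(*sleeper)

type sleeper struct {
	cpu, memory string
}

func withCPU(v string) option    { return func(s *sleeper) { s.cpu = v } }
func withMemory(v string) option { return func(s *sleeper) { s.memory = v } }

// newSleeper applies defaults first, then each caller-supplied option in order.
func newSleeper(opts ...option) *sleeper {
	s := &sleeper{cpu: "500m", memory: "100Mi"}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", *newSleeper(withCPU("1000m")))
}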
@@ -19,6 +19,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"time"

 	. "github.com/onsi/ginkgo/v2"
@@ -175,7 +176,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 			By("getting the initial topology information")
 			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
 			By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
-			sleeperPod := testpod.GuaranteedSleeper("500m")
+			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+				corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("500m"),
+					// any random reasonable amount is fine
+					corev1.ResourceMemory: resource.MustParse("100Mi"),
+				}))

 			podMap := make(map[string]*corev1.Pod)
 			pod := f.PodClient().CreateSync(sleeperPod)
@@ -221,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 			By("getting the initial topology information")
 			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
 			By("creating a pod consuming exclusive CPUs")
-			sleeperPod := testpod.GuaranteedSleeper("1000m")
+			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+				corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("1000m"),
+					// any random reasonable amount is fine
+					corev1.ResourceMemory: resource.MustParse("100Mi"),
+				}))
 			// in case there is more than a single node in the cluster
 			// we need to set the node name, so we'll have certainty about
 			// which node we need to examine
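The two call sites differ only in the CPU quantity. In both, only limits are set; Kubernetes defaults requests to limits when requests are omitted, so the pod keeps the Guaranteed QoS class. The 500m limit is nonintegral and leaves the pod in the shared CPU pool, while 1000m is a whole core that the static CPU manager policy can allocate exclusively. A small illustrative check of that integrality distinction (not NFD code):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	for _, s := range []string{"500m", "1000m"} {
		q := resource.MustParse(s)
		// Whole-core (integral) CPU limits are what make a Guaranteed
		// pod eligible for exclusive CPUs under the static CPU manager.
		fmt.Printf("%s -> whole cores: %v\n", s, q.MilliValue()%1000 == 0)
	}
}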
@@ -25,7 +25,6 @@ import (
 	"github.com/onsi/ginkgo/v2"

 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -45,8 +44,8 @@ const (
 )

 // GuaranteedSleeper makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
-func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
-	return &corev1.Pod{
+func GuaranteedSleeper(opts ...func(pod *corev1.Pod)) *corev1.Pod {
+	p := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "sleeper-gu-pod",
 		},
@@ -54,20 +53,23 @@ func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
 			RestartPolicy: corev1.RestartPolicyNever,
 			Containers: []corev1.Container{
 				{
 					Name:  "sleeper-gu-cnt",
 					Image: PauseImage,
-					Resources: corev1.ResourceRequirements{
-						Limits: corev1.ResourceList{
-							// we use 1 core because that's the minimal meaningful quantity
-							corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit),
-							// any random reasonable amount is fine
-							corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
-						},
-					},
+					Resources: corev1.ResourceRequirements{},
 				},
 			},
 		},
 	}
+	for _, o := range opts {
+		o(p)
+	}
+	return p
+}
+
+func WithLimits(list corev1.ResourceList) func(p *corev1.Pod) {
+	return func(p *corev1.Pod) {
+		p.Spec.Containers[0].Resources.Limits = list
+	}
 }

 // BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough
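Because the option type is a plain func(*corev1.Pod), new knobs can be added without touching GuaranteedSleeper again. A hypothetical companion option in the same shape (not part of this commit) would set requests explicitly:

// Hypothetical WithRequests option, mirroring WithLimits (not in this
// commit): sets requests directly instead of relying on Kubernetes
// defaulting them from limits.
func WithRequests(list corev1.ResourceList) func(p *corev1.Pod) {
	return func(p *corev1.Pod) {
		p.Spec.Containers[0].Resources.Requests = list
	}
}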
|