
e2e: move pod utils to a separate package

By moving these utils into a separate package,
we can make the function names shorter and clearer.

For example, instead of:
```
testutils.NFDWorkerPod(opts...)
testutils.NFDMasterPod(opts...)
testutils.SpecWithContainerImage(...)
```
we'll have:
```
testpod.NFDWorker(opts...)
testpod.NFDMaster(opts...)
testpod.SpecWithContainerImage(...)
```

It will also make the package more isolated and portable.
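To illustrate the pattern behind these helpers, here is a minimal, self-contained sketch of the functional-option style the new package uses. The `SpecOption` and `SpecWithContainerImage` shapes mirror the definitions in this commit; the `main` function, the base pod spec, and the image name are purely hypothetical.

```
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// SpecOption mutates a pod spec in place; callers pass any number of
// options, and they are applied in order.
type SpecOption func(spec *corev1.PodSpec)

// SpecWithContainerImage returns a SpecOption that sets the image of the
// first container, like the helper in this commit.
func SpecWithContainerImage(image string) SpecOption {
	return func(spec *corev1.PodSpec) {
		spec.Containers[0].Image = image
	}
}

func main() {
	// Hypothetical base spec; the real helpers build complete NFD pod specs.
	spec := &corev1.PodSpec{Containers: []corev1.Container{{Name: "nfd-worker"}}}
	for _, opt := range []SpecOption{
		SpecWithContainerImage("example.com/nfd:test"), // hypothetical image
	} {
		opt(spec)
	}
	fmt.Println(spec.Containers[0].Image) // prints "example.com/nfd:test"
}
```

This style keeps one constructor per pod kind (`NFDMaster`, `NFDWorker`, ...) while each test tweaks only what it needs, which is what allows the shorter call sites shown above.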

Signed-off-by: Talor Itzhak <titzhak@redhat.com>
Author: Talor Itzhak <titzhak@redhat.com>
Date: 2022-11-24 12:59:38 +02:00
Parent: 87573b08ba
Commit: 6364803b0c
3 changed files with 73 additions and 68 deletions

@@ -37,10 +37,12 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
nfdv1alpha1 "sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1"
nfdclient "sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned"
"sigs.k8s.io/node-feature-discovery/source/custom"
testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)
var (
@@ -111,8 +113,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
// Launch nfd-master
By("Creating nfd master pod and nfd-master service")
imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
masterPod = f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))
// Create nfd-master service
nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -154,11 +156,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
// Launch nfd-worker
By("Creating a nfd worker pod")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
}
workerPod := testutils.NFDWorkerPod(podSpecOpts...)
workerPod := testpod.NFDWorker(podSpecOpts...)
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
@@ -204,13 +206,13 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
fConf := cfg.DefaultFeatures
By("Creating nfd-worker daemonset")
podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
By("Getting node objects")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
@@ -334,18 +336,18 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
Expect(err).NotTo(HaveOccurred())
By("Creating nfd-worker daemonset with configmap mounted")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
testutils.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
testpod.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
By("Getting target node and checking labels")
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
@@ -417,16 +419,16 @@ core:
Expect(err).NotTo(HaveOccurred())
By("Creating nfd-worker daemonset")
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
expected := map[string]string{
"feature.node.kubernetes.io/e2e-flag-test-1": "true",

@@ -39,6 +39,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)
var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
@@ -71,8 +72,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))
// Create nfd-master service
masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -86,7 +87,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
@@ -119,8 +120,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))
podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
})
It("should fill the node resource topologies CR with the data", func() {
@@ -133,12 +134,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
sleeperPod := testutils.BestEffortSleeperPod()
sleeperPod := testpod.BestEffortSleeper()
podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -173,12 +174,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
sleeperPod := testutils.GuaranteedSleeperPod("500m")
sleeperPod := testpod.GuaranteedSleeper("500m")
podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -219,7 +220,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming exclusive CPUs")
sleeperPod := testutils.GuaranteedSleeperPod("1000m")
sleeperPod := testpod.GuaranteedSleeper("1000m")
// in case there is more than a single node in the cluster
// we need to set the node name, so we'll have certainty about
// which node we need to examine
@@ -228,7 +229,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
defer testutils.DeletePodsAsync(f, podMap)
defer testpod.DeleteAsync(f, podMap)
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha1.NodeResourceTopology
@@ -274,11 +275,11 @@ excludeList:
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))
podSpecOpts := []testutils.PodSpecOption{
testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
podSpecOpts := []testpod.SpecOption{
testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
})
It("noderesourcetopology should not advertise the memory resource", func() {

@@ -1,5 +1,5 @@
/*
Copyright 2018-2022 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
package pod
import (
"context"
@@ -35,6 +35,8 @@ import (
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/pointer"
"sigs.k8s.io/node-feature-discovery/test/e2e/utils"
)
var pullIfNotPresent = flag.Bool("nfd.pull-if-not-present", false, "Pull Images if not present - not always")
@@ -43,8 +45,8 @@ const (
PauseImage = "registry.k8s.io/pause"
)
// GuarenteedSleeperPod makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod {
// GuaranteedSleeper makes a Guaranteed QoS class Pod object which sleeps forever but requires `cpuLimit` exclusive CPUs.
func GuaranteedSleeper(cpuLimit string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "sleeper-gu-pod",
@@ -69,8 +71,8 @@ func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod {
}
}
// BestEffortSleeperPod makes a Best Effort QoS class Pod object which sleeps long enough
func BestEffortSleeperPod() *corev1.Pod {
// BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough
func BestEffortSleeper() *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "sleeper-be-pod",
@@ -87,8 +89,8 @@ func BestEffortSleeperPod() *corev1.Pod {
}
}
// DeletePodsAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
// DeleteAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
var wg sync.WaitGroup
for _, pod := range podMap {
wg.Add(1)
@@ -96,14 +98,14 @@ func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
DeletePodSyncByName(f, podName)
DeleteSyncByName(f, podName)
}(pod.Namespace, pod.Name)
}
wg.Wait()
}
// DeletePodSyncByName deletes the pod identified by `podName` in the current namespace
func DeletePodSyncByName(f *framework.Framework, podName string) {
// DeleteSyncByName deletes the pod identified by `podName` in the current namespace
func DeleteSyncByName(f *framework.Framework, podName string) {
gp := int64(0)
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
@@ -111,10 +113,10 @@ func DeletePodSyncByName(f *framework.Framework, podName string) {
f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
}
type PodSpecOption func(spec *corev1.PodSpec)
type SpecOption func(spec *corev1.PodSpec)
// NFDMasterPod provide NFD master pod definition
func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod {
// NFDMaster provides the NFD master pod definition
func NFDMaster(opts ...SpecOption) *corev1.Pod {
yes := true
no := false
p := &corev1.Pod{
@@ -163,13 +165,13 @@ func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod {
return p
}
// NFDWorkerPod provides NFD worker pod definition
func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod {
// NFDWorker provides NFD worker pod definition
func NFDWorker(opts ...SpecOption) *corev1.Pod {
p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-" + string(uuid.NewUUID()),
},
Spec: *nfdWorkerPodSpec(opts...),
Spec: *nfdWorkerSpec(opts...),
}
p.Spec.RestartPolicy = corev1.RestartPolicyNever
@@ -178,17 +180,17 @@ func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod {
}
// NFDWorkerDaemonSet provides the NFD daemon set worker definition
func NFDWorkerDaemonSet(opts ...PodSpecOption) *appsv1.DaemonSet {
return newDaemonSet("nfd-worker", nfdWorkerPodSpec(opts...))
func NFDWorkerDaemonSet(opts ...SpecOption) *appsv1.DaemonSet {
return newDaemonSet("nfd-worker", nfdWorkerSpec(opts...))
}
// NFDTopologyUpdaterDaemonSet provides the NFD daemon set topology updater
func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, opts ...PodSpecOption) *appsv1.DaemonSet {
return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterPodSpec(kc, opts...))
func NFDTopologyUpdaterDaemonSet(kc utils.KubeletConfig, opts ...SpecOption) *appsv1.DaemonSet {
return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterSpec(kc, opts...))
}
// SpecWithContainerImage returns a PodSpecOption that sets the image used by the first container.
func SpecWithContainerImage(image string) PodSpecOption {
// SpecWithContainerImage returns a SpecOption that sets the image used by the first container.
func SpecWithContainerImage(image string) SpecOption {
return func(spec *corev1.PodSpec) {
// NOTE: we might want to make the container number a parameter
cnt := &spec.Containers[0]
@@ -196,8 +198,8 @@ func SpecWithContainerImage(image string) PodSpecOption {
}
}
// SpecWithContainerExtraArgs returns a PodSpecOption that adds extra args to the first container.
func SpecWithContainerExtraArgs(args ...string) PodSpecOption {
// SpecWithContainerExtraArgs returns a SpecOption that adds extra args to the first container.
func SpecWithContainerExtraArgs(args ...string) SpecOption {
return func(spec *corev1.PodSpec) {
// NOTE: we might want to make the container number a parameter
cnt := &spec.Containers[0]
@@ -205,9 +207,9 @@ func SpecWithContainerExtraArgs(args ...string) PodSpecOption {
}
}
// SpecWithMasterNodeSelector returns a PodSpecOption that modifies the pod to
// SpecWithMasterNodeSelector returns a SpecOption that modifies the pod to
// be run on a control plane node of the cluster.
func SpecWithMasterNodeSelector(args ...string) PodSpecOption {
func SpecWithMasterNodeSelector(args ...string) SpecOption {
return func(spec *corev1.PodSpec) {
spec.NodeSelector["node-role.kubernetes.io/control-plane"] = ""
spec.Tolerations = append(spec.Tolerations,
@@ -220,8 +222,8 @@ func SpecWithMasterNodeSelector(args ...string) PodSpecOption {
}
}
// SpecWithConfigMap returns a PodSpecOption that mounts a configmap to the first container.
func SpecWithConfigMap(name, mountPath string) PodSpecOption {
// SpecWithConfigMap returns a SpecOption that mounts a configmap to the first container.
func SpecWithConfigMap(name, mountPath string) SpecOption {
return func(spec *corev1.PodSpec) {
spec.Volumes = append(spec.Volumes,
corev1.Volume{
@@ -265,7 +267,7 @@ func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
}
}
func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
func nfdWorkerSpec(opts ...SpecOption) *corev1.PodSpec {
yes := true
no := false
p := &corev1.PodSpec{
@@ -380,7 +382,7 @@ func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
return p
}
func nfdTopologyUpdaterPodSpec(kc KubeletConfig, opts ...PodSpecOption) *corev1.PodSpec {
func nfdTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.PodSpec {
p := &corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -472,10 +474,10 @@ func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType {
return hostPathType
}
// WaitForPodsReady waits for the pods to become ready.
// WaitForReady waits for the pods to become ready.
// NOTE: copied from k8s v1.22, after which it was removed from there.
// Convenient for checking that all pods of a daemonset are ready.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
func WaitForReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
const poll = 2 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}