From 6364803b0c3df67b7f6562c9edee7dc58ec2eca8 Mon Sep 17 00:00:00 2001 From: Talor Itzhak Date: Thu, 24 Nov 2022 12:59:38 +0200 Subject: [PATCH 1/3] e2e: move pod utils to a separate package By moving those utils into a separate package, we can make the function names shorter and clearer. For example, instead of: ``` testutils.NFDWorkerPod(opts...) testutils.NFDMasterPod(opts...) testutils.SpecWithContainerImage(...) ``` we'll have: ``` testpod.NFDWorker(opts...) testpod.NFDMaster(opts...) testpod.SpecWithContainerImage(...) ``` It will also make the package more isolated and portable. Signed-off-by: Talor Itzhak --- test/e2e/node_feature_discovery.go | 42 +++++++++--------- test/e2e/topology_updater.go | 31 +++++++------- test/e2e/utils/{ => pod}/pod.go | 68 +++++++++++++++--------------- 3 files changed, 73 insertions(+), 68 deletions(-) rename test/e2e/utils/{ => pod}/pod.go (83%) diff --git a/test/e2e/node_feature_discovery.go b/test/e2e/node_feature_discovery.go index 93fcefdd2..78ec5b8c3 100644 --- a/test/e2e/node_feature_discovery.go +++ b/test/e2e/node_feature_discovery.go @@ -37,10 +37,12 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + nfdv1alpha1 "sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1" nfdclient "sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned" "sigs.k8s.io/node-feature-discovery/source/custom" testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils" + testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod" ) var ( @@ -111,8 +113,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() { // Launch nfd-master By("Creating nfd master pod and nfd-master service") - imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)) - masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt)) + imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)) + masterPod = f.PodClient().CreateSync(testpod.NFDMaster(imageOpt)) // Create nfd-master service nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name) @@ -154,11 +156,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() { // Launch nfd-worker By("Creating a nfd worker pod") - podSpecOpts := []testutils.PodSpecOption{ - testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), - testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"), + podSpecOpts := []testpod.SpecOption{ + testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), + testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"), } - workerPod := testutils.NFDWorkerPod(podSpecOpts...) + workerPod := testpod.NFDWorker(podSpecOpts...) workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -204,13 +206,13 @@ var _ = SIGDescribe("Node Feature Discovery", func() { fConf := cfg.DefaultFeatures By("Creating nfd-worker daemonset") - podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} - workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...) + podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} + workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) 
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("Waiting for daemonset pods to be ready") - Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) + Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) By("Getting node objects") nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) @@ -334,18 +336,18 @@ var _ = SIGDescribe("Node Feature Discovery", func() { Expect(err).NotTo(HaveOccurred()) By("Creating nfd-worker daemonset with configmap mounted") - podSpecOpts := []testutils.PodSpecOption{ - testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), - testutils.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")), - testutils.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")), + podSpecOpts := []testpod.SpecOption{ + testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), + testpod.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")), + testpod.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")), } - workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...) + workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("Waiting for daemonset pods to be ready") - Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) + Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) By("Getting target node and checking labels") targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{}) @@ -417,16 +419,16 @@ core: Expect(err).NotTo(HaveOccurred()) By("Creating nfd-worker daemonset") - podSpecOpts := []testutils.PodSpecOption{ - testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), - testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), + podSpecOpts := []testpod.SpecOption{ + testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), + testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), } - workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...) + workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) 
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("Waiting for daemonset pods to be ready") - Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) + Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) expected := map[string]string{ "feature.node.kubernetes.io/e2e-flag-test-1": "true", diff --git a/test/e2e/topology_updater.go b/test/e2e/topology_updater.go index f8b1954f3..d67cbf185 100644 --- a/test/e2e/topology_updater.go +++ b/test/e2e/topology_updater.go @@ -39,6 +39,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils" + testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod" ) var _ = SIGDescribe("Node Feature Discovery topology updater", func() { @@ -71,8 +72,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred()) - imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)) - f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt)) + imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)) + f.PodClient().CreateSync(testpod.NFDMaster(imageOpt)) // Create nfd-master service masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name) @@ -86,7 +87,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { Expect(err).NotTo(HaveOccurred()) By("Waiting for daemonset pods to be ready") - Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) + Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred()) label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]}) pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()}) @@ -119,8 +120,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { kcfg := cfg.GetKubeletConfig() By(fmt.Sprintf("Using config (%#v)", kcfg)) - podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} - topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) + podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} + topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) 
}) It("should fill the node resource topologies CR with the data", func() { @@ -133,12 +134,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By("getting the initial topology information") initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name) By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)") - sleeperPod := testutils.BestEffortSleeperPod() + sleeperPod := testpod.BestEffortSleeper() podMap := make(map[string]*corev1.Pod) pod := f.PodClient().CreateSync(sleeperPod) podMap[pod.Name] = pod - defer testutils.DeletePodsAsync(f, podMap) + defer testpod.DeleteAsync(f, podMap) cooldown := 30 * time.Second By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown)) @@ -173,12 +174,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By("getting the initial topology information") initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name) By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)") - sleeperPod := testutils.GuaranteedSleeperPod("500m") + sleeperPod := testpod.GuaranteedSleeper("500m") podMap := make(map[string]*corev1.Pod) pod := f.PodClient().CreateSync(sleeperPod) podMap[pod.Name] = pod - defer testutils.DeletePodsAsync(f, podMap) + defer testpod.DeleteAsync(f, podMap) cooldown := 30 * time.Second By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown)) @@ -219,7 +220,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By("getting the initial topology information") initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name) By("creating a pod consuming exclusive CPUs") - sleeperPod := testutils.GuaranteedSleeperPod("1000m") + sleeperPod := testpod.GuaranteedSleeper("1000m") // in case there is more than a single node in the cluster // we need to set the node name, so we'll have certainty about // which node we need to examine @@ -228,7 +229,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { podMap := make(map[string]*corev1.Pod) pod := f.PodClient().CreateSync(sleeperPod) podMap[pod.Name] = pod - defer testutils.DeletePodsAsync(f, podMap) + defer testpod.DeleteAsync(f, podMap) By("checking the changes in the updated topology") var finalNodeTopo *v1alpha1.NodeResourceTopology @@ -274,11 +275,11 @@ excludeList: kcfg := cfg.GetKubeletConfig() By(fmt.Sprintf("Using config (%#v)", kcfg)) - podSpecOpts := []testutils.PodSpecOption{ - testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), - testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), + podSpecOpts := []testpod.SpecOption{ + testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), + testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), } - topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) + topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) }) It("noderesourcetopology should not advertise the memory resource", func() { diff --git a/test/e2e/utils/pod.go b/test/e2e/utils/pod/pod.go similarity index 83% rename from test/e2e/utils/pod.go rename to test/e2e/utils/pod/pod.go index 6741f56b6..2751fa93f 100644 --- a/test/e2e/utils/pod.go +++ b/test/e2e/utils/pod/pod.go @@ -1,5 +1,5 @@ /* -Copyright 2018-2022 The Kubernetes Authors. 
+Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package pod import ( "context" @@ -35,6 +35,8 @@ import ( "k8s.io/kubectl/pkg/util/podutils" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/utils/pointer" + + "sigs.k8s.io/node-feature-discovery/test/e2e/utils" ) var pullIfNotPresent = flag.Bool("nfd.pull-if-not-present", false, "Pull Images if not present - not always") @@ -43,8 +45,8 @@ const ( PauseImage = "registry.k8s.io/pause" ) -// GuarenteedSleeperPod makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs. -func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod { +// GuaranteedSleeper makes a Guaranteed QoS class Pod object which sleeps forever but requires `cpuLimit` exclusive CPUs. +func GuaranteedSleeper(cpuLimit string) *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "sleeper-gu-pod", @@ -69,8 +71,8 @@ func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod { } } -// BestEffortSleeperPod makes a Best Effort QoS class Pod object which sleeps long enough -func BestEffortSleeperPod() *corev1.Pod { +// BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough +func BestEffortSleeper() *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "sleeper-be-pod", @@ -87,8 +89,8 @@ func BestEffortSleeperPod() *corev1.Pod { } } -// DeletePodsAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends. -func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) { +// DeleteAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longest operation ends. 
+func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) { var wg sync.WaitGroup for _, pod := range podMap { wg.Add(1) @@ -96,14 +98,14 @@ func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - DeletePodSyncByName(f, podName) + DeleteSyncByName(f, podName) }(pod.Namespace, pod.Name) } wg.Wait() } -// DeletePodSyncByName deletes the pod identified by `podName` in the current namespace -func DeletePodSyncByName(f *framework.Framework, podName string) { +// DeleteSyncByName deletes the pod identified by `podName` in the current namespace +func DeleteSyncByName(f *framework.Framework, podName string) { gp := int64(0) delOpts := metav1.DeleteOptions{ GracePeriodSeconds: &gp, @@ -111,10 +113,10 @@ func DeletePodSyncByName(f *framework.Framework, podName string) { f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout) } -type PodSpecOption func(spec *corev1.PodSpec) +type SpecOption func(spec *corev1.PodSpec) -// NFDMasterPod provide NFD master pod definition -func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod { +// NFDMaster provides NFD master pod definition +func NFDMaster(opts ...SpecOption) *corev1.Pod { yes := true no := false p := &corev1.Pod{ @@ -163,13 +165,13 @@ func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod { return p } -// NFDWorkerPod provides NFD worker pod definition -func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod { +// NFDWorker provides NFD worker pod definition +func NFDWorker(opts ...SpecOption) *corev1.Pod { p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "nfd-worker-" + string(uuid.NewUUID()), }, - Spec: *nfdWorkerPodSpec(opts...), + Spec: *nfdWorkerSpec(opts...), } p.Spec.RestartPolicy = corev1.RestartPolicyNever @@ -178,17 +180,17 @@ func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod { } // NFDWorkerDaemonSet provides the NFD daemon set worker definition -func NFDWorkerDaemonSet(opts ...PodSpecOption) *appsv1.DaemonSet { - return newDaemonSet("nfd-worker", nfdWorkerPodSpec(opts...)) +func NFDWorkerDaemonSet(opts ...SpecOption) *appsv1.DaemonSet { + return newDaemonSet("nfd-worker", nfdWorkerSpec(opts...)) } // NFDTopologyUpdaterDaemonSet provides the NFD daemon set topology updater -func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, opts ...PodSpecOption) *appsv1.DaemonSet { - return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterPodSpec(kc, opts...)) +func NFDTopologyUpdaterDaemonSet(kc utils.KubeletConfig, opts ...SpecOption) *appsv1.DaemonSet { + return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterSpec(kc, opts...)) } -// SpecWithContainerImage returns a PodSpecOption that sets the image used by the first container. -func SpecWithContainerImage(image string) PodSpecOption { +// SpecWithContainerImage returns a SpecOption that sets the image used by the first container. +func SpecWithContainerImage(image string) SpecOption { return func(spec *corev1.PodSpec) { // NOTE: we might want to make the container number a parameter cnt := &spec.Containers[0] @@ -196,8 +198,8 @@ -// SpecWithContainerExtraArgs returns a PodSpecOption that adds extra args to the first container. +// SpecWithContainerExtraArgs returns a SpecOption that adds extra args to the first container. 
+func SpecWithContainerExtraArgs(args ...string) SpecOption { return func(spec *corev1.PodSpec) { // NOTE: we might want to make the container number a parameter cnt := &spec.Containers[0] @@ -205,9 +207,9 @@ func SpecWithContainerExtraArgs(args ...string) PodSpecOption { } } -// SpecWithMasterNodeSelector returns a PodSpecOption that modifies the pod to +// SpecWithMasterNodeSelector returns a SpecOption that modifies the pod to // be run on a control plane node of the cluster. -func SpecWithMasterNodeSelector(args ...string) PodSpecOption { +func SpecWithMasterNodeSelector(args ...string) SpecOption { return func(spec *corev1.PodSpec) { spec.NodeSelector["node-role.kubernetes.io/control-plane"] = "" spec.Tolerations = append(spec.Tolerations, @@ -220,8 +222,8 @@ func SpecWithMasterNodeSelector(args ...string) PodSpecOption { } } -// SpecWithConfigMap returns a PodSpecOption that mounts a configmap to the first container. -func SpecWithConfigMap(name, mountPath string) PodSpecOption { +// SpecWithConfigMap returns a SpecOption that mounts a configmap to the first container. +func SpecWithConfigMap(name, mountPath string) SpecOption { return func(spec *corev1.PodSpec) { spec.Volumes = append(spec.Volumes, corev1.Volume{ @@ -265,7 +267,7 @@ func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet { } } -func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec { +func nfdWorkerSpec(opts ...SpecOption) *corev1.PodSpec { yes := true no := false p := &corev1.PodSpec{ @@ -380,7 +382,7 @@ func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec { return p } -func nfdTopologyUpdaterPodSpec(kc KubeletConfig, opts ...PodSpecOption) *corev1.PodSpec { +func nfdTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.PodSpec { p := &corev1.PodSpec{ Containers: []corev1.Container{ { @@ -472,10 +474,10 @@ func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType { return hostPathType } -// WaitForPodsReady waits for the pods to become ready. +// WaitForReady waits for the pods to become ready. // NOTE: copied from k8s v1.22 after which is was removed from there. // Convenient for checking that all pods of a daemonset are ready. -func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { +func WaitForReady(c clientset.Interface, ns, name string, minReadySeconds int) error { const poll = 2 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} From 9c725c378fbda2a85a40e110f656105bea8cfaf0 Mon Sep 17 00:00:00 2001 From: Talor Itzhak Date: Thu, 24 Nov 2022 13:23:34 +0200 Subject: [PATCH 2/3] e2e: separate daemonset functions from pod The new package should provide pod-related utilities, hence let's move all the daemonset-related utilities to their own package as well. 
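For example, instead of: ``` testpod.NFDWorkerDaemonSet(opts...) testpod.NFDTopologyUpdaterDaemonSet(kc, opts...) ``` we'll have: ``` testds.NFDWorker(opts...) testds.NFDTopologyUpdater(kc, opts...) ```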
Signed-off-by: Talor Itzhak --- test/e2e/node_feature_discovery.go | 7 ++-- test/e2e/topology_updater.go | 5 ++- test/e2e/utils/daemonset/daemonset.go | 58 +++++++++++++++++++++++++++ test/e2e/utils/pod/pod.go | 34 +--------------- 4 files changed, 66 insertions(+), 38 deletions(-) create mode 100644 test/e2e/utils/daemonset/daemonset.go diff --git a/test/e2e/node_feature_discovery.go b/test/e2e/node_feature_discovery.go index 78ec5b8c3..8ea5647ca 100644 --- a/test/e2e/node_feature_discovery.go +++ b/test/e2e/node_feature_discovery.go @@ -42,6 +42,7 @@ import ( nfdclient "sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned" "sigs.k8s.io/node-feature-discovery/source/custom" testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils" + testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset" testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod" ) @@ -207,7 +208,7 @@ var _ = SIGDescribe("Node Feature Discovery", func() { By("Creating nfd-worker daemonset") podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} - workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) + workerDS := testds.NFDWorker(podSpecOpts...) workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -341,7 +342,7 @@ var _ = SIGDescribe("Node Feature Discovery", func() { testpod.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")), testpod.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")), } - workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) + workerDS := testds.NFDWorker(podSpecOpts...) workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -423,7 +424,7 @@ core: testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), } - workerDS := testpod.NFDWorkerDaemonSet(podSpecOpts...) + workerDS := testds.NFDWorker(podSpecOpts...) workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/topology_updater.go b/test/e2e/topology_updater.go index d67cbf185..043983a99 100644 --- a/test/e2e/topology_updater.go +++ b/test/e2e/topology_updater.go @@ -39,6 +39,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils" + testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset" testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod" ) @@ -121,7 +122,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By(fmt.Sprintf("Using config (%#v)", kcfg)) podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))} - topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) + topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...) }) It("should fill the node resource topologies CR with the data", func() { @@ -279,7 +280,7 @@ excludeList: testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)), testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"), } - topologyUpdaterDaemonSet = testpod.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...) 
+ topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...) }) It("noderesourcetopology should not advertise the memory resource", func() { diff --git a/test/e2e/utils/daemonset/daemonset.go b/test/e2e/utils/daemonset/daemonset.go new file mode 100644 index 000000000..920f0de0c --- /dev/null +++ b/test/e2e/utils/daemonset/daemonset.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package daemonset + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + + "sigs.k8s.io/node-feature-discovery/test/e2e/utils" + "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod" +) + +// NFDWorker provides the NFD daemon set worker definition +func NFDWorker(opts ...pod.SpecOption) *appsv1.DaemonSet { + return new("nfd-worker", &pod.NFDWorker(opts...).Spec) +} + +// NFDTopologyUpdater provides the NFD daemon set topology updater +func NFDTopologyUpdater(kc utils.KubeletConfig, opts ...pod.SpecOption) *appsv1.DaemonSet { + return new("nfd-topology-updater", pod.NFDTopologyUpdaterSpec(kc, opts...)) +} + +// new provides the new daemon set +func new(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name + "-" + string(uuid.NewUUID()), + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"name": name}, + }, + Spec: *podSpec, + }, + MinReadySeconds: 5, + }, + } +} diff --git a/test/e2e/utils/pod/pod.go b/test/e2e/utils/pod/pod.go index 2751fa93f..75d39aeaa 100644 --- a/test/e2e/utils/pod/pod.go +++ b/test/e2e/utils/pod/pod.go @@ -24,7 +24,6 @@ import ( "github.com/onsi/ginkgo/v2" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -179,16 +178,6 @@ func NFDWorker(opts ...SpecOption) *corev1.Pod { return p } -// NFDWorkerDaemonSet provides the NFD daemon set worker definition -func NFDWorkerDaemonSet(opts ...SpecOption) *appsv1.DaemonSet { - return newDaemonSet("nfd-worker", nfdWorkerSpec(opts...)) -} - -// NFDTopologyUpdaterDaemonSet provides the NFD daemon set topology updater -func NFDTopologyUpdaterDaemonSet(kc utils.KubeletConfig, opts ...SpecOption) *appsv1.DaemonSet { - return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterSpec(kc, opts...)) -} - // SpecWithContainerImage returns a SpecOption that sets the image used by the first container. 
func SpecWithContainerImage(image string) SpecOption { return func(spec *corev1.PodSpec) { @@ -246,27 +235,6 @@ func SpecWithConfigMap(name, mountPath string) SpecOption { } } -// newDaemonSet provide the new daemon set -func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet { - return &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name + "-" + string(uuid.NewUUID()), - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"name": name}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"name": name}, - }, - Spec: *podSpec, - }, - MinReadySeconds: 5, - }, - } -} - func nfdWorkerSpec(opts ...SpecOption) *corev1.PodSpec { yes := true no := false @@ -382,7 +350,7 @@ func nfdWorkerSpec(opts ...SpecOption) *corev1.PodSpec { return p } -func nfdTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.PodSpec { +func NFDTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.PodSpec { p := &corev1.PodSpec{ Containers: []corev1.Container{ { From 0a065629306d538442316ba7340618354703f4e4 Mon Sep 17 00:00:00 2001 From: Talor Itzhak Date: Mon, 28 Nov 2022 20:22:56 +0200 Subject: [PATCH 3/3] e2e: simplify sleeper pod Make it more flexible by allowing both CPU and memory values to be modified, using functional options Signed-off-by: Talor Itzhak --- test/e2e/topology_updater.go | 15 +++++++++++++-- test/e2e/utils/pod/pod.go | 28 +++++++++++++++------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/test/e2e/topology_updater.go b/test/e2e/topology_updater.go index 043983a99..05e2b65f2 100644 --- a/test/e2e/topology_updater.go +++ b/test/e2e/topology_updater.go @@ -19,6 +19,7 @@ package e2e import ( "context" "fmt" + "k8s.io/apimachinery/pkg/api/resource" "time" . 
"github.com/onsi/ginkgo/v2" @@ -175,7 +176,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By("getting the initial topology information") initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name) By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)") - sleeperPod := testpod.GuaranteedSleeper("500m") + sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits( + corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + // any random reasonable amount is fine + corev1.ResourceMemory: resource.MustParse("100Mi"), + })) podMap := make(map[string]*corev1.Pod) pod := f.PodClient().CreateSync(sleeperPod) @@ -221,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() { By("getting the initial topology information") initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name) By("creating a pod consuming exclusive CPUs") - sleeperPod := testpod.GuaranteedSleeper("1000m") + sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits( + corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + // any random reasonable amount is fine + corev1.ResourceMemory: resource.MustParse("100Mi"), + })) // in case there is more than a single node in the cluster // we need to set the node name, so we'll have certainty about // which node we need to examine diff --git a/test/e2e/utils/pod/pod.go b/test/e2e/utils/pod/pod.go index 75d39aeaa..698d94b4d 100644 --- a/test/e2e/utils/pod/pod.go +++ b/test/e2e/utils/pod/pod.go @@ -25,7 +25,6 @@ import ( "github.com/onsi/ginkgo/v2" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" @@ -45,8 +44,8 @@ const ( ) // GuaranteedSleeper makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs. -func GuaranteedSleeper(cpuLimit string) *corev1.Pod { - return &corev1.Pod{ +func GuaranteedSleeper(opts ...func(pod *corev1.Pod)) *corev1.Pod { + p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "sleeper-gu-pod", }, @@ -54,20 +53,23 @@ func GuaranteedSleeper(cpuLimit string) *corev1.Pod { RestartPolicy: corev1.RestartPolicyNever, Containers: []corev1.Container{ { - Name: "sleeper-gu-cnt", - Image: PauseImage, - Resources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - // we use 1 core because that's the minimal meaningful quantity - corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit), - // any random reasonable amount is fine - corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"), - }, - }, + Name: "sleeper-gu-cnt", + Image: PauseImage, + Resources: corev1.ResourceRequirements{}, }, }, }, } + for _, o := range opts { + o(p) + } + return p +} + +func WithLimits(list corev1.ResourceList) func(p *corev1.Pod) { + return func(p *corev1.Pod) { + p.Spec.Containers[0].Resources.Limits = list + } } // BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough