
test/e2e: adapt tests to updates in k8s e2e-framework

Add context to functions that now require it. Also, replace the
deprecated wait.Poll* calls with wait.PollUntilContextTimeout.

(cherry picked from commit 87371e2df0)
Markus Lehtonen 2023-04-18 15:47:20 +03:00
parent 7c08f44ac7
commit f64467be78
5 changed files with 22 additions and 22 deletions
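
The wait.Poll* replacement throughout this commit follows the pattern below. This is a minimal sketch of the migration, not code from the repository: it assumes a recent k8s.io/apimachinery release where wait.Poll and wait.PollImmediate are deprecated in favour of the context-aware wait.PollUntilContextTimeout, and the helper name waitUntilGone and its condition argument are illustrative stand-ins for the Get-and-check closures used in the e2e utilities.

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilGone polls the supplied condition once per second until it reports
// done or the one-minute timeout expires, honouring cancellation of ctx.
func waitUntilGone(ctx context.Context, gone func(ctx context.Context) (bool, error)) error {
	// Deprecated form (no context, cannot be cancelled by the caller):
	//   err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { ... })
	// Context-aware replacement: the caller's ctx is threaded into the poll loop
	// and into the condition function. The fourth argument ("immediate") is false
	// for former wait.Poll calls and true for former wait.PollImmediate calls.
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, false, gone)
}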


@@ -237,14 +237,14 @@ var _ = SIGDescribe("NFD master and worker", func() {
testpod.SpecWithContainerImage(dockerImage()),
)...)
-masterPod := e2epod.NewPodClient(f).CreateSync(testpod.NFDMaster(podSpecOpts...))
+masterPod := e2epod.NewPodClient(f).CreateSync(ctx, testpod.NFDMaster(podSpecOpts...))
// Create nfd-master service
nfdSvc, err := testutils.CreateService(ctx, f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-master pod to be running")
-Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())
+Expect(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())
By("Verifying the node where nfd-master is running")
// Get updated masterPod object (we want to know where it was scheduled)
@@ -256,7 +256,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
Expect(masterPodNode.Annotations).To(HaveKey(nfdv1alpha1.AnnotationNs + "/master.version"))
By("Waiting for the nfd-master service to be up")
-Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
+Expect(e2enetwork.WaitForService(ctx, f.ClientSet, f.Namespace.Name, nfdSvc.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
})
AfterEach(func(ctx context.Context) {
@@ -290,7 +290,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-worker pod to succeed")
-Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.Name, f.Namespace.Name)).NotTo(HaveOccurred())
+Expect(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, workerPod.Name, f.Namespace.Name)).NotTo(HaveOccurred())
workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, workerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
@@ -597,7 +597,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
)).NotTo(HaveOccurred())
By("Creating extra namespace")
-extraNs, err := f.CreateNamespace("node-feature-discvery-extra-ns", nil)
+extraNs, err := f.CreateNamespace(ctx, "node-feature-discvery-extra-ns", nil)
Expect(err).NotTo(HaveOccurred())
By("Create NodeFeature object in the extra namespace")


@@ -91,7 +91,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(ctx, pods.Items[0].Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
-kubeletConfig, err = kubelet.GetCurrentKubeletConfig(topologyUpdaterNode.Name, "", true)
+kubeletConfig, err = kubelet.GetCurrentKubeletConfig(ctx, topologyUpdaterNode.Name, "", true, false)
Expect(err).NotTo(HaveOccurred())
workerNodes, err = testutils.GetWorkerNodes(ctx, f)
@@ -130,9 +130,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
sleeperPod := testpod.BestEffortSleeper()
podMap := make(map[string]*corev1.Pod)
-pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
+pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
-defer testpod.DeleteAsync(f, podMap)
+defer testpod.DeleteAsync(ctx, f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -175,9 +175,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
}))
podMap := make(map[string]*corev1.Pod)
-pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
+pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
-defer testpod.DeleteAsync(f, podMap)
+defer testpod.DeleteAsync(ctx, f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -230,9 +230,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
sleeperPod.Spec.NodeName = topologyUpdaterNode.Name
podMap := make(map[string]*corev1.Pod)
-pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
+pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
-defer testpod.DeleteAsync(f, podMap)
+defer testpod.DeleteAsync(ctx, f, podMap)
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha2.NodeResourceTopology
@@ -292,9 +292,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
sleeperPod.Spec.NodeName = topologyUpdaterNode.Name
podMap := make(map[string]*corev1.Pod)
-pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
+pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
-defer testpod.DeleteAsync(f, podMap)
+defer testpod.DeleteAsync(ctx, f, podMap)
By("checking initial CR created")
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
@@ -312,7 +312,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
// which node we need to examine
sleeperPod2.Spec.NodeName = topologyUpdaterNode.Name
sleeperPod2.Name = sleeperPod2.Name + "2"
-pod2 := e2epod.NewPodClient(f).CreateSync(sleeperPod2)
+pod2 := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod2)
podMap[pod.Name] = pod2
By("checking the changes in the updated topology")


@@ -54,7 +54,7 @@ func CreateNfdCRDs(ctx context.Context, cli extclient.Interface) ([]*apiextensio
return nil, fmt.Errorf("failed to delete %q CRD: %w", crd.Name, err)
} else if err == nil {
// Wait for CRD deletion to complete before trying to re-create it
-err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
+err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, false, func(ctx context.Context) (bool, error) {
_, err = cli.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
if err == nil {
return false, nil


@@ -91,7 +91,7 @@ func CreateNodeResourceTopologies(ctx context.Context, extClient extclient.Inter
}
// It takes time for the delete operation, wait until the CRD completely gone
-if err = wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) {
+if err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
_, err = extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
if err == nil {
return false, nil


@@ -93,7 +93,7 @@ func BestEffortSleeper() *corev1.Pod {
}
// DeleteAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
-func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
+func DeleteAsync(ctx context.Context, f *framework.Framework, podMap map[string]*corev1.Pod) {
var wg sync.WaitGroup
for _, pod := range podMap {
wg.Add(1)
@@ -101,19 +101,19 @@ func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
-DeleteSyncByName(f, podName)
+DeleteSyncByName(ctx, f, podName)
}(pod.Namespace, pod.Name)
}
wg.Wait()
}
// DeleteSyncByName deletes the pod identified by `podName` in the current namespace
-func DeleteSyncByName(f *framework.Framework, podName string) {
+func DeleteSyncByName(ctx context.Context, f *framework.Framework, podName string) {
gp := int64(0)
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
-e2epod.NewPodClient(f).DeleteSync(podName, delOpts, e2epod.DefaultPodDeletionTimeout)
+e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, e2epod.DefaultPodDeletionTimeout)
}
type SpecOption func(spec *corev1.PodSpec)
@@ -468,7 +468,7 @@ func WaitForReady(ctx context.Context, c clientset.Interface, ns, name string, m
const poll = 2 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
-return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
+return wait.PollUntilContextTimeout(ctx, poll, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return false, nil