Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git (synced 2024-12-14 11:57:51 +00:00)
Update e2e test to work with updated dependencies

Signed-off-by: Artyom Lukianov <alukiano@redhat.com>

Parent: 6172c648e2  Commit: f363ba0e92

4 changed files with 34 additions and 325 deletions
@@ -17,6 +17,7 @@ limitations under the License.
 package apihelper
 
 import (
+	"context"
 	"encoding/json"
 
 	api "k8s.io/api/core/v1"
@@ -55,7 +56,7 @@ func (h K8sHelpers) GetClient() (*k8sclient.Clientset, error) {
 
 func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Node, error) {
 	// Get the node object using node name
-	node, err := cli.CoreV1().Nodes().Get(nodeName, meta_v1.GetOptions{})
+	node, err := cli.CoreV1().Nodes().Get(context.TODO(), nodeName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -64,12 +65,12 @@ func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Node, error) {
 }
 
 func (h K8sHelpers) GetNodes(cli *k8sclient.Clientset) (*api.NodeList, error) {
-	return cli.CoreV1().Nodes().List(meta_v1.ListOptions{})
+	return cli.CoreV1().Nodes().List(context.TODO(), meta_v1.ListOptions{})
 }
 
 func (h K8sHelpers) UpdateNode(c *k8sclient.Clientset, n *api.Node) error {
 	// Send the updated node to the apiserver.
-	_, err := c.CoreV1().Nodes().Update(n)
+	_, err := c.CoreV1().Nodes().Update(context.TODO(), n, meta_v1.UpdateOptions{})
 	if err != nil {
 		return err
 	}
@@ -81,7 +82,7 @@ func (h K8sHelpers) PatchStatus(c *k8sclient.Clientset, nodeName string, marshalable interface{}) error {
 	// Send the updated node to the apiserver.
 	patch, err := json.Marshal(marshalable)
 	if err == nil {
-		_, err = c.CoreV1().Nodes().Patch(nodeName, types.JSONPatchType, patch, "status")
+		_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.JSONPatchType, patch, meta_v1.PatchOptions{}, "status")
 	}
 
 	return err
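The change applied throughout this commit follows the client-go v0.18 (Kubernetes 1.18) API update: every typed-client verb now takes a context.Context as its first argument and a typed options struct (metav1.GetOptions, UpdateOptions, PatchOptions, and so on) as its last. Below is a minimal, self-contained sketch of the new call shape, assuming a standard kubeconfig-based clientset; the kubeconfig handling here is illustrative and not part of this commit.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the default kubeconfig location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cli, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.TODO()

	// Old: cli.CoreV1().Nodes().List(metav1.ListOptions{})
	// New: context first, options struct last.
	nodes, err := cli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, n := range nodes.Items {
		// Old: Update(node)  ->  New: Update(ctx, node, metav1.UpdateOptions{})
		// (shown as a comment only; no mutation is needed for this listing)
		fmt.Println(n.Name)
	}
}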
test/e2e/e2e.go (deleted, 295 lines)

@@ -1,295 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package e2e
-
-import (
-	"fmt"
-	"os"
-	"path"
-	"testing"
-	"time"
-
-	"k8s.io/klog"
-
-	"github.com/onsi/ginkgo"
-	"github.com/onsi/ginkgo/config"
-	"github.com/onsi/ginkgo/reporters"
-	"github.com/onsi/gomega"
-
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/component-base/logs"
-	"k8s.io/component-base/version"
-	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-	"k8s.io/kubernetes/test/e2e/manifest"
-	e2ereporters "k8s.io/kubernetes/test/e2e/reporters"
-	testutils "k8s.io/kubernetes/test/utils"
-	utilnet "k8s.io/utils/net"
-
-	clientset "k8s.io/client-go/kubernetes"
-)
-
-var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
-	setupSuite()
-	return nil
-}, func(data []byte) {
-	// Run on all Ginkgo nodes
-	setupSuitePerGinkgoNode()
-})
-
-var _ = ginkgo.SynchronizedAfterSuite(func() {
-	framework.CleanupSuite()
-}, func() {
-	framework.AfterSuiteActions()
-})
-
-// RunE2ETests checks configuration parameters (specified through flags) and then runs
-// E2E tests using the Ginkgo runner.
-// If a "report directory" is specified, one or more JUnit test reports will be
-// generated in this directory, and cluster logs will also be saved.
-// This function is called on each Ginkgo node in parallel mode.
-func RunE2ETests(t *testing.T) {
-	runtimeutils.ReallyCrash = true
-	logs.InitLogs()
-	defer logs.FlushLogs()
-
-	gomega.RegisterFailHandler(e2elog.Fail)
-	// Disable skipped tests unless they are explicitly requested.
-	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
-		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
-	}
-
-	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
-	var r []ginkgo.Reporter
-	if framework.TestContext.ReportDir != "" {
-		// TODO: we should probably only be trying to create this directory once
-		// rather than once-per-Ginkgo-node.
-		if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
-			klog.Errorf("Failed creating report directory: %v", err)
-		} else {
-			r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
-		}
-	}
-
-	// Stream the progress to stdout and optionally a URL accepting progress updates.
-	r = append(r, e2ereporters.NewProgressReporter(framework.TestContext.ProgressReportURL))
-
-	klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
-	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
-}
-
-// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
-// to flip to Ready, log its output and delete it.
-func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
-	path := "test/images/clusterapi-tester/pod.yaml"
-	framework.Logf("Parsing pod from %v", path)
-	p, err := manifest.PodFromManifest(path)
-	if err != nil {
-		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
-		return
-	}
-	p.Namespace = ns
-	if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
-		framework.Logf("Failed to create %v: %v", p.Name, err)
-		return
-	}
-	defer func() {
-		if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
-			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
-		}
-	}()
-	timeout := 5 * time.Minute
-	if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
-		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
-		return
-	}
-	logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
-	if err != nil {
-		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
-	} else {
-		framework.Logf("Output of clusterapi-tester:\n%v", logs)
-	}
-}
-
-// getDefaultClusterIPFamily obtains the default IP family of the cluster
-// using the Cluster IP address of the kubernetes service created in the default namespace
-// This unequivocally identifies the default IP family because services are single family
-// TODO: dual-stack may support multiple families per service
-// but we can detect if a cluster is dual stack because pods have two addresses (one per family)
-func getDefaultClusterIPFamily(c clientset.Interface) string {
-	// Get the ClusterIP of the kubernetes service created in the default namespace
-	svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
-	if err != nil {
-		framework.Failf("Failed to get kubernetes service ClusterIP: %v", err)
-	}
-
-	if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
-		return "ipv6"
-	}
-	return "ipv4"
-}
-
-// waitForDaemonSets for all daemonsets in the given namespace to be ready
-// (defined as all but 'allowedNotReadyNodes' pods associated with that
-// daemonset are ready).
-func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
-	start := time.Now()
-	framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
-		timeout, ns)
-
-	return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-		dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
-		if err != nil {
-			framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
-			if testutils.IsRetryableAPIError(err) {
-				return false, nil
-			}
-			return false, err
-		}
-		var notReadyDaemonSets []string
-		for _, ds := range dsList.Items {
-			framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
-			if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
-				notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
-			}
-		}
-
-		if len(notReadyDaemonSets) > 0 {
-			framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
-			return false, nil
-		}
-
-		return true, nil
-	})
-}
-
-// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
-// There are certain operations we only want to run once per overall test invocation
-// (such as deleting old namespaces, or verifying that all system pods are running.
-// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
-// to ensure that these operations only run on the first parallel Ginkgo node.
-//
-// This function takes two parameters: one function which runs on only the first Ginkgo node,
-// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
-// accepting the byte array.
-func setupSuite() {
-	// Run only on Ginkgo node 1
-
-	switch framework.TestContext.Provider {
-	case "gce", "gke":
-		framework.LogClusterImageSources()
-	}
-
-	c, err := framework.LoadClientset()
-	if err != nil {
-		klog.Fatal("Error loading client: ", err)
-	}
-
-	// Delete any namespaces except those created by the system. This ensures no
-	// lingering resources are left over from a previous test run.
-	if framework.TestContext.CleanStart {
-		deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
-			[]string{
-				metav1.NamespaceSystem,
-				metav1.NamespaceDefault,
-				metav1.NamespacePublic,
-				v1.NamespaceNodeLease,
-			})
-		if err != nil {
-			framework.Failf("Error deleting orphaned namespaces: %v", err)
-		}
-		klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
-		if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
-			framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
-		}
-	}
-
-	// In large clusters we may get to this point but still have a bunch
-	// of nodes without Routes created. Since this would make a node
-	// unschedulable, we need to wait until all of them are schedulable.
-	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
-
-	// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
-	if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
-		nodes, err := e2enode.GetReadySchedulableNodes(c)
-		framework.ExpectNoError(err)
-		framework.TestContext.CloudConfig.NumNodes = len(nodes.Items)
-	}
-
-	// Ensure all pods are running and ready before starting tests (otherwise,
-	// cluster infrastructure pods that are being pulled or started can block
-	// test pods from running, and tests that ensure all pods are running and
-	// ready will fail).
-	podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
-	// TODO: In large clusters, we often observe a non-starting pods due to
-	// #41007. To avoid those pods preventing the whole test runs (and just
-	// wasting the whole run), we allow for some not-ready pods (with the
-	// number equal to the number of allowed not-ready nodes).
-	if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
-		framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-		framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
-		runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
-		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
-	}
-
-	if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
-		framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
-	}
-
-	// Log the version of the server and this client.
-	framework.Logf("e2e test version: %s", version.Get().GitVersion)
-
-	dc := c.DiscoveryClient
-
-	serverVersion, serverErr := dc.ServerVersion()
-	if serverErr != nil {
-		framework.Logf("Unexpected server error retrieving version: %v", serverErr)
-	}
-	if serverVersion != nil {
-		framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
-	}
-
-	if framework.TestContext.NodeKiller.Enabled {
-		nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
-		go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
-	}
-}
-
-// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
-// There are certain operations we only want to run once per overall test invocation on each Ginkgo node
-// such as making some global variables accessible to all parallel executions
-// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
-// Ref: https://onsi.github.io/ginkgo/#parallel-specs
-func setupSuitePerGinkgoNode() {
-	// Obtain the default IP family of the cluster
-	// Some e2e test are designed to work on IPv4 only, this global variable
-	// allows to adapt those tests to work on both IPv4 and IPv6
-	// TODO: dual-stack
-	// the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters,
-	// and services use the primary IP family by default
-	c, err := framework.LoadClientset()
-	if err != nil {
-		klog.Fatal("Error loading client: ", err)
-	}
-	framework.TestContext.IPFamily = getDefaultClusterIPFamily(c)
-	framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily)
-}
@@ -23,6 +23,7 @@ import (
 	"testing"
 	"time"
 
+	"k8s.io/kubernetes/test/e2e"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -56,5 +57,5 @@ func TestMain(m *testing.M) {
 }
 
 func TestE2E(t *testing.T) {
-	RunE2ETests(t)
+	e2e.RunE2ETests(t)
 }
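With the local copy of the suite bootstrap deleted above, the test entry point now delegates to the upstream k8s.io/kubernetes/test/e2e package. The sketch below shows a hypothetical minimal shape of the test file after this change; the flag registration in TestMain is an assumption for illustration (the real TestMain is only partially visible in the hunk) and uses the upstream framework helpers.

package e2e

import (
	"flag"
	"os"
	"testing"

	"k8s.io/kubernetes/test/e2e"
	"k8s.io/kubernetes/test/e2e/framework"
)

func TestMain(m *testing.M) {
	// Register upstream e2e framework flags and parse them (simplified; the
	// real TestMain in this repo does more setup than shown here).
	framework.RegisterCommonFlags(flag.CommandLine)
	framework.RegisterClusterFlags(flag.CommandLine)
	flag.Parse()
	os.Exit(m.Run())
}

func TestE2E(t *testing.T) {
	// Suite setup/teardown now comes from k8s.io/kubernetes/test/e2e
	// instead of the deleted local e2e.go.
	e2e.RunE2ETests(t)
}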
@@ -17,27 +17,29 @@ limitations under the License.
 package e2e
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"io/ioutil"
 	"strings"
 	"time"
 
-	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
-
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-	"sigs.k8s.io/yaml"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 
 	master "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
+	"sigs.k8s.io/yaml"
 )
 
 var (
@@ -117,15 +119,15 @@ func configureRBAC(cs clientset.Interface, ns string) error {
 
 // Remove RBAC configuration
 func deconfigureRBAC(cs clientset.Interface, ns string) error {
-	err := cs.RbacV1().ClusterRoleBindings().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+	err := cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
 	if err != nil {
 		return err
 	}
-	err = cs.RbacV1().ClusterRoles().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+	err = cs.RbacV1().ClusterRoles().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
	if err != nil {
		return err
	}
-	err = cs.CoreV1().ServiceAccounts(ns).Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+	err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
 	if err != nil {
 		return err
 	}
@@ -140,7 +142,7 @@ func createServiceAccount(cs clientset.Interface, ns string) (*v1.ServiceAccount, error) {
 			Namespace: ns,
 		},
 	}
-	return cs.CoreV1().ServiceAccounts(ns).Create(sa)
+	return cs.CoreV1().ServiceAccounts(ns).Create(context.TODO(), sa, metav1.CreateOptions{})
 }
 
 // Configure cluster role required by NFD
@@ -157,7 +159,7 @@ func createClusterRole(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
 			},
 		},
 	}
-	return cs.RbacV1().ClusterRoles().Update(cr)
+	return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
 }
 
 // Configure cluster role binding required by NFD
@@ -180,7 +182,7 @@ func createClusterRoleBinding(cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
 		},
 	}
 
-	return cs.RbacV1().ClusterRoleBindings().Update(crb)
+	return cs.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
 }
 
 // createService creates nfd-master Service
@@ -200,7 +202,7 @@ func createService(cs clientset.Interface, ns string) (*v1.Service, error) {
 			Type: v1.ServiceTypeClusterIP,
 		},
 	}
-	return cs.CoreV1().Services(ns).Create(svc)
+	return cs.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})
 }
 
 func nfdMasterPod(image string, onMasterNode bool) *v1.Pod {
@@ -361,14 +363,14 @@ func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
 // cleanupNode deletes all NFD-related metadata from the Node object, i.e.
 // labels and annotations
 func cleanupNode(cs clientset.Interface) {
-	nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodeList, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 	for _, n := range nodeList.Items {
 		var err error
 		var node *v1.Node
 		for retry := 0; retry < 5; retry++ {
-			node, err = cs.CoreV1().Nodes().Get(n.Name, metav1.GetOptions{})
+			node, err = cs.CoreV1().Nodes().Get(context.TODO(), n.Name, metav1.GetOptions{})
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 			update := false
@@ -393,7 +395,7 @@ func cleanupNode(cs clientset.Interface) {
 			}
 
 			ginkgo.By("Deleting NFD labels and annotations from node " + node.Name)
-			_, err = cs.CoreV1().Nodes().Update(node)
+			_, err = cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
 			if err != nil {
 				time.Sleep(100 * time.Millisecond)
 			} else {
@@ -406,7 +408,7 @@ func cleanupNode(cs clientset.Interface) {
 }
 
 // Actual test suite
-var _ = framework.KubeDescribe("Node Feature Discovery", func() {
+var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
 	f := framework.NewDefaultFramework("node-feature-discovery")
 
 	ginkgo.Context("when deploying a single nfd-master pod", func() {
@@ -420,7 +422,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 			ginkgo.By("Creating nfd master pod and nfd-master service")
 			image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
 			masterPod = nfdMasterPod(image, false)
-			masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(masterPod)
+			masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), masterPod, metav1.CreateOptions{})
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 			// Create nfd-master service
@@ -431,7 +433,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 			gomega.Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(gomega.HaveOccurred())
 
 			ginkgo.By("Waiting for the nfd-master service to be up")
-			gomega.Expect(framework.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(gomega.HaveOccurred())
+			gomega.Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(gomega.HaveOccurred())
 		})
 
 		ginkgo.AfterEach(func() {
@@ -459,16 +461,16 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 				ginkgo.By("Creating a nfd worker pod")
 				image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
 				workerPod := nfdWorkerPod(image, []string{"--oneshot", "--sources=fake"})
-				workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(workerPod)
+				workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				ginkgo.By("Waiting for the nfd-worker pod to succeed")
 				gomega.Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(gomega.HaveOccurred())
-				workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(workerPod.ObjectMeta.Name, metav1.GetOptions{})
+				workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), workerPod.ObjectMeta.Name, metav1.GetOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				ginkgo.By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
-				node, err := f.ClientSet.CoreV1().Nodes().Get(workerPod.Spec.NodeName, metav1.GetOptions{})
+				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), workerPod.Spec.NodeName, metav1.GetOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 				for k, v := range fakeFeatureLabels {
 					gomega.Expect(node.Labels[k]).To(gomega.Equal(v))
@@ -482,7 +484,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 				}
 
 				ginkgo.By("Deleting the node-feature-discovery worker pod")
-				err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(workerPod.ObjectMeta.Name, &metav1.DeleteOptions{})
+				err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), workerPod.ObjectMeta.Name, metav1.DeleteOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				cleanupNode(f.ClientSet)
@@ -508,14 +510,14 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 
 				ginkgo.By("Creating nfd-worker daemonset")
 				workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
-				workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(workerDS)
+				workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				ginkgo.By("Waiting for daemonset pods to be ready")
 				gomega.Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(gomega.HaveOccurred())
 
 				ginkgo.By("Getting node objects")
-				nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+				nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				for _, node := range nodeList.Items {
@@ -574,7 +576,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 				}
 
 				ginkgo.By("Deleting nfd-worker daemonset")
-				err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(workerDS.ObjectMeta.Name, &metav1.DeleteOptions{})
+				err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 				cleanupNode(f.ClientSet)
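Prefixing the top-level describe with "[NFD]" makes this suite easy to select with a Ginkgo focus expression. A hypothetical invocation, assuming the Ginkgo and cluster flags are wired up as in the upstream framework (the kubeconfig path is illustrative, and the image repository/tag flags backing *dockerRepo and *dockerTag must also be supplied as defined in the test file):

go test ./test/e2e/ -args -ginkgo.focus='\[NFD\]' -kubeconfig=$HOME/.kube/config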