mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git synced 2025-03-17 05:48:21 +00:00

Merge pull request #385 from cynepco3hahue/update_dependencies_master

Update k8s dependencies to 1.19.4
Commit 88344f257b, authored by Kubernetes Prow Robot on 2020-11-18 04:00:05 -08:00 and committed by GitHub.
7 changed files with 568 additions and 643 deletions


@@ -104,7 +104,7 @@ test:
 $(GO_CMD) test ./cmd/... ./pkg/...
 e2e-test:
-$(GO_CMD) test -v ./test/e2e/ -args -nfd.repo=$(IMAGE_REPO) -nfd.tag=$(IMAGE_TAG_NAME) -kubeconfig=$(KUBECONFIG) -nfd.e2e-config=$(E2E_TEST_CONFIG)
+$(GO_CMD) test -v ./test/e2e/ -args -nfd.repo=$(IMAGE_REPO) -nfd.tag=$(IMAGE_TAG_NAME) -kubeconfig=$(KUBECONFIG) -nfd.e2e-config=$(E2E_TEST_CONFIG) -ginkgo.focus="\[NFD\]"
 push:
 $(IMAGE_PUSH_CMD) $(IMAGE_TAG)
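The only change here appends -ginkgo.focus="\[NFD\]" to the e2e-test invocation, so go test runs only Ginkgo specs whose description contains the literal "[NFD]" tag (the suite description gains that prefix later in this same diff) and skips the unrelated specs pulled in from the upstream Kubernetes e2e framework. Below is a minimal, self-contained sketch of how a focus regex selects specs by description tag; the package, suite, and spec names are illustrative and not taken from this repository.

package focus_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// The description carries the "[NFD]" tag, so running the tests with
// `-args -ginkgo.focus="\[NFD\]"` selects this spec while leaving
// untagged specs out of the run.
var _ = ginkgo.Describe("[NFD] example suite", func() {
	ginkgo.It("runs only when the focus regex matches", func() {
		gomega.Expect(true).To(gomega.BeTrue())
	})
})

func TestFocusExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "focus example")
}

Ginkgo v1 (pinned at v1.11.0 in go.mod below) registers the -ginkgo.* flags on the standard flag set, which is why the Makefile can pass the focus regex after -args.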

go.mod (73 lines changed)

@@ -4,23 +4,26 @@ go 1.14
 require (
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
-github.com/golang/protobuf v1.3.2
+github.com/golang/protobuf v1.4.3
 github.com/klauspost/cpuid v1.2.3
-github.com/onsi/ginkgo v1.10.1
+github.com/onsi/ginkgo v1.11.0
 github.com/onsi/gomega v1.7.0
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a
+github.com/smartystreets/goconvey v1.6.4
 github.com/stretchr/testify v1.4.0
 github.com/vektra/errors v0.0.0-20140903201135-c64d83aba85a
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
-google.golang.org/grpc v1.23.1
-k8s.io/api v0.17.2
-k8s.io/apimachinery v0.17.2
-k8s.io/client-go v0.17.2
-k8s.io/component-base v0.17.2
-k8s.io/klog v1.0.0
-k8s.io/kubernetes v1.17.2
-k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
-sigs.k8s.io/yaml v1.1.0
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect
+golang.org/x/sys v0.0.0-20201116194326-cc9327a14d48 // indirect
+golang.org/x/text v0.3.4 // indirect
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+google.golang.org/genproto v0.0.0-20201116205149-79184cff4dfe // indirect
+google.golang.org/grpc v1.27.1
+google.golang.org/protobuf v1.25.0 // indirect
+k8s.io/api v0.19.4
+k8s.io/apimachinery v0.19.4
+k8s.io/client-go v11.0.0+incompatible
+k8s.io/kubernetes v1.18.6
+sigs.k8s.io/yaml v1.2.0
 )
 // The k8s "sub-"packages do not have 'semver' compatible versions. Thus, we
@@ -28,25 +31,27 @@ require (
 replace (
 //force version of x/text due CVE-2020-14040
 golang.org/x/text => golang.org/x/text v0.3.3
-k8s.io/api => k8s.io/api v0.0.0-20200121193204-7ea599edc7fd
-k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20200121201129-111e9ba415da
-k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191121175448-79c2a76c473a
-k8s.io/apiserver => k8s.io/apiserver v0.0.0-20200121195158-da2f3bd69287
-k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20200121201805-7928b415bdea
-k8s.io/client-go => k8s.io/client-go v0.0.0-20200121193945-bdedab45d4f6
-k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20200121203829-580c13bb6ed9
-k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20200121203528-48c15d793bf4
-k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191121175249-e95606b614f0
-k8s.io/component-base => k8s.io/component-base v0.0.0-20200121194253-47d744dd27ec
-k8s.io/cri-api => k8s.io/cri-api v0.0.0-20191121183020-775aa3c1cf73
-k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20200121204128-ab1d1be7e7e9
-k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20200121195706-c8017da6deb7
-k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20200121203241-7fc8a284e25f
-k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20200121202405-597cb7b43db3
-k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20200121202948-05dd8b0a4787
-k8s.io/kubectl => k8s.io/kubectl v0.0.0-20200121205541-a36079a4286a
-k8s.io/kubelet => k8s.io/kubelet v0.0.0-20200121202654-3d0d0a3a4b44
-k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20200121204546-147d309c2148
-k8s.io/metrics => k8s.io/metrics v0.0.0-20200121201502-3a7afb0af1bc
-k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20200121200150-07ea3fc70559
+google.golang.org/grpc => google.golang.org/grpc v1.27.1
+k8s.io/api => k8s.io/api v0.19.4
+k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.4
+k8s.io/apimachinery => k8s.io/apimachinery v0.19.4
+k8s.io/apiserver => k8s.io/apiserver v0.19.4
+k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.4
+k8s.io/client-go => k8s.io/client-go v0.19.4
+k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.4
+k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.4
+k8s.io/code-generator => k8s.io/code-generator v0.19.4
+k8s.io/component-base => k8s.io/component-base v0.19.4
+k8s.io/cri-api => k8s.io/cri-api v0.19.4
+k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.4
+k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.4
+k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.4
+k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.4
+k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.4
+k8s.io/kubectl => k8s.io/kubectl v0.19.4
+k8s.io/kubelet => k8s.io/kubelet v0.19.4
+k8s.io/kubernetes => k8s.io/kubernetes v1.19.4
+k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.4
+k8s.io/metrics => k8s.io/metrics v0.19.4
+k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.4
 )

go.sum (775 lines changed)

File diff suppressed because it is too large.


@@ -17,6 +17,7 @@ limitations under the License.
 package apihelper
 import (
+"context"
 "encoding/json"
 api "k8s.io/api/core/v1"
@@ -55,7 +56,7 @@ func (h K8sHelpers) GetClient() (*k8sclient.Clientset, error) {
 func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Node, error) {
 // Get the node object using node name
-node, err := cli.CoreV1().Nodes().Get(nodeName, meta_v1.GetOptions{})
+node, err := cli.CoreV1().Nodes().Get(context.TODO(), nodeName, meta_v1.GetOptions{})
 if err != nil {
 return nil, err
 }
@@ -64,12 +65,12 @@ func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Nod
 }
 func (h K8sHelpers) GetNodes(cli *k8sclient.Clientset) (*api.NodeList, error) {
-return cli.CoreV1().Nodes().List(meta_v1.ListOptions{})
+return cli.CoreV1().Nodes().List(context.TODO(), meta_v1.ListOptions{})
 }
 func (h K8sHelpers) UpdateNode(c *k8sclient.Clientset, n *api.Node) error {
 // Send the updated node to the apiserver.
-_, err := c.CoreV1().Nodes().Update(n)
+_, err := c.CoreV1().Nodes().Update(context.TODO(), n, meta_v1.UpdateOptions{})
 if err != nil {
 return err
 }
@@ -81,7 +82,7 @@ func (h K8sHelpers) PatchStatus(c *k8sclient.Clientset, nodeName string, marshal
 // Send the updated node to the apiserver.
 patch, err := json.Marshal(marshalable)
 if err == nil {
-_, err = c.CoreV1().Nodes().Patch(nodeName, types.JSONPatchType, patch, "status")
+_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.JSONPatchType, patch, meta_v1.PatchOptions{}, "status")
 }
 return err
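The hunks above track the client-go API break introduced around k8s.io/client-go v0.18 and required by the v0.19.4 pin: every generated clientset call now takes a context.Context as its first argument plus a typed options struct (metav1.GetOptions, UpdateOptions, PatchOptions, and so on). The following is a small stand-alone sketch of the new call pattern, assuming a standard kubeconfig; it is illustrative only and not code from this repository.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// client-go >= v0.19: List takes a context plus metav1.ListOptions.
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		fmt.Println(n.Name)
	}
}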


@@ -1,295 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"path"
"testing"
"time"
"k8s.io/klog"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/logs"
"k8s.io/component-base/version"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/manifest"
e2ereporters "k8s.io/kubernetes/test/e2e/reporters"
testutils "k8s.io/kubernetes/test/utils"
utilnet "k8s.io/utils/net"
clientset "k8s.io/client-go/kubernetes"
)
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
setupSuite()
return nil
}, func(data []byte) {
// Run on all Ginkgo nodes
setupSuitePerGinkgoNode()
})
var _ = ginkgo.SynchronizedAfterSuite(func() {
framework.CleanupSuite()
}, func() {
framework.AfterSuiteActions()
})
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
runtimeutils.ReallyCrash = true
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(e2elog.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
var r []ginkgo.Reporter
if framework.TestContext.ReportDir != "" {
// TODO: we should probably only be trying to create this directory once
// rather than once-per-Ginkgo-node.
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
klog.Errorf("Failed creating report directory: %v", err)
} else {
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
}
}
// Stream the progress to stdout and optionally a URL accepting progress updates.
r = append(r, e2ereporters.NewProgressReporter(framework.TestContext.ProgressReportURL))
klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
path := "test/images/clusterapi-tester/pod.yaml"
framework.Logf("Parsing pod from %v", path)
p, err := manifest.PodFromManifest(path)
if err != nil {
framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
return
}
p.Namespace = ns
if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
framework.Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
framework.Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
timeout := 5 * time.Minute
if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
return
}
logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
if err != nil {
framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
} else {
framework.Logf("Output of clusterapi-tester:\n%v", logs)
}
}
// getDefaultClusterIPFamily obtains the default IP family of the cluster
// using the Cluster IP address of the kubernetes service created in the default namespace
// This unequivocally identifies the default IP family because services are single family
// TODO: dual-stack may support multiple families per service
// but we can detect if a cluster is dual stack because pods have two addresses (one per family)
func getDefaultClusterIPFamily(c clientset.Interface) string {
// Get the ClusterIP of the kubernetes service created in the default namespace
svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get kubernetes service ClusterIP: %v", err)
}
if utilnet.IsIPv6String(svc.Spec.ClusterIP) {
return "ipv6"
}
return "ipv4"
}
// waitForDaemonSets for all daemonsets in the given namespace to be ready
// (defined as all but 'allowedNotReadyNodes' pods associated with that
// daemonset are ready).
func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
start := time.Now()
framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
timeout, ns)
return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
if err != nil {
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
var notReadyDaemonSets []string
for _, ds := range dsList.Items {
framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
}
}
if len(notReadyDaemonSets) > 0 {
framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
return false, nil
}
return true, nil
})
}
// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running.
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
func setupSuite() {
// Run only on Ginkgo node 1
switch framework.TestContext.Provider {
case "gce", "gke":
framework.LogClusterImageSources()
}
c, err := framework.LoadClientset()
if err != nil {
klog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except those created by the system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
[]string{
metav1.NamespaceSystem,
metav1.NamespaceDefault,
metav1.NamespacePublic,
v1.NamespaceNodeLease,
})
if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err)
}
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
nodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
framework.TestContext.CloudConfig.NumNodes = len(nodes.Items)
}
// Ensure all pods are running and ready before starting tests (otherwise,
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
// TODO: In large clusters, we often observe a non-starting pods due to
// #41007. To avoid those pods preventing the whole test runs (and just
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.
framework.Logf("e2e test version: %s", version.Get().GitVersion)
dc := c.DiscoveryClient
serverVersion, serverErr := dc.ServerVersion()
if serverErr != nil {
framework.Logf("Unexpected server error retrieving version: %v", serverErr)
}
if serverVersion != nil {
framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
}
if framework.TestContext.NodeKiller.Enabled {
nodeKiller := framework.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider)
go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCh)
}
}
// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
// There are certain operations we only want to run once per overall test invocation on each Ginkgo node
// such as making some global variables accessible to all parallel executions
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// Ref: https://onsi.github.io/ginkgo/#parallel-specs
func setupSuitePerGinkgoNode() {
// Obtain the default IP family of the cluster
// Some e2e test are designed to work on IPv4 only, this global variable
// allows to adapt those tests to work on both IPv4 and IPv6
// TODO: dual-stack
// the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters,
// and services use the primary IP family by default
c, err := framework.LoadClientset()
if err != nil {
klog.Fatal("Error loading client: ", err)
}
framework.TestContext.IPFamily = getDefaultClusterIPFamily(c)
framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily)
}


@@ -23,6 +23,7 @@ import (
 "testing"
 "time"
+"k8s.io/kubernetes/test/e2e"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/config"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -56,5 +57,5 @@ func TestMain(m *testing.M) {
 }
 func TestE2E(t *testing.T) {
-RunE2ETests(t)
+e2e.RunE2ETests(t)
 }
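With the local copy of the Ginkgo runner deleted above (the 295-line file), the test entry point now delegates to the runner shipped in k8s.io/kubernetes/test/e2e. A hedged sketch of the resulting shape of the file follows; flag registration in TestMain is untouched by this diff and is omitted here, and the surrounding code in the repository may differ.

package e2e

import (
	"testing"

	"k8s.io/kubernetes/test/e2e"
)

// TestE2E hands control to the upstream Kubernetes e2e runner, which wires up
// Ginkgo, reporters, and the shared suite setup/teardown hooks.
func TestE2E(t *testing.T) {
	e2e.RunE2ETests(t)
}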


@@ -17,27 +17,29 @@ limitations under the License.
 package e2e
 import (
+"context"
 "flag"
 "fmt"
 "io/ioutil"
 "strings"
 "time"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 appsv1 "k8s.io/api/apps/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-"sigs.k8s.io/yaml"
-"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 master "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
+"sigs.k8s.io/yaml"
 )
@@ -117,15 +119,15 @@ func configureRBAC(cs clientset.Interface, ns string) error {
 // Remove RBAC configuration
 func deconfigureRBAC(cs clientset.Interface, ns string) error {
-err := cs.RbacV1().ClusterRoleBindings().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+err := cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
 if err != nil {
 return err
 }
-err = cs.RbacV1().ClusterRoles().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+err = cs.RbacV1().ClusterRoles().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
 if err != nil {
 return err
 }
-err = cs.CoreV1().ServiceAccounts(ns).Delete("nfd-master-e2e", &metav1.DeleteOptions{})
+err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
 if err != nil {
 return err
 }
@@ -140,7 +142,7 @@ func createServiceAccount(cs clientset.Interface, ns string) (*v1.ServiceAccount
 Namespace: ns,
 },
 }
-return cs.CoreV1().ServiceAccounts(ns).Create(sa)
+return cs.CoreV1().ServiceAccounts(ns).Create(context.TODO(), sa, metav1.CreateOptions{})
 }
 // Configure cluster role required by NFD
@@ -157,7 +159,7 @@ func createClusterRole(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
 },
 },
 }
-return cs.RbacV1().ClusterRoles().Update(cr)
+return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
 }
 // Configure cluster role binding required by NFD
@@ -180,7 +182,7 @@ func createClusterRoleBinding(cs clientset.Interface, ns string) (*rbacv1.Cluste
 },
 }
-return cs.RbacV1().ClusterRoleBindings().Update(crb)
+return cs.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
 }
 // createService creates nfd-master Service
@@ -200,7 +202,7 @@ func createService(cs clientset.Interface, ns string) (*v1.Service, error) {
 Type: v1.ServiceTypeClusterIP,
 },
 }
-return cs.CoreV1().Services(ns).Create(svc)
+return cs.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})
 }
 func nfdMasterPod(image string, onMasterNode bool) *v1.Pod {
@@ -361,14 +363,14 @@ func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
 // cleanupNode deletes all NFD-related metadata from the Node object, i.e.
 // labels and annotations
 func cleanupNode(cs clientset.Interface) {
-nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{})
+nodeList, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 for _, n := range nodeList.Items {
 var err error
 var node *v1.Node
 for retry := 0; retry < 5; retry++ {
-node, err = cs.CoreV1().Nodes().Get(n.Name, metav1.GetOptions{})
+node, err = cs.CoreV1().Nodes().Get(context.TODO(), n.Name, metav1.GetOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 update := false
@@ -393,7 +395,7 @@ func cleanupNode(cs clientset.Interface) {
 }
 ginkgo.By("Deleting NFD labels and annotations from node " + node.Name)
-_, err = cs.CoreV1().Nodes().Update(node)
+_, err = cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
 if err != nil {
 time.Sleep(100 * time.Millisecond)
 } else {
@@ -406,7 +408,7 @@ func cleanupNode(cs clientset.Interface) {
 }
 // Actual test suite
-var _ = framework.KubeDescribe("Node Feature Discovery", func() {
+var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
 f := framework.NewDefaultFramework("node-feature-discovery")
 ginkgo.Context("when deploying a single nfd-master pod", func() {
@@ -420,7 +422,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 ginkgo.By("Creating nfd master pod and nfd-master service")
 image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
 masterPod = nfdMasterPod(image, false)
-masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(masterPod)
+masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), masterPod, metav1.CreateOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 // Create nfd-master service
@@ -431,7 +433,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 gomega.Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(gomega.HaveOccurred())
 ginkgo.By("Waiting for the nfd-master service to be up")
-gomega.Expect(framework.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(gomega.HaveOccurred())
+gomega.Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(gomega.HaveOccurred())
 })
 ginkgo.AfterEach(func() {
@@ -459,16 +461,16 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 ginkgo.By("Creating a nfd worker pod")
 image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
 workerPod := nfdWorkerPod(image, []string{"--oneshot", "--sources=fake"})
-workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(workerPod)
+workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 ginkgo.By("Waiting for the nfd-worker pod to succeed")
 gomega.Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(gomega.HaveOccurred())
-workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(workerPod.ObjectMeta.Name, metav1.GetOptions{})
+workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), workerPod.ObjectMeta.Name, metav1.GetOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 ginkgo.By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
-node, err := f.ClientSet.CoreV1().Nodes().Get(workerPod.Spec.NodeName, metav1.GetOptions{})
+node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), workerPod.Spec.NodeName, metav1.GetOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 for k, v := range fakeFeatureLabels {
 gomega.Expect(node.Labels[k]).To(gomega.Equal(v))
@@ -482,7 +484,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 }
 ginkgo.By("Deleting the node-feature-discovery worker pod")
-err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(workerPod.ObjectMeta.Name, &metav1.DeleteOptions{})
+err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), workerPod.ObjectMeta.Name, metav1.DeleteOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 cleanupNode(f.ClientSet)
@@ -508,14 +510,14 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 ginkgo.By("Creating nfd-worker daemonset")
 workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
-workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(workerDS)
+workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 ginkgo.By("Waiting for daemonset pods to be ready")
 gomega.Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(gomega.HaveOccurred())
 ginkgo.By("Getting node objects")
-nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 for _, node := range nodeList.Items {
@@ -574,7 +576,7 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
 }
 ginkgo.By("Deleting nfd-worker daemonset")
-err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(workerDS.ObjectMeta.Name, &metav1.DeleteOptions{})
+err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 cleanupNode(f.ClientSet)