Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git

Merge pull request #181 from marquiz/feature/e2e-tests-framework

Add simple e2e test
Kubernetes Prow Robot, 2019-06-27 11:53:21 -07:00 (committed via GitHub)
commit c191f20c30
7 changed files with 1745 additions and 25 deletions

Gopkg.lock (generated): diff suppressed because it is too large (1176 lines changed)

Gopkg.toml

@@ -24,6 +24,10 @@
 # go-tests = true
 # unused-packages = true
 
+[prune]
+  go-tests = true
+  unused-packages = true
+
 [[constraint]]
   name = "github.com/docopt/docopt-go"
   version = "0.6.2"
@@ -45,20 +49,40 @@
   branch = "master"
 
 [[constraint]]
   name = "k8s.io/kubernetes"
   version = "~1.13.0"
 
 # The k8s "sub-"packages do not have versions that would be correctly
 # interpreted by dep (or the semver library). Thus, we need to use tag names,
 # directly.
-[[constraint]]
+[[override]]
   name = "k8s.io/api"
-  version = "kubernetes-1.13.1"
+  version = "kubernetes-1.13.5"
 
-[[constraint]]
+[[override]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.13.0"
+  version = "kubernetes-1.13.5"
+
+[[override]]
+  name = "k8s.io/apiserver"
+  version = "kubernetes-1.13.5"
+
+[[override]]
+  name = "k8s.io/apiextensions-apiserver"
+  version = "kubernetes-1.13.5"
+
+[[override]]
+  name = "k8s.io/kube-aggregator"
+  version = "kubernetes-1.13.5"
 
 [[override]]
   name = "k8s.io/client-go"
-  version = "10.0.0"
+  version = "kubernetes-1.13.5"
 
-[prune]
-  go-tests = true
-  unused-packages = true
+# k8s.io/apiserver requires the latest master
+[[override]]
+  name = "github.com/evanphx/json-patch"
+  revision = "5858425f75500d40c52783dce87d085a483ce135"
+
+# Use revision because no proper versions exist
+[[override]]
+  name = "k8s.io/kube-openapi"
+  revision = "d7c86cdc46e3a4fcf892b32dd7bc3aa775e0870e"
 
 [[constraint]]
   name = "github.com/golang/protobuf"
@@ -67,3 +91,12 @@
 [[constraint]]
   name = "google.golang.org/grpc"
   version = "1.17.0"
+
+# Workaround https://github.com/golang/dep/issues/1799
+[[override]]
+  name = "gopkg.in/fsnotify.v1"
+  source = "github.com/fsnotify/fsnotify"
+
+[[override]]
+  name = "github.com/docker/distribution"
+  version = "v2.7.0-rc.0"

Makefile

@@ -13,6 +13,7 @@ IMAGE_TAG_NAME := $(VERSION)
 IMAGE_REPO := $(IMAGE_REGISTRY)/$(IMAGE_NAME)
 IMAGE_TAG := $(IMAGE_REPO):$(IMAGE_TAG_NAME)
 K8S_NAMESPACE := kube-system
+KUBECONFIG :=
 
 yaml_templates := $(wildcard *.yaml.template)
 yaml_instances := $(patsubst %.yaml.template,%.yaml,$(yaml_templates))
@@ -43,5 +44,9 @@ mock:
 test:
 	go test ./cmd/... ./pkg/...
 
+e2e-test:
+	dep ensure -v
+	go test -v ./test/e2e/ -args -nfd.repo=$(IMAGE_REPO) -nfd.tag=$(IMAGE_TAG_NAME) -kubeconfig=$(KUBECONFIG)
+
 push:
 	$(IMAGE_PUSH_CMD) $(IMAGE_TAG)
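A note on the new `e2e-test` target: in `go test`, everything after `-args` is passed to the compiled test binary instead of being interpreted by `go test` itself, which is how `-nfd.repo`, `-nfd.tag`, and `-kubeconfig` reach the suite (`-kubeconfig` is registered by the Kubernetes e2e framework). A minimal, self-contained sketch of that plumbing; the flag names mirror the target above, but the test function is illustrative and not part of this change:

```go
package e2e

import (
	"flag"
	"testing"
)

// Flags mirroring those passed via "-args" in the Makefile target above.
var (
	repo = flag.String("nfd.repo", "", "container image repository under test")
	tag  = flag.String("nfd.tag", "", "container image tag under test")
)

// Illustrative only: shows that the values arrive via the test binary's own flag set.
func TestImageFlags(t *testing.T) {
	t.Logf("image under test: %s:%s", *repo, *tag)
}
```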

README.md

@@ -622,6 +622,7 @@ name of the resulting container image.
 | IMAGE_REPO | Container image repository to use | <IMAGE_REGISTRY>/<IMAGE_NAME>
 | IMAGE_TAG | Full image:tag to tag the image with | <IMAGE_REPO>:<IMAGE_TAG_NAME>
 | K8S_NAMESPACE | nfd-master and nfd-worker namespace | kube-system
+| KUBECONFIG | Kubeconfig for running e2e-tests | *empty*
 
 For example, to use a custom registry:
 ```
@@ -633,6 +634,21 @@ Or to specify a build tool different from Docker:
 make IMAGE_BUILD_CMD="buildah bud"
 ```
 
+### Testing
+
+Unit tests are automatically run as part of the container image build. You can
+also run them manually in the source code tree by simply running:
+```
+make test
+```
+
+End-to-end tests are built on top of the e2e test framework of Kubernetes, and
+they require a cluster to run on. To run the tests against your test cluster,
+you need to specify the kubeconfig to be used:
+```
+make e2e-test KUBECONFIG=$HOME/.kube/config
+```
+
 ## Targeting Nodes with Specific Features
 
 Nodes with specific features can be targeted using the `nodeSelector` field. The

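The `nodeSelector` targeting that this README section introduces can also be expressed directly in Go, as the pod helpers later in this PR do. A hedged sketch of a pod pinned to an NFD-labeled node; the pod name, image parameter, and label key are illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podOnFakeFeatureNode returns a pod that, via nodeSelector, only schedules
// onto nodes carrying one of the fake NFD labels exercised by this PR's test.
func podOnFakeFeatureNode(image string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "feature-dependent-pod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "main", Image: image}},
			NodeSelector: map[string]string{
				"feature.node.kubernetes.io/fake-fakefeature1": "true",
			},
		},
	}
}
```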
test/e2e/e2e.go (new file, 192 lines)

@@ -0,0 +1,192 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e

import (
	"fmt"
	"os"
	"path"
	"testing"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/gomega"

	"k8s.io/klog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
	"k8s.io/kubernetes/test/e2e/manifest"
	testutils "k8s.io/kubernetes/test/utils"
)
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Run only on Ginkgo node 1
	c, err := framework.LoadClientset()
	if err != nil {
		klog.Fatal("Error loading client: ", err)
	}

	// Delete any namespaces except those created by the system. This ensures no
	// lingering resources are left over from a previous test run.
	if framework.TestContext.CleanStart {
		deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
			[]string{
				metav1.NamespaceSystem,
				metav1.NamespaceDefault,
				metav1.NamespacePublic,
			})
		if err != nil {
			framework.Failf("Error deleting orphaned namespaces: %v", err)
		}
		klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
		if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
			framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
		}
	}

	// In large clusters we may get to this point but still have a bunch
	// of nodes without Routes created. Since this would make a node
	// unschedulable, we need to wait until all of them are schedulable.
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
	// TODO: In large clusters, we often observe non-starting pods due to
	// #41007. To avoid those pods preventing the whole test run (and just
	// wasting it), we allow for some not-ready pods (with the
	// number equal to the number of allowed not-ready nodes).
	if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
		framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
		framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
		runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
	}

	if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
		framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
	}

	dc := c.DiscoveryClient

	serverVersion, serverErr := dc.ServerVersion()
	if serverErr != nil {
		framework.Logf("Unexpected server error retrieving version: %v", serverErr)
	}
	if serverVersion != nil {
		framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
	}

	return nil
}, func(data []byte) {
	// Run on all Ginkgo nodes
})
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
	// Run on all Ginkgo nodes
	framework.Logf("Running AfterSuite actions on all nodes")
	framework.RunCleanupActions()
}, func() {
	// Run only on Ginkgo node 1
	framework.Logf("Running AfterSuite actions on node 1")
	if framework.TestContext.ReportDir != "" {
		framework.CoreDump(framework.TestContext.ReportDir)
	}
})
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
	runtimeutils.ReallyCrash = true
	gomega.RegisterFailHandler(ginkgowrapper.Fail)

	// Disable skipped tests unless they are explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
	}

	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if framework.TestContext.ReportDir != "" {
		// TODO: we should probably only be trying to create this directory once
		// rather than once-per-Ginkgo-node.
		if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
			klog.Errorf("Failed creating report directory: %v", err)
		} else {
			r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
		}
	}
	klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)

	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	framework.Logf("Parsing pod from %v", path)
	p, err := manifest.PodFromManifest(path)
	if err != nil {
		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
		framework.Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	defer func() {
		if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		framework.Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
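The SynchronizedBeforeSuite/SynchronizedAfterSuite pairing in this file is the standard Ginkgo idiom for one-time setup and teardown under parallel execution. A stripped-down sketch of the shape, separate from the file above; the package name and payload are illustrative:

```go
package example

import "github.com/onsi/ginkgo"

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Runs once, on parallel node 1 only: do global setup and serialize
	// whatever the other nodes need to know about it.
	return []byte("shared-state")
}, func(data []byte) {
	// Runs on every parallel node, receiving the bytes produced on node 1.
	_ = data
})

var _ = ginkgo.SynchronizedAfterSuite(func() {
	// Runs on every node first...
}, func() {
	// ...and then once on node 1, after all nodes have finished.
})
```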

test/e2e/e2e_test.go (new file, 43 lines)

@@ -0,0 +1,43 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e

import (
	"testing"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/testfiles"
)

func init() {
	// Register framework flags, then handle flags and Viper config.
	framework.HandleFlags()
	framework.AfterReadingAllFlags(&framework.TestContext)

	// TODO: Deprecating repo-root over time... instead just use gobindata_util.go, see #23987.
	// Right now it is still needed, for example by
	// test/e2e/framework/ingress/ingress_utils.go
	// for providing the optional secret.yaml file and by
	// test/e2e/framework/util.go for cluster/log-dump.
	if framework.TestContext.RepoRoot != "" {
		testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot})
	}
}

func TestE2E(t *testing.T) {
	RunE2ETests(t)
}

test/e2e/node_feature_discovery.go (new file, 285 lines)

@@ -0,0 +1,285 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e

import (
	"flag"
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var (
	dockerRepo  = flag.String("nfd.repo", "quay.io/kubernetes_incubator/node-feature-discovery", "Docker repository to fetch image from")
	dockerTag   = flag.String("nfd.tag", "e2e-test", "Docker tag to use")
	labelPrefix = "feature.node.kubernetes.io/"
)
// Create required RBAC configuration
func configureRBAC(cs clientset.Interface, ns string) error {
	_, err := createServiceAccount(cs, ns)
	if err != nil {
		return err
	}

	_, err = createClusterRole(cs)
	if err != nil {
		return err
	}

	_, err = createClusterRoleBinding(cs, ns)
	if err != nil {
		return err
	}

	return nil
}

// Remove RBAC configuration
func deconfigureRBAC(cs clientset.Interface, ns string) error {
	err := cs.RbacV1().ClusterRoleBindings().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	err = cs.RbacV1().ClusterRoles().Delete("nfd-master-e2e", &metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	err = cs.CoreV1().ServiceAccounts(ns).Delete("nfd-master-e2e", &metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	return nil
}

// Configure service account required by NFD
func createServiceAccount(cs clientset.Interface, ns string) (*v1.ServiceAccount, error) {
	sa := &v1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "nfd-master-e2e",
			Namespace: ns,
		},
	}
	return cs.CoreV1().ServiceAccounts(ns).Create(sa)
}

// Configure cluster role required by NFD
func createClusterRole(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
	cr := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"nodes"},
				Verbs:     []string{"get", "patch", "update"},
			},
		},
	}
	return cs.RbacV1().ClusterRoles().Update(cr)
}
// Configure cluster role binding required by NFD
func createClusterRoleBinding(cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      rbacv1.ServiceAccountKind,
				Name:      "nfd-master-e2e",
				Namespace: ns,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "nfd-master-e2e",
		},
	}
	return cs.RbacV1().ClusterRoleBindings().Update(crb)
}

// createService creates nfd-master Service
func createService(cs clientset.Interface, ns string) (*v1.Service, error) {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"app": "nfd-master-e2e"},
			Ports: []v1.ServicePort{
				{
					Protocol: v1.ProtocolTCP,
					Port:     8080,
				},
			},
			Type: v1.ServiceTypeClusterIP,
		},
	}
	return cs.CoreV1().Services(ns).Create(svc)
}
func nfdMasterPod(ns string, image string, onMasterNode bool) *v1.Pod {
	p := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "nfd-master-" + string(uuid.NewUUID()),
			Labels: map[string]string{"app": "nfd-master-e2e"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "node-feature-discovery",
					Image:           image,
					ImagePullPolicy: v1.PullAlways,
					Command:         []string{"nfd-master"},
					Env: []v1.EnvVar{
						{
							Name: "NODE_NAME",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "spec.nodeName",
								},
							},
						},
					},
				},
			},
			ServiceAccountName: "nfd-master-e2e",
			RestartPolicy:      v1.RestartPolicyNever,
		},
	}
	if onMasterNode {
		p.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
		p.Spec.Tolerations = []v1.Toleration{
			{
				Key:      "node-role.kubernetes.io/master",
				Operator: v1.TolerationOpEqual,
				Value:    "",
				Effect:   v1.TaintEffectNoSchedule,
			},
		}
	}
	return p
}

func nfdWorkerPod(ns string, image string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-worker-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			// NOTE: We omit Volumes/VolumeMounts at the moment, as we only test the fake source
			Containers: []v1.Container{
				{
					Name:            "node-feature-discovery",
					Image:           image,
					ImagePullPolicy: v1.PullAlways,
					Command:         []string{"nfd-worker"},
					Args:            []string{"--oneshot", "--sources=fake", "--server=nfd-master-e2e:8080"},
					Env: []v1.EnvVar{
						{
							Name: "NODE_NAME",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "spec.nodeName",
								},
							},
						},
					},
				},
			},
			ServiceAccountName: "nfd-master-e2e",
			// NOTE: We do not set HostNetwork/DNSPolicy because we only test the fake source
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}
// Actual test suite
var _ = framework.KubeDescribe("Node Feature Discovery", func() {
	f := framework.NewDefaultFramework("node-feature-discovery")

	BeforeEach(func() {
		err := configureRBAC(f.ClientSet, f.Namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	Context("when deployed with fake source enabled", func() {
		It("should decorate the node with the fake feature labels", func() {
			By("Creating nfd master and worker pods and the nfd-master service on the selected node")
			ns := f.Namespace.Name
			image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
			fakeFeatureLabels := map[string]string{
				labelPrefix + "fake-fakefeature1": "true",
				labelPrefix + "fake-fakefeature2": "true",
				labelPrefix + "fake-fakefeature3": "true",
			}

			defer deconfigureRBAC(f.ClientSet, f.Namespace.Name)

			// Launch nfd-master
			masterPod := nfdMasterPod(ns, image, false)
			masterPod, err := f.ClientSet.CoreV1().Pods(ns).Create(masterPod)
			Expect(err).NotTo(HaveOccurred())

			// Create nfd-master service
			nfdSvc, err := createService(f.ClientSet, f.Namespace.Name)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for the nfd-master pod to be running")
			Expect(framework.WaitForPodRunningInNamespace(f.ClientSet, masterPod)).NotTo(HaveOccurred())

			By("Waiting for the nfd-master service to be up")
			Expect(framework.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())

			// Launch nfd-worker
			workerPod := nfdWorkerPod(ns, image)
			workerPod, err = f.ClientSet.CoreV1().Pods(ns).Create(workerPod)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for the nfd-worker pod to succeed")
			Expect(framework.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, ns)).NotTo(HaveOccurred())
			workerPod, err = f.ClientSet.CoreV1().Pods(ns).Get(workerPod.ObjectMeta.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())

			By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
			node, err := f.ClientSet.CoreV1().Nodes().Get(workerPod.Spec.NodeName, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			for k, v := range fakeFeatureLabels {
				Expect(node.Labels[k]).To(Equal(v))
			}

			By("Removing the fake feature labels advertised by the node-feature-discovery pod")
			for key := range fakeFeatureLabels {
				framework.RemoveLabelOffNode(f.ClientSet, workerPod.Spec.NodeName, key)
			}
		})
	})
})
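One possible hardening, not part of this commit: the suite asserts the node labels immediately after the worker pod succeeds, which assumes nfd-master has already patched the node. A polling helper in the style of the 1.13-era client-go API used above would tolerate propagation delay; waitForNodeLabel is hypothetical and would also need k8s.io/apimachinery/pkg/util/wait imported:

```go
// waitForNodeLabel is a hypothetical helper: poll until the node carries the
// expected label value, instead of asserting immediately after the worker exits.
func waitForNodeLabel(cs clientset.Interface, nodeName, key, value string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return node.Labels[key] == value, nil
	})
}
```

It could then replace the direct Nodes().Get plus per-label assertions in the spec above with a bounded wait per label.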