
Merge pull request #292 from marquiz/devel/e2e-node

test/e2e: more comprehensive per-node testing
Authored by Kubernetes Prow Robot on 2020-03-03 04:47:37 -08:00; committed by GitHub
commit 7e728aecb9
6 changed files with 474 additions and 81 deletions
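
The new e2e test reads an optional per-node configuration file (passed in through the new -nfd.e2e-config flag; see the example file and the e2eConfig types in the diff below) and verifies each node's labels against the expectations listed for that node. The sketch below illustrates that per-node check in plain Go; the nodeExpectations type and verifyNode helper are illustrative stand-ins for the Gomega assertions the actual test uses.

package main

import "fmt"

// nodeExpectations mirrors the per-node section of the e2e config file:
// label values that must match exactly, plus keys that only need to exist.
type nodeExpectations struct {
	expectedLabelValues map[string]string
	expectedLabelKeys   map[string]struct{}
}

// verifyNode checks one node's labels against its expectations and returns
// the first mismatch it finds, or nil if everything matches.
func verifyNode(name string, labels map[string]string, exp nodeExpectations) error {
	for k, want := range exp.expectedLabelValues {
		if got, ok := labels[k]; !ok || got != want {
			return fmt.Errorf("node %q: label %q = %q, want %q", name, k, got, want)
		}
	}
	for k := range exp.expectedLabelKeys {
		if _, ok := labels[k]; !ok {
			return fmt.Errorf("node %q: expected label key %q is missing", name, k)
		}
	}
	return nil
}

func main() {
	// Example node labels and expectations; the values are illustrative only.
	labels := map[string]string{
		"feature.node.kubernetes.io/cpu-cpuid.AVX":       "true",
		"feature.node.kubernetes.io/kernel-version.full": "4.18.0",
	}
	exp := nodeExpectations{
		expectedLabelValues: map[string]string{"feature.node.kubernetes.io/cpu-cpuid.AVX": "true"},
		expectedLabelKeys:   map[string]struct{}{"feature.node.kubernetes.io/kernel-version.full": {}},
	}
	fmt.Println(verifyNode("my-node-1", labels, exp)) // prints <nil>
}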


@@ -21,7 +21,8 @@ script:
chmod a+x $HOME/bin/aws-iam-authenticator;
export PATH=$PATH:$HOME/bin;
echo "$KUBECONFIG_AWS" > kubeconfig_aws;
-make e2e-test -e KUBECONFIG=`pwd`/kubeconfig_aws;
+echo "$E2E_TEST_CONFIG_DATA" > e2e-test-config;
+make e2e-test -e KUBECONFIG=`pwd`/kubeconfig_aws E2E_TEST_CONFIG=`pwd`/e2e-test-config;
fi
deploy:


@@ -16,6 +16,7 @@ IMAGE_REPO := $(IMAGE_REGISTRY)/$(IMAGE_NAME)
IMAGE_TAG := $(IMAGE_REPO):$(IMAGE_TAG_NAME)
K8S_NAMESPACE := kube-system
KUBECONFIG :=
+E2E_TEST_CONFIG :=
yaml_templates := $(wildcard *.yaml.template)
yaml_instances := $(patsubst %.yaml.template,%.yaml,$(yaml_templates))
@@ -47,7 +48,7 @@ test:
$(GO_CMD) test ./cmd/... ./pkg/...
e2e-test:
-$(GO_CMD) test -v ./test/e2e/ -args -nfd.repo=$(IMAGE_REPO) -nfd.tag=$(IMAGE_TAG_NAME) -kubeconfig=$(KUBECONFIG)
+$(GO_CMD) test -v ./test/e2e/ -args -nfd.repo=$(IMAGE_REPO) -nfd.tag=$(IMAGE_TAG_NAME) -kubeconfig=$(KUBECONFIG) -nfd.e2e-config=$(E2E_TEST_CONFIG)
push:
$(IMAGE_PUSH_CMD) $(IMAGE_TAG)


@@ -63,8 +63,8 @@ func TestUpdateNodeFeatures(t *testing.T) {
mockClient := &k8sclient.Clientset{}
// Mock node with old features
mockNode := newMockNode()
-mockNode.Labels[labelNs+"old-feature"] = "old-value"
-mockNode.Annotations[annotationNs+"feature-labels"] = "old-feature"
+mockNode.Labels[LabelNs+"old-feature"] = "old-value"
+mockNode.Annotations[AnnotationNs+"feature-labels"] = "old-feature"
Convey("When I successfully update the node with feature labels", func() {
mockAPIHelper.On("GetClient").Return(mockClient, nil)
@@ -78,11 +78,11 @@
Convey("Node object should have updated with labels and annotations", func() {
So(len(mockNode.Labels), ShouldEqual, len(fakeFeatureLabels))
for k, v := range fakeFeatureLabels {
-So(mockNode.Labels[labelNs+k], ShouldEqual, v)
+So(mockNode.Labels[LabelNs+k], ShouldEqual, v)
}
So(len(mockNode.Annotations), ShouldEqual, len(fakeAnnotations))
for k, v := range fakeAnnotations {
-So(mockNode.Annotations[annotationNs+k], ShouldEqual, v)
+So(mockNode.Annotations[AnnotationNs+k], ShouldEqual, v)
}
})
})
@@ -209,11 +209,11 @@ func TestSetLabels(t *testing.T) {
Convey("Node object should have updated with labels and annotations", func() {
So(len(mockNode.Labels), ShouldEqual, len(mockLabels))
for k, v := range mockLabels {
-So(mockNode.Labels[labelNs+k], ShouldEqual, v)
+So(mockNode.Labels[LabelNs+k], ShouldEqual, v)
}
So(len(mockNode.Annotations), ShouldEqual, len(expectedAnnotations))
for k, v := range expectedAnnotations {
-So(mockNode.Annotations[annotationNs+k], ShouldEqual, v)
+So(mockNode.Annotations[AnnotationNs+k], ShouldEqual, v)
}
})
})
@@ -229,9 +229,9 @@
})
Convey("Node object should only have whitelisted labels", func() {
So(len(mockNode.Labels), ShouldEqual, 1)
-So(mockNode.Labels, ShouldResemble, map[string]string{labelNs + "feature-2": "val-2"})
+So(mockNode.Labels, ShouldResemble, map[string]string{LabelNs + "feature-2": "val-2"})
-a := map[string]string{annotationNs + "worker.version": workerVer, annotationNs + "feature-labels": "feature-2"}
+a := map[string]string{AnnotationNs + "worker.version": workerVer, AnnotationNs + "feature-labels": "feature-2"}
So(len(mockNode.Annotations), ShouldEqual, len(a))
So(mockNode.Annotations, ShouldResemble, a)
})
@@ -252,9 +252,9 @@
})
Convey("Node object should only have allowed label namespaces", func() {
So(len(mockNode.Labels), ShouldEqual, 2)
-So(mockNode.Labels, ShouldResemble, map[string]string{labelNs + "feature-1": "val-1", "valid.ns/feature-2": "val-2"})
+So(mockNode.Labels, ShouldResemble, map[string]string{LabelNs + "feature-1": "val-1", "valid.ns/feature-2": "val-2"})
-a := map[string]string{annotationNs + "worker.version": workerVer, annotationNs + "feature-labels": "feature-1,valid.ns/feature-2"}
+a := map[string]string{AnnotationNs + "worker.version": workerVer, AnnotationNs + "feature-labels": "feature-1,valid.ns/feature-2"}
So(len(mockNode.Annotations), ShouldEqual, len(a))
So(mockNode.Annotations, ShouldResemble, a)
})
@@ -300,7 +300,7 @@ func TestAddLabels(t *testing.T) {
test1 := "test1"
labels[test1] = "true"
addLabels(n, labels)
-So(n.Labels, ShouldContainKey, labelNs+test1)
+So(n.Labels, ShouldContainKey, LabelNs+test1)
})
})
}


@@ -41,10 +41,10 @@ import (
const (
// Namespace for feature labels
-labelNs = "feature.node.kubernetes.io/"
+LabelNs = "feature.node.kubernetes.io/"
// Namespace for all NFD-related annotations
-annotationNs = "nfd.node.kubernetes.io/"
+AnnotationNs = "nfd.node.kubernetes.io/"
)
// package loggers
@@ -301,7 +301,7 @@ func updateNodeFeatures(helper apihelper.APIHelpers, nodeName string, labels Lab
}
// Remove old labels
-if l, ok := node.Annotations[annotationNs+"feature-labels"]; ok {
+if l, ok := node.Annotations[AnnotationNs+"feature-labels"]; ok {
oldLabels := strings.Split(l, ",")
removeLabels(node, oldLabels)
}
@@ -341,7 +341,7 @@ func removeLabels(n *api.Node, labelNames []string) {
if strings.Contains(l, "/") {
delete(n.Labels, l)
} else {
-delete(n.Labels, labelNs+l)
+delete(n.Labels, LabelNs+l)
}
}
}
@@ -352,7 +352,7 @@ func addLabels(n *api.Node, labels map[string]string) {
if strings.Contains(k, "/") {
n.Labels[k] = v
} else {
-n.Labels[labelNs+k] = v
+n.Labels[LabelNs+k] = v
}
}
}
@@ -360,6 +360,6 @@ func addLabels(n *api.Node, labels map[string]string) {
// Add Annotations to a Node object
func addAnnotations(n *api.Node, annotations map[string]string) {
for k, v := range annotations {
-n.Annotations[annotationNs+k] = v
+n.Annotations[AnnotationNs+k] = v
}
}


@@ -0,0 +1,94 @@
defaultFeatures:
labelWhitelist:
- "feature.node.kubernetes.io/cpu-cpuid.ADX"
- "feature.node.kubernetes.io/cpu-cpuid.AESNI"
- "feature.node.kubernetes.io/cpu-cpuid.AVX"
- "feature.node.kubernetes.io/cpu-cpuid.AVX2"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512BITALG"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512BW"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512CD"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512DQ"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512F"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512IFMA"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512VBMI"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512VBMI2"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512VL"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512VNNI"
- "feature.node.kubernetes.io/cpu-cpuid.AVX512VPOPCNTDQ"
- "feature.node.kubernetes.io/cpu-cpuid.FMA3"
- "feature.node.kubernetes.io/cpu-cpuid.GFNI"
- "feature.node.kubernetes.io/cpu-cpuid.HLE"
- "feature.node.kubernetes.io/cpu-cpuid.IBPB"
- "feature.node.kubernetes.io/cpu-cpuid.MPX"
- "feature.node.kubernetes.io/cpu-cpuid.RTM"
- "feature.node.kubernetes.io/cpu-cpuid.SHA"
- "feature.node.kubernetes.io/cpu-cpuid.STIBP"
- "feature.node.kubernetes.io/cpu-cpuid.VAES"
- "feature.node.kubernetes.io/cpu-cpuid.VMX"
- "feature.node.kubernetes.io/cpu-cpuid.VPCLMULQDQ"
- "feature.node.kubernetes.io/cpu-hardware_multithreading"
- "feature.node.kubernetes.io/cpu-power.sst_bf.enabled"
- "feature.node.kubernetes.io/cpu-pstate.turbo"
- "feature.node.kubernetes.io/cpu-rdt.RDTCMT"
- "feature.node.kubernetes.io/cpu-rdt.RDTL3CA"
- "feature.node.kubernetes.io/cpu-rdt.RDTMBA"
- "feature.node.kubernetes.io/cpu-rdt.RDTMBM"
- "feature.node.kubernetes.io/cpu-rdt.RDTMON"
- "feature.node.kubernetes.io/iommu-enabled"
- "feature.node.kubernetes.io/kernel-config.NO_HZ"
- "feature.node.kubernetes.io/kernel-config.NO_HZ_FULL"
- "feature.node.kubernetes.io/kernel-config.NO_HZ_IDLE"
- "feature.node.kubernetes.io/kernel-config.PREEMPT"
- "feature.node.kubernetes.io/kernel-selinux.enabled"
- "feature.node.kubernetes.io/kernel-version.full"
- "feature.node.kubernetes.io/kernel-version.major"
- "feature.node.kubernetes.io/kernel-version.minor"
- "feature.node.kubernetes.io/kernel-version.revision"
- "feature.node.kubernetes.io/memory-numa"
- "feature.node.kubernetes.io/memory-nv.dax"
- "feature.node.kubernetes.io/memory-nv.present"
- "feature.node.kubernetes.io/network-sriov.capable"
- "feature.node.kubernetes.io/network-sriov.configured"
- "feature.node.kubernetes.io/pci-0300_1a03.present"
- "feature.node.kubernetes.io/storage-nonrotationaldisk"
- "feature.node.kubernetes.io/system-os_release.ID"
- "feature.node.kubernetes.io/system-os_release.VERSION_ID"
- "feature.node.kubernetes.io/system-os_release.VERSION_ID.major"
- "feature.node.kubernetes.io/system-os_release.VERSION_ID.minor"
annotationWhitelist:
- "nfd.node.kubernetes.io/master.version"
- "nfd.node.kubernetes.io/worker.version"
- "nfd.node.kubernetes.io/feature-labels"
nodes:
my-node-1:
expectedLabelValues:
"feature.node.kubernetes.io/cpu-cpuid.ADX": "true"
"feature.node.kubernetes.io/cpu-cpuid.AESNI": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX2": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX512BW": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX512CD": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX512DQ": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX512F": "true"
"feature.node.kubernetes.io/cpu-cpuid.AVX512VL": "true"
"feature.node.kubernetes.io/cpu-cpuid.FMA3": "true"
"feature.node.kubernetes.io/cpu-cpuid.HLE": "true"
"feature.node.kubernetes.io/cpu-cpuid.MPX": "true"
"feature.node.kubernetes.io/cpu-cpuid.RTM": "true"
"feature.node.kubernetes.io/cpu-hardware_multithreading": "true"
"feature.node.kubernetes.io/kernel-version.major": "4"
"feature.node.kubernetes.io/pci-0300_1d0f.present": "true"
"feature.node.kubernetes.io/storage-nonrotationaldisk": "true"
"feature.node.kubernetes.io/system-os_release.ID": "Centos"
"feature.node.kubernetes.io/system-os_release.VERSION_ID.major": "7"
expectedLabelKeys:
- "feature.node.kubernetes.io/kernel-version.full"
- "feature.node.kubernetes.io/kernel-version.major"
- "feature.node.kubernetes.io/kernel-version.minor"
- "feature.node.kubernetes.io/kernel-version.revision"
- "feature.node.kubernetes.io/system-os_release.VERSION_ID"
- "feature.node.kubernetes.io/system-os_release.VERSION_ID.minor"
expectedAnnotationKeys:
- "nfd.node.kubernetes.io/worker.version"
- "nfd.node.kubernetes.io/feature-labels"


@@ -19,26 +19,83 @@ package e2e
import (
"flag"
"fmt"
"io/ioutil"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/ghodss/yaml"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
master "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
)
var (
dockerRepo = flag.String("nfd.repo", "quay.io/kubernetes_incubator/node-feature-discovery", "Docker repository to fetch image from")
dockerTag = flag.String("nfd.tag", "e2e-test", "Docker tag to use")
labelPrefix = "feature.node.kubernetes.io/"
dockerRepo = flag.String("nfd.repo", "quay.io/kubernetes_incubator/node-feature-discovery", "Docker repository to fetch image from")
dockerTag = flag.String("nfd.tag", "e2e-test", "Docker tag to use")
e2eConfigFile = flag.String("nfd.e2e-config", "", "Configuration parameters for end-to-end tests")
labelPrefix = "feature.node.kubernetes.io/"
conf *e2eConfig
)
type e2eConfig struct {
DefaultFeatures *struct {
LabelWhitelist lookupMap
AnnotationWhitelist lookupMap
Nodes map[string]nodeConfig
}
}
type nodeConfig struct {
ExpectedLabelValues map[string]string
ExpectedLabelKeys lookupMap
ExpectedAnnotationValues map[string]string
ExpectedAnnotationKeys lookupMap
}
type lookupMap map[string]struct{}
func (l *lookupMap) UnmarshalJSON(data []byte) error {
*l = lookupMap{}
slice := []string{}
err := yaml.Unmarshal(data, &slice)
if err != nil {
return err
}
for _, k := range slice {
(*l)[k] = struct{}{}
}
return nil
}
func readConfig() {
// Read and parse only once
if conf != nil || *e2eConfigFile == "" {
return
}
By("Reading end-to-end test configuration file")
data, err := ioutil.ReadFile(*e2eConfigFile)
Expect(err).NotTo(HaveOccurred())
By("Parsing end-to-end test configuration data")
err = yaml.Unmarshal(data, &conf)
Expect(err).NotTo(HaveOccurred())
}
// Create required RBAC configuration
func configureRBAC(cs clientset.Interface, ns string) error {
_, err := createServiceAccount(cs, ns)
@@ -134,7 +191,7 @@ func createService(cs clientset.Interface, ns string) (*v1.Service, error) {
Name: "nfd-master-e2e",
},
Spec: v1.ServiceSpec{
Selector: map[string]string{"app": "nfd-master-e2e"},
Selector: map[string]string{"name": "nfd-master-e2e"},
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
@@ -147,11 +204,11 @@
return cs.CoreV1().Services(ns).Create(svc)
}
-func nfdMasterPod(ns string, image string, onMasterNode bool) *v1.Pod {
+func nfdMasterPod(image string, onMasterNode bool) *v1.Pod {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-master-" + string(uuid.NewUUID()),
Labels: map[string]string{"app": "nfd-master-e2e"},
Labels: map[string]string{"name": "nfd-master-e2e"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -190,36 +247,163 @@ func nfdMasterPod(ns string, image string, onMasterNode bool) *v1.Pod {
return p
}
func nfdWorkerPod(ns string, image string) *v1.Pod {
return &v1.Pod{
func nfdWorkerPod(image string, extraArgs []string) *v1.Pod {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
// NOTE: We omit Volumes/VolumeMounts, at the moment as we only test the fake source
Containers: []v1.Container{
{
Name: "node-feature-discovery",
Image: image,
ImagePullPolicy: v1.PullAlways,
Command: []string{"nfd-worker"},
Args: []string{"--oneshot", "--sources=fake", "--server=nfd-master-e2e:8080"},
Env: []v1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
Spec: nfdWorkerPodSpec(image, extraArgs),
}
p.Spec.RestartPolicy = v1.RestartPolicyNever
return p
}
func nfdWorkerDaemonSet(image string, extraArgs []string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-" + string(uuid.NewUUID()),
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "nfd-worker"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "nfd-worker"},
},
Spec: nfdWorkerPodSpec(image, extraArgs),
},
MinReadySeconds: 5,
},
}
}
func nfdWorkerPodSpec(image string, extraArgs []string) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{
{
Name: "node-feature-discovery",
Image: image,
ImagePullPolicy: v1.PullAlways,
Command: []string{"nfd-worker"},
Args: append([]string{"--server=nfd-master-e2e:8080"}, extraArgs...),
Env: []v1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-boot",
MountPath: "/host-boot",
ReadOnly: true,
},
{
Name: "host-os-release",
MountPath: "/host-etc/os-release",
ReadOnly: true,
},
{
Name: "host-sys",
MountPath: "/host-sys",
ReadOnly: true,
},
},
},
ServiceAccountName: "nfd-master-e2e",
// NOTE: We do not set HostNetwork/DNSPolicy because we only test the fake source
RestartPolicy: v1.RestartPolicyNever,
},
ServiceAccountName: "nfd-master-e2e",
HostNetwork: true,
DNSPolicy: v1.DNSClusterFirstWithHostNet,
Volumes: []v1.Volume{
{
Name: "host-boot",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/boot",
Type: newHostPathType(v1.HostPathDirectory),
},
},
},
{
Name: "host-os-release",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/os-release",
Type: newHostPathType(v1.HostPathFile),
},
},
},
{
Name: "host-sys",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/sys",
Type: newHostPathType(v1.HostPathDirectory),
},
},
},
},
}
}
func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(typ)
return hostPathType
}
// cleanupNode deletes all NFD-related metadata from the Node object, i.e.
// labels and annotations
func cleanupNode(cs clientset.Interface) {
nodeList, err := cs.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, n := range nodeList.Items {
var err error
var node *v1.Node
for retry := 0; retry < 5; retry++ {
node, err = cs.CoreV1().Nodes().Get(n.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
update := false
// Remove labels
for key := range node.Labels {
if strings.HasPrefix(key, master.LabelNs) {
delete(node.Labels, key)
update = true
}
}
// Remove annotations
for key := range node.Annotations {
if strings.HasPrefix(key, master.AnnotationNs) {
delete(node.Annotations, key)
update = true
}
}
if update == false {
break
}
By("Deleting NFD labels and annotations from node " + node.Name)
_, err = cs.CoreV1().Nodes().Update(node)
if err != nil {
time.Sleep(100 * time.Millisecond)
} else {
break
}
}
Expect(err).NotTo(HaveOccurred())
}
}
@@ -227,28 +411,18 @@ func nfdWorkerPod(ns string, image string) *v1.Pod {
var _ = framework.KubeDescribe("Node Feature Discovery", func() {
f := framework.NewDefaultFramework("node-feature-discovery")
BeforeEach(func() {
err := configureRBAC(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
Context("when deploying a single nfd-master pod", func() {
var masterPod *v1.Pod
})
Context("when deployed with fake source enabled", func() {
It("should decorate the node with the fake feature labels", func() {
By("Creating a nfd master and worker pods and the nfd-master service on the selected node")
ns := f.Namespace.Name
image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
fakeFeatureLabels := map[string]string{
labelPrefix + "fake-fakefeature1": "true",
labelPrefix + "fake-fakefeature2": "true",
labelPrefix + "fake-fakefeature3": "true",
}
defer deconfigureRBAC(f.ClientSet, f.Namespace.Name)
BeforeEach(func() {
err := configureRBAC(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
// Launch nfd-master
masterPod := nfdMasterPod(ns, image, false)
masterPod, err := f.ClientSet.CoreV1().Pods(ns).Create(masterPod)
By("Creating nfd master pod and nfd-master service")
image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
masterPod = nfdMasterPod(image, false)
masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(masterPod)
Expect(err).NotTo(HaveOccurred())
// Create nfd-master service
@@ -260,27 +434,150 @@ var _ = framework.KubeDescribe("Node Feature Discovery", func() {
By("Waiting for the nfd-master service to be up")
Expect(framework.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
})
// Launch nfd-worker
workerPod := nfdWorkerPod(ns, image)
workerPod, err = f.ClientSet.CoreV1().Pods(ns).Create(workerPod)
AfterEach(func() {
err := deconfigureRBAC(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-worker pod to succeed")
Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, ns)).NotTo(HaveOccurred())
workerPod, err = f.ClientSet.CoreV1().Pods(ns).Get(workerPod.ObjectMeta.Name, metav1.GetOptions{})
})
By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
node, err := f.ClientSet.CoreV1().Nodes().Get(workerPod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for k, v := range fakeFeatureLabels {
Expect(node.Labels[k]).To(Equal(v))
}
//
// Simple test with only the fake source enabled
//
Context("and a single worker pod with fake source enabled", func() {
It("it should decorate the node with the fake feature labels", func() {
By("Removing the fake feature labels advertised by the node-feature-discovery pod")
for key := range fakeFeatureLabels {
framework.RemoveLabelOffNode(f.ClientSet, workerPod.Spec.NodeName, key)
}
fakeFeatureLabels := map[string]string{
master.LabelNs + "fake-fakefeature1": "true",
master.LabelNs + "fake-fakefeature2": "true",
master.LabelNs + "fake-fakefeature3": "true",
}
// Remove pre-existing stale annotations and labels
cleanupNode(f.ClientSet)
// Launch nfd-worker
By("Creating a nfd worker pod")
image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
workerPod := nfdWorkerPod(image, []string{"--oneshot", "--sources=fake"})
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(workerPod)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-worker pod to succeed")
Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(HaveOccurred())
workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(workerPod.ObjectMeta.Name, metav1.GetOptions{})
By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
node, err := f.ClientSet.CoreV1().Nodes().Get(workerPod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for k, v := range fakeFeatureLabels {
Expect(node.Labels[k]).To(Equal(v))
}
// Check that there are no unexpected NFD labels
for k := range node.Labels {
if strings.HasPrefix(k, master.LabelNs) {
Expect(fakeFeatureLabels).Should(HaveKey(k))
}
}
By("Deleting the node-feature-discovery worker pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(workerPod.ObjectMeta.Name, &metav1.DeleteOptions{})
cleanupNode(f.ClientSet)
})
})
//
// More comprehensive test when --e2e-node-config is enabled
//
Context("and nfd-workers as a daemonset with default sources enabled", func() {
It("the node labels listed in the e2e config should be present", func() {
readConfig()
if conf == nil {
Skip("no e2e-config was specified")
}
if conf.DefaultFeatures == nil {
Skip("no 'defaultFeatures' specified in e2e-config")
}
fConf := conf.DefaultFeatures
// Remove pre-existing stale annotations and labels
cleanupNode(f.ClientSet)
By("Creating nfd-worker daemonset")
workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(workerDS)
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
By("Getting node objects")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
if _, ok := fConf.Nodes[node.Name]; !ok {
e2elog.Logf("node %q missing from e2e-config, skipping...", node.Name)
continue
}
e2elog.Logf("verifying node %q...", node.Name)
nodeConf := fConf.Nodes[node.Name]
// Check labels
for k, v := range nodeConf.ExpectedLabelValues {
Expect(node.Labels).To(HaveKeyWithValue(k, v))
}
for k := range nodeConf.ExpectedLabelKeys {
Expect(node.Labels).To(HaveKey(k))
}
for k := range node.Labels {
if strings.HasPrefix(k, master.LabelNs) {
if _, ok := nodeConf.ExpectedLabelValues[k]; ok {
continue
}
if _, ok := nodeConf.ExpectedLabelKeys[k]; ok {
continue
}
// Ignore if the label key was not whitelisted
Expect(fConf.LabelWhitelist).NotTo(HaveKey(k))
}
}
// Check annotations
for k, v := range nodeConf.ExpectedAnnotationValues {
Expect(node.Annotations).To(HaveKeyWithValue(k, v))
}
for k := range nodeConf.ExpectedAnnotationKeys {
Expect(node.Annotations).To(HaveKey(k))
}
for k := range node.Annotations {
if strings.HasPrefix(k, master.AnnotationNs) {
if _, ok := nodeConf.ExpectedAnnotationValues[k]; ok {
continue
}
if _, ok := nodeConf.ExpectedAnnotationKeys[k]; ok {
continue
}
// Ignore if the annotation was not whitelisted
Expect(fConf.AnnotationWhitelist).NotTo(HaveKey(k))
}
}
// Node running nfd-master should have master version annotation
if node.Name == masterPod.Spec.NodeName {
Expect(node.Annotations).To(HaveKey(master.AnnotationNs + "master.version"))
}
}
By("Deleting nfd-worker daemonset")
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(workerDS.ObjectMeta.Name, &metav1.DeleteOptions{})
cleanupNode(f.ClientSet)
})
})
})
})