2019-04-25 14:41:26 +00:00
|
|
|
/*
|
|
|
|
Copyright 2018 The Kubernetes Authors.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package e2e
|
|
|
|
|
|
|
|
import (
|
2020-11-17 14:40:33 +00:00
|
|
|
"context"
|
2019-04-25 20:19:57 +00:00
|
|
|
"flag"
|
|
|
|
"fmt"
|
2020-02-21 14:16:37 +00:00
|
|
|
"io/ioutil"
|
2021-01-20 23:48:40 +00:00
|
|
|
"path/filepath"
|
2020-08-26 07:22:46 +00:00
|
|
|
"regexp"
|
2020-02-21 14:16:37 +00:00
|
|
|
"strings"
|
2019-04-25 20:19:57 +00:00
|
|
|
"time"
|
|
|
|
|
2021-01-20 23:48:40 +00:00
|
|
|
. "github.com/onsi/ginkgo"
|
|
|
|
. "github.com/onsi/gomega"
|
2020-11-17 14:40:33 +00:00
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
appsv1 "k8s.io/api/apps/v1"
|
2020-11-17 14:40:33 +00:00
|
|
|
v1 "k8s.io/api/core/v1"
|
2019-04-25 20:19:57 +00:00
|
|
|
rbacv1 "k8s.io/api/rbac/v1"
|
|
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
2022-01-12 13:09:42 +00:00
|
|
|
"k8s.io/apimachinery/pkg/labels"
|
2019-04-25 20:19:57 +00:00
|
|
|
"k8s.io/apimachinery/pkg/util/uuid"
|
2022-01-12 13:09:42 +00:00
|
|
|
"k8s.io/apimachinery/pkg/util/wait"
|
2019-04-25 20:19:57 +00:00
|
|
|
clientset "k8s.io/client-go/kubernetes"
|
2022-01-12 13:09:42 +00:00
|
|
|
"k8s.io/kubectl/pkg/util/podutils"
|
2019-04-25 14:41:26 +00:00
|
|
|
"k8s.io/kubernetes/test/e2e/framework"
|
2020-02-21 14:16:37 +00:00
|
|
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
2020-11-17 14:40:33 +00:00
|
|
|
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
|
2020-02-05 15:21:36 +00:00
|
|
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
2020-02-26 08:08:21 +00:00
|
|
|
|
|
|
|
master "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
|
2021-01-20 23:48:40 +00:00
|
|
|
"sigs.k8s.io/node-feature-discovery/source/custom"
|
2020-11-17 14:40:33 +00:00
|
|
|
"sigs.k8s.io/yaml"
|
2019-04-25 20:19:57 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Command line flags for the test suite, plus the lazily-loaded e2e
// configuration.
var (
	dockerRepo    = flag.String("nfd.repo", "gcr.io/k8s-staging-nfd/node-feature-discovery", "Docker repository to fetch image from")
	dockerTag     = flag.String("nfd.tag", "master", "Docker tag to use")
	e2eConfigFile = flag.String("nfd.e2e-config", "", "Configuration parameters for end-to-end tests")
	openShift     = flag.Bool("nfd.openshift", false, "Enable OpenShift specific bits")

	// conf holds the parsed contents of the file named by -nfd.e2e-config.
	// Populated at most once by readConfig(); stays nil when no config file
	// was specified.
	conf *e2eConfig
)
|
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
// e2eConfig is the top-level schema of the optional end-to-end test
// configuration file (-nfd.e2e-config), unmarshalled from YAML.
type e2eConfig struct {
	// DefaultFeatures describes the node labels/annotations expected when
	// nfd-worker runs with its default sources enabled. Nil when the config
	// file has no such section; callers must check before dereferencing.
	DefaultFeatures *struct {
		// LabelWhitelist lists NFD label keys tolerated on a node even when
		// not part of the node-specific expectations.
		LabelWhitelist lookupMap
		// AnnotationWhitelist is the analogous whitelist for annotations.
		AnnotationWhitelist lookupMap
		// Nodes maps a node-name regexp (compiled by readConfig) to the
		// expectations for nodes whose name matches it.
		Nodes map[string]nodeConfig
	}
}
|
|
|
|
|
|
|
|
// nodeConfig describes the labels and annotations expected on nodes whose
// name matches the regexp this entry was keyed under in the e2e config.
type nodeConfig struct {
	// nameRe is the compiled form of the map key regexp. It is filled in by
	// readConfig(), not by unmarshalling (unexported, so the YAML codec
	// ignores it).
	nameRe *regexp.Regexp
	// ExpectedLabelValues lists label key/value pairs that must be present.
	ExpectedLabelValues map[string]string
	// ExpectedLabelKeys lists label keys that must be present with any value.
	ExpectedLabelKeys lookupMap
	// ExpectedAnnotationValues lists annotation key/value pairs that must be present.
	ExpectedAnnotationValues map[string]string
	// ExpectedAnnotationKeys lists annotation keys that must be present with any value.
	ExpectedAnnotationKeys lookupMap
}
|
|
|
|
|
|
|
|
type lookupMap map[string]struct{}
|
|
|
|
|
|
|
|
func (l *lookupMap) UnmarshalJSON(data []byte) error {
|
|
|
|
*l = lookupMap{}
|
|
|
|
slice := []string{}
|
|
|
|
|
|
|
|
err := yaml.Unmarshal(data, &slice)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, k := range slice {
|
|
|
|
(*l)[k] = struct{}{}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func readConfig() {
|
|
|
|
// Read and parse only once
|
|
|
|
if conf != nil || *e2eConfigFile == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-01-20 23:48:40 +00:00
|
|
|
By("Reading end-to-end test configuration file")
|
2020-02-21 14:16:37 +00:00
|
|
|
data, err := ioutil.ReadFile(*e2eConfigFile)
|
2021-01-20 23:48:40 +00:00
|
|
|
Expect(err).NotTo(HaveOccurred())
|
2020-02-21 14:16:37 +00:00
|
|
|
|
2021-01-20 23:48:40 +00:00
|
|
|
By("Parsing end-to-end test configuration data")
|
2020-02-21 14:16:37 +00:00
|
|
|
err = yaml.Unmarshal(data, &conf)
|
2021-01-20 23:48:40 +00:00
|
|
|
Expect(err).NotTo(HaveOccurred())
|
2020-08-26 07:22:46 +00:00
|
|
|
|
|
|
|
// Pre-compile node name matching regexps
|
|
|
|
for name, nodeConf := range conf.DefaultFeatures.Nodes {
|
|
|
|
nodeConf.nameRe, err = regexp.Compile(name)
|
2021-01-20 23:48:40 +00:00
|
|
|
Expect(err).NotTo(HaveOccurred())
|
2020-08-26 07:22:46 +00:00
|
|
|
conf.DefaultFeatures.Nodes[name] = nodeConf
|
|
|
|
}
|
2020-02-21 14:16:37 +00:00
|
|
|
}
|
|
|
|
|
2022-01-12 13:09:42 +00:00
|
|
|
// waitForPodsReady waits for the pods to become ready.
|
|
|
|
// NOTE: copied from k8s v1.22 after which is was removed from there.
|
|
|
|
// Convenient for checking that all pods of a daemonset are ready.
|
|
|
|
func waitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
|
|
|
const poll = 2 * time.Second
|
|
|
|
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
|
|
|
options := metav1.ListOptions{LabelSelector: label.String()}
|
|
|
|
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
|
|
|
|
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
|
|
|
if err != nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
for _, pod := range pods.Items {
|
|
|
|
if !podutils.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-04-25 20:19:57 +00:00
|
|
|
// Create required RBAC configuration
|
|
|
|
func configureRBAC(cs clientset.Interface, ns string) error {
|
|
|
|
_, err := createServiceAccount(cs, ns)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = createClusterRole(cs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = createClusterRoleBinding(cs, ns)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove RBAC configuration
|
|
|
|
func deconfigureRBAC(cs clientset.Interface, ns string) error {
|
2020-11-17 14:40:33 +00:00
|
|
|
err := cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
|
2019-04-25 20:19:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-11-17 14:40:33 +00:00
|
|
|
err = cs.RbacV1().ClusterRoles().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
|
2019-04-25 20:19:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-11-17 14:40:33 +00:00
|
|
|
err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
|
2019-04-25 20:19:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Configure service account required by NFD
|
|
|
|
func createServiceAccount(cs clientset.Interface, ns string) (*v1.ServiceAccount, error) {
|
|
|
|
sa := &v1.ServiceAccount{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: "nfd-master-e2e",
|
|
|
|
Namespace: ns,
|
|
|
|
},
|
|
|
|
}
|
2020-11-17 14:40:33 +00:00
|
|
|
return cs.CoreV1().ServiceAccounts(ns).Create(context.TODO(), sa, metav1.CreateOptions{})
|
2019-04-25 20:19:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Configure cluster role required by NFD.
// nfd-master needs to get/patch/update Node objects; on OpenShift it also
// needs "use" on the hostaccess SecurityContextConstraints.
func createClusterRole(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
	cr := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"nodes"},
				Verbs:     []string{"get", "patch", "update"},
			},
		},
	}
	if *openShift {
		cr.Rules = append(cr.Rules,
			rbacv1.PolicyRule{
				// needed on OpenShift clusters
				APIGroups:     []string{"security.openshift.io"},
				Resources:     []string{"securitycontextconstraints"},
				ResourceNames: []string{"hostaccess"},
				Verbs:         []string{"use"},
			})
	}
	// NOTE(review): Update fails with NotFound when the ClusterRole does not
	// already exist -- TODO confirm the e2e environment pre-creates it (or
	// consider a create-or-update fallback).
	return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
}
|
|
|
|
|
|
|
|
// Configure cluster role binding required by NFD: binds the nfd-master-e2e
// service account in namespace ns to the nfd-master-e2e ClusterRole.
func createClusterRoleBinding(cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      rbacv1.ServiceAccountKind,
				Name:      "nfd-master-e2e",
				Namespace: ns,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "nfd-master-e2e",
		},
	}

	// NOTE(review): Update fails with NotFound when the binding does not
	// already exist -- TODO confirm this precondition holds (see
	// createClusterRole for the same pattern).
	return cs.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
}
|
|
|
|
|
|
|
|
// createService creates nfd-master Service: a ClusterIP service on TCP port
// 8080 selecting pods labelled name=nfd-master-e2e (the label set by
// nfdMasterPod).
func createService(cs clientset.Interface, ns string) (*v1.Service, error) {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-master-e2e",
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"name": "nfd-master-e2e"},
			Ports: []v1.ServicePort{
				{
					Protocol: v1.ProtocolTCP,
					Port:     8080,
				},
			},
			Type: v1.ServiceTypeClusterIP,
		},
	}
	return cs.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})
}
|
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
// nfdMasterPod builds a pod spec that runs nfd-master from the given image.
// With onMasterNode set, the pod is pinned to nodes labelled
// node-role.kubernetes.io/master and tolerates their NoSchedule taint.
func nfdMasterPod(image string, onMasterNode bool) *v1.Pod {
	p := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			// Unique pod name; the fixed "name" label is what the
			// nfd-master-e2e service selector matches on.
			Name:   "nfd-master-" + string(uuid.NewUUID()),
			Labels: map[string]string{"name": "nfd-master-e2e"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "node-feature-discovery",
					Image:           image,
					ImagePullPolicy: v1.PullAlways,
					Command:         []string{"nfd-master"},
					Env: []v1.EnvVar{
						{
							// Expose the scheduled node's name to the container
							// via the downward API.
							Name: "NODE_NAME",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "spec.nodeName",
								},
							},
						},
					},
				},
			},
			ServiceAccountName: "nfd-master-e2e",
			RestartPolicy:      v1.RestartPolicyNever,
		},
	}
	if onMasterNode {
		p.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
		p.Spec.Tolerations = []v1.Toleration{
			{
				Key:      "node-role.kubernetes.io/master",
				Operator: v1.TolerationOpEqual,
				Value:    "",
				Effect:   v1.TaintEffectNoSchedule,
			},
		}
	}
	return p
}
|
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
func nfdWorkerPod(image string, extraArgs []string) *v1.Pod {
|
|
|
|
p := &v1.Pod{
|
2019-04-25 20:19:57 +00:00
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: "nfd-worker-" + string(uuid.NewUUID()),
|
|
|
|
},
|
2020-02-21 14:16:37 +00:00
|
|
|
Spec: nfdWorkerPodSpec(image, extraArgs),
|
|
|
|
}
|
|
|
|
|
|
|
|
p.Spec.RestartPolicy = v1.RestartPolicyNever
|
|
|
|
|
|
|
|
return p
|
|
|
|
}
|
|
|
|
|
|
|
|
// nfdWorkerDaemonSet builds a daemonset that runs nfd-worker with the given
// image and extra command line arguments on every node.
func nfdWorkerDaemonSet(image string, extraArgs []string) *appsv1.DaemonSet {
	return &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "nfd-worker-" + string(uuid.NewUUID()),
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"name": "nfd-worker"},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": "nfd-worker"},
				},
				Spec: nfdWorkerPodSpec(image, extraArgs),
			},
			// Matches the minReadySeconds the tests pass to waitForPodsReady.
			MinReadySeconds: 5,
		},
	}
}
|
|
|
|
|
|
|
|
// nfdWorkerPodSpec builds the pod spec shared by the standalone worker pod
// and the worker daemonset. The host's /boot, /etc/os-release, /sys,
// /usr/lib and /usr/src are mounted read-only into the container for feature
// discovery; extraArgs is appended to the nfd-worker command line.
func nfdWorkerPodSpec(image string, extraArgs []string) v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{
			{
				Name:            "node-feature-discovery",
				Image:           image,
				ImagePullPolicy: v1.PullAlways,
				Command:         []string{"nfd-worker"},
				// Workers talk to the nfd-master-e2e service created by the tests.
				Args: append([]string{"-server=nfd-master-e2e:8080"}, extraArgs...),
				Env: []v1.EnvVar{
					{
						// Downward API: the node the worker pod landed on.
						Name: "NODE_NAME",
						ValueFrom: &v1.EnvVarSource{
							FieldRef: &v1.ObjectFieldSelector{
								FieldPath: "spec.nodeName",
							},
						},
					},
				},
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "host-boot",
						MountPath: "/host-boot",
						ReadOnly:  true,
					},
					{
						Name:      "host-os-release",
						MountPath: "/host-etc/os-release",
						ReadOnly:  true,
					},
					{
						Name:      "host-sys",
						MountPath: "/host-sys",
						ReadOnly:  true,
					},
					{
						Name:      "host-usr-lib",
						MountPath: "/host-usr/lib",
						ReadOnly:  true,
					},
					{
						Name:      "host-usr-src",
						MountPath: "/host-usr/src",
						ReadOnly:  true,
					},
				},
			},
		},
		ServiceAccountName: "nfd-master-e2e",
		DNSPolicy:          v1.DNSClusterFirstWithHostNet,
		Volumes: []v1.Volume{
			{
				Name: "host-boot",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/boot",
						Type: newHostPathType(v1.HostPathDirectory),
					},
				},
			},
			{
				Name: "host-os-release",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/etc/os-release",
						Type: newHostPathType(v1.HostPathFile),
					},
				},
			},
			{
				Name: "host-sys",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/sys",
						Type: newHostPathType(v1.HostPathDirectory),
					},
				},
			},
			{
				Name: "host-usr-lib",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/usr/lib",
						Type: newHostPathType(v1.HostPathDirectory),
					},
				},
			},
			{
				Name: "host-usr-src",
				VolumeSource: v1.VolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/usr/src",
						Type: newHostPathType(v1.HostPathDirectory),
					},
				},
			},
		},
	}
}
|
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
|
|
|
|
hostPathType := new(v1.HostPathType)
|
|
|
|
*hostPathType = v1.HostPathType(typ)
|
|
|
|
return hostPathType
|
|
|
|
}
|
2019-04-25 20:19:57 +00:00
|
|
|
|
2020-02-21 14:16:37 +00:00
|
|
|
// cleanupNode deletes all NFD-related metadata from the Node object, i.e.
// labels and annotations. Each node is retried up to 5 times to ride out
// update conflicts; the final update error (if any) fails the spec.
func cleanupNode(cs clientset.Interface) {
	nodeList, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())

	for _, n := range nodeList.Items {
		var err error
		var node *v1.Node
		for retry := 0; retry < 5; retry++ {
			// Re-fetch a fresh copy on every attempt so the update is based
			// on the latest resourceVersion.
			node, err = cs.CoreV1().Nodes().Get(context.TODO(), n.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())

			update := false
			// Remove labels
			for key := range node.Labels {
				if strings.HasPrefix(key, master.FeatureLabelNs) {
					delete(node.Labels, key)
					update = true
				}
			}

			// Remove annotations
			for key := range node.Annotations {
				if strings.HasPrefix(key, master.AnnotationNsBase) {
					delete(node.Annotations, key)
					update = true
				}
			}

			// Nothing NFD-related left on this node.
			if !update {
				break
			}

			By("Deleting NFD labels and annotations from node " + node.Name)
			_, err = cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
			if err != nil {
				// Back off briefly and retry (e.g. on conflict).
				time.Sleep(100 * time.Millisecond)
			} else {
				break
			}
		}
		// err holds the last update attempt's result; all-retries-failed
		// aborts the spec here.
		Expect(err).NotTo(HaveOccurred())
	}
}
|
|
|
|
|
|
|
|
// Actual test suite.
// Three specs share one nfd-master deployment (created in BeforeEach):
//  1. a single one-shot worker with only the fake source,
//  2. a worker daemonset checked against the optional e2e config file,
//  3. a worker daemonset with two extra custom-source ConfigMaps.
var _ = SIGDescribe("Node Feature Discovery", func() {
	f := framework.NewDefaultFramework("node-feature-discovery")

	Context("when deploying a single nfd-master pod", func() {
		var masterPod *v1.Pod

		BeforeEach(func() {
			err := configureRBAC(f.ClientSet, f.Namespace.Name)
			Expect(err).NotTo(HaveOccurred())

			// Launch nfd-master
			By("Creating nfd master pod and nfd-master service")
			image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
			masterPod = nfdMasterPod(image, false)
			masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), masterPod, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())

			// Create nfd-master service
			nfdSvc, err := createService(f.ClientSet, f.Namespace.Name)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for the nfd-master pod to be running")
			Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())

			By("Waiting for the nfd-master service to be up")
			Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
		})

		AfterEach(func() {
			err := deconfigureRBAC(f.ClientSet, f.Namespace.Name)
			Expect(err).NotTo(HaveOccurred())
		})

		//
		// Simple test with only the fake source enabled
		//
		Context("and a single worker pod with fake source enabled", func() {
			It("it should decorate the node with the fake feature labels", func() {

				fakeFeatureLabels := map[string]string{
					master.FeatureLabelNs + "/fake-fakefeature1": "true",
					master.FeatureLabelNs + "/fake-fakefeature2": "true",
					master.FeatureLabelNs + "/fake-fakefeature3": "true",
				}

				// Remove pre-existing stale annotations and labels
				cleanupNode(f.ClientSet)

				// Launch nfd-worker
				By("Creating a nfd worker pod")
				image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
				workerPod := nfdWorkerPod(image, []string{"-oneshot", "-label-sources=fake"})
				workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
				Expect(err).NotTo(HaveOccurred())

				By("Waiting for the nfd-worker pod to succeed")
				Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(HaveOccurred())
				// Re-fetch to learn which node the one-shot worker ran on.
				workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), workerPod.ObjectMeta.Name, metav1.GetOptions{})
				Expect(err).NotTo(HaveOccurred())

				By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), workerPod.Spec.NodeName, metav1.GetOptions{})
				Expect(err).NotTo(HaveOccurred())
				for k, v := range fakeFeatureLabels {
					Expect(node.Labels[k]).To(Equal(v))
				}

				// Check that there are no unexpected NFD labels
				for k := range node.Labels {
					if strings.HasPrefix(k, master.FeatureLabelNs) {
						Expect(fakeFeatureLabels).Should(HaveKey(k))
					}
				}

				By("Deleting the node-feature-discovery worker pod")
				err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), workerPod.ObjectMeta.Name, metav1.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())

				cleanupNode(f.ClientSet)
			})
		})

		//
		// More comprehensive test when --e2e-node-config is enabled
		//
		Context("and nfd-workers as a daemonset with default sources enabled", func() {
			It("the node labels and annotations listed in the e2e config should be present", func() {
				readConfig()
				if conf == nil {
					Skip("no e2e-config was specified")
				}
				if conf.DefaultFeatures == nil {
					Skip("no 'defaultFeatures' specified in e2e-config")
				}
				fConf := conf.DefaultFeatures

				// Remove pre-existing stale annotations and labels
				cleanupNode(f.ClientSet)

				By("Creating nfd-worker daemonset")
				workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
				workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
				Expect(err).NotTo(HaveOccurred())

				By("Waiting for daemonset pods to be ready")
				Expect(waitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

				By("Getting node objects")
				nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
				Expect(err).NotTo(HaveOccurred())

				for _, node := range nodeList.Items {
					// Find the first config entry whose name regexp matches
					// this node; nodes with no matching rule are skipped.
					var nodeConf *nodeConfig
					for _, conf := range fConf.Nodes {
						if conf.nameRe.MatchString(node.Name) {
							e2elog.Logf("node %q matches rule %q", node.Name, conf.nameRe)
							nodeConf = &conf
							break
						}
					}
					if nodeConf == nil {
						e2elog.Logf("node %q has no matching rule in e2e-config, skipping...", node.Name)
						continue
					}

					// Check labels
					e2elog.Logf("verifying labels of node %q...", node.Name)
					for k, v := range nodeConf.ExpectedLabelValues {
						Expect(node.Labels).To(HaveKeyWithValue(k, v))
					}
					for k := range nodeConf.ExpectedLabelKeys {
						Expect(node.Labels).To(HaveKey(k))
					}
					for k := range node.Labels {
						if strings.HasPrefix(k, master.FeatureLabelNs) {
							if _, ok := nodeConf.ExpectedLabelValues[k]; ok {
								continue
							}
							if _, ok := nodeConf.ExpectedLabelKeys[k]; ok {
								continue
							}
							// Ignore if the label key was not whitelisted
							Expect(fConf.LabelWhitelist).NotTo(HaveKey(k))
						}
					}

					// Check annotations
					e2elog.Logf("verifying annotations of node %q...", node.Name)
					for k, v := range nodeConf.ExpectedAnnotationValues {
						Expect(node.Annotations).To(HaveKeyWithValue(k, v))
					}
					for k := range nodeConf.ExpectedAnnotationKeys {
						Expect(node.Annotations).To(HaveKey(k))
					}
					for k := range node.Annotations {
						if strings.HasPrefix(k, master.AnnotationNsBase) {
							if _, ok := nodeConf.ExpectedAnnotationValues[k]; ok {
								continue
							}
							if _, ok := nodeConf.ExpectedAnnotationKeys[k]; ok {
								continue
							}
							// Ignore if the annotation was not whitelisted
							Expect(fConf.AnnotationWhitelist).NotTo(HaveKey(k))
						}
					}

					// Node running nfd-master should have master version annotation
					// NOTE(review): key is AnnotationNsBase concatenated directly
					// with "master.version" -- verify no "/" separator is needed
					// between them (cf. FeatureLabelNs + "/..." above).
					if node.Name == masterPod.Spec.NodeName {
						Expect(node.Annotations).To(HaveKey(master.AnnotationNsBase + "master.version"))
					}
				}

				By("Deleting nfd-worker daemonset")
				err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())

				cleanupNode(f.ClientSet)
			})
		})

		//
		// Test custom nodename source configured in 2 additional ConfigMaps
		//
		Context("and nfd-workers as a daemonset with 2 additional configmaps for the custom source configured", func() {
			It("the nodename matching features listed in the configmaps should be present", func() {
				// Remove pre-existing stale annotations and labels
				cleanupNode(f.ClientSet)

				By("Getting a worker node")

				// We need a valid nodename for the configmap
				nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
				Expect(err).NotTo(HaveOccurred())
				Expect(len(nodeList.Items)).ToNot(BeZero())

				// Prefer a non-master node; fall back to the first node.
				targetNodeName := nodeList.Items[0].Name
				for _, node := range nodeList.Items {
					if _, ok := node.Labels["node-role.kubernetes.io/master"]; !ok {
						targetNodeName = node.Name
						break
					}
				}
				Expect(targetNodeName).ToNot(BeEmpty(), "No worker node found")

				// create a wildcard name as well for this node
				targetNodeNameWildcard := fmt.Sprintf("%s.*%s", targetNodeName[:2], targetNodeName[4:])

				By("Creating the configmaps")
				targetLabelName := "nodename-test"
				targetLabelValue := "true"

				targetLabelNameWildcard := "nodename-test-wildcard"
				targetLabelValueWildcard := "customValue"

				targetLabelNameNegative := "nodename-test-negative"

				// create 2 configmaps
				data1 := make(map[string]string)
				data1["custom1.conf"] = `
- name: ` + targetLabelName + `
  matchOn:
  # default value is true
  - nodename:
    - ` + targetNodeName

				cm1 := &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "custom-config-extra-" + string(uuid.NewUUID()),
					},
					Data: data1,
				}
				cm1, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm1, metav1.CreateOptions{})
				Expect(err).NotTo(HaveOccurred())

				data2 := make(map[string]string)
				data2["custom1.conf"] = `
- name: ` + targetLabelNameWildcard + `
  value: ` + targetLabelValueWildcard + `
  matchOn:
  - nodename:
    - ` + targetNodeNameWildcard + `
- name: ` + targetLabelNameNegative + `
  matchOn:
  - nodename:
    - "thisNameShouldNeverMatch"`

				cm2 := &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name: "custom-config-extra-" + string(uuid.NewUUID()),
					},
					Data: data2,
				}
				cm2, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm2, metav1.CreateOptions{})
				Expect(err).NotTo(HaveOccurred())

				By("Creating nfd-worker daemonset with configmap mounted")
				workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})

				// add configmap mount config
				volumeName1 := "custom-configs-extra1"
				volumeName2 := "custom-configs-extra2"
				workerDS.Spec.Template.Spec.Volumes = append(workerDS.Spec.Template.Spec.Volumes,
					v1.Volume{
						Name: volumeName1,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: cm1.Name,
								},
							},
						},
					},
					v1.Volume{
						Name: volumeName2,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: cm2.Name,
								},
							},
						},
					},
				)
				workerDS.Spec.Template.Spec.Containers[0].VolumeMounts = append(workerDS.Spec.Template.Spec.Containers[0].VolumeMounts,
					v1.VolumeMount{
						Name:      volumeName1,
						ReadOnly:  true,
						MountPath: filepath.Join(custom.Directory, "cm1"),
					},
					v1.VolumeMount{
						Name:      volumeName2,
						ReadOnly:  true,
						MountPath: filepath.Join(custom.Directory, "cm2"),
					},
				)

				workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
				Expect(err).NotTo(HaveOccurred())

				By("Waiting for daemonset pods to be ready")
				Expect(waitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

				By("Getting target node and checking labels")
				targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
				Expect(err).ToNot(HaveOccurred())

				labelFound := false
				labelWildcardFound := false
				labelNegativeFound := false
				for k := range targetNode.Labels {
					if strings.Contains(k, targetLabelName) {
						if targetNode.Labels[k] == targetLabelValue {
							labelFound = true
						}
					}
					if strings.Contains(k, targetLabelNameWildcard) {
						if targetNode.Labels[k] == targetLabelValueWildcard {
							labelWildcardFound = true
						}
					}
					if strings.Contains(k, targetLabelNameNegative) {
						labelNegativeFound = true
					}
				}

				Expect(labelFound).To(BeTrue(), "label not found!")
				Expect(labelWildcardFound).To(BeTrue(), "label for wildcard nodename not found!")
				Expect(labelNegativeFound).To(BeFalse(), "label for not existing nodename found!")

				By("Deleting nfd-worker daemonset")
				err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())

				cleanupNode(f.ClientSet)
			})
		})

	})
})
|