Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git (synced 2025-03-13 20:30:03 +00:00)
test/e2e: more flexible pod spec generation
Change the pod spec generator functions to accept parameterization in the form of more generic "mutator functions". This makes the addition of new test-specific pod spec customizations a lot cleaner, and hopefully makes the code a bit more readable as well. Also, slightly simplify SpecWithConfigMap() by dropping one redundant argument.

Inspired by latest contributions by Talor Itzhak (titzhak@redhat.com).
parent 592d6c67d0
commit 1719ce6736
3 changed files with 96 additions and 88 deletions
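For orientation before the diff: the commit moves the pod and daemonset spec generators to a functional-options ("mutator function") style. The sketch below illustrates the pattern; it is not part of the commit, and buildPodSpec is a hypothetical stand-in for the real generators (nfdWorkerPodSpec and friends) shown in the diff.

package utils

import (
    corev1 "k8s.io/api/core/v1"
)

// PodSpecOption is a mutator applied to a generated PodSpec.
type PodSpecOption func(spec *corev1.PodSpec)

// SpecWithContainerImage sets the image of the first container.
func SpecWithContainerImage(image string) PodSpecOption {
    return func(spec *corev1.PodSpec) {
        spec.Containers[0].Image = image
    }
}

// buildPodSpec is a stand-in for the real generators: it builds a default
// spec and then lets each option mutate it in place.
func buildPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
    p := &corev1.PodSpec{
        Containers: []corev1.Container{{Name: "node-feature-discovery"}},
    }
    for _, o := range opts {
        o(p)
    }
    return p
}

A new test-specific customization is then just another PodSpecOption (or an inline closure) passed at the call site, instead of a new parameter on every generator.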
@@ -112,8 +112,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
         // Launch nfd-master
         By("Creating nfd master pod and nfd-master service")
-        image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
-        masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(image, false))
+        imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
+        masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
 
         // Create nfd-master service
         nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -155,8 +155,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
         // Launch nfd-worker
         By("Creating a nfd worker pod")
-        image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
-        workerPod := testutils.NFDWorkerPod(image, []string{"-oneshot", "-label-sources=fake"})
+        podSpecOpts := []testutils.PodSpecOption{
+            testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+            testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
+        }
+        workerPod := testutils.NFDWorkerPod(podSpecOpts...)
         workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
         Expect(err).NotTo(HaveOccurred())
 
@@ -202,7 +205,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
         fConf := cfg.DefaultFeatures
 
         By("Creating nfd-worker daemonset")
-        workerDS := testutils.NFDWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
+        podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
+        workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
         workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
         Expect(err).NotTo(HaveOccurred())
 
@@ -343,45 +347,12 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
         Expect(err).NotTo(HaveOccurred())
 
         By("Creating nfd-worker daemonset with configmap mounted")
-        workerDS := testutils.NFDWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
-
-        // add configmap mount config
-        volumeName1 := "custom-configs-extra1"
-        volumeName2 := "custom-configs-extra2"
-        workerDS.Spec.Template.Spec.Volumes = append(workerDS.Spec.Template.Spec.Volumes,
-            corev1.Volume{
-                Name: volumeName1,
-                VolumeSource: corev1.VolumeSource{
-                    ConfigMap: &corev1.ConfigMapVolumeSource{
-                        LocalObjectReference: corev1.LocalObjectReference{
-                            Name: cm1.Name,
-                        },
-                    },
-                },
-            },
-            corev1.Volume{
-                Name: volumeName2,
-                VolumeSource: corev1.VolumeSource{
-                    ConfigMap: &corev1.ConfigMapVolumeSource{
-                        LocalObjectReference: corev1.LocalObjectReference{
-                            Name: cm2.Name,
-                        },
-                    },
-                },
-            },
-        )
-        workerDS.Spec.Template.Spec.Containers[0].VolumeMounts = append(workerDS.Spec.Template.Spec.Containers[0].VolumeMounts,
-            corev1.VolumeMount{
-                Name:      volumeName1,
-                ReadOnly:  true,
-                MountPath: filepath.Join(custom.Directory, "cm1"),
-            },
-            corev1.VolumeMount{
-                Name:      volumeName2,
-                ReadOnly:  true,
-                MountPath: filepath.Join(custom.Directory, "cm2"),
-            },
-        )
+        podSpecOpts := []testutils.PodSpecOption{
+            testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+            testutils.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
+            testutils.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
+        }
+        workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
 
         workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
         Expect(err).NotTo(HaveOccurred())
@@ -449,8 +420,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 
     It("custom labels from the NodeFeatureRule rules should be created", func() {
         By("Creating nfd-worker daemonset")
-        workerArgs := []string{"-feature-sources=fake", "-label-sources=", "-sleep-interval=1s"}
-        workerDS := testutils.NFDWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), workerArgs)
+        podSpecOpts := []testutils.PodSpecOption{
+            testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+            testutils.SpecWithContainerExtraArgs("-feature-sources=fake", "-label-sources=", "-sleep-interval=1s"),
+        }
+        workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
         workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
         Expect(err).NotTo(HaveOccurred())
 

@@ -71,8 +71,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
         Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
 
-        image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
-        f.PodClient().CreateSync(testutils.NFDMasterPod(image, false))
+        imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
+        f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
 
         // Create nfd-master service
         masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -119,7 +119,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
         kcfg := cfg.GetKubeletConfig()
         By(fmt.Sprintf("Using config (%#v)", kcfg))
 
-        topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
+        podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
+        topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
     })
 
     It("should fill the node resource topologies CR with the data", func() {
@@ -282,8 +283,11 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
         kcfg := cfg.GetKubeletConfig()
         By(fmt.Sprintf("Using config (%#v)", kcfg))
 
-        opts := testutils.SpecWithConfigMap(cm.Name, cm.Name, "/etc/kubernetes/node-feature-discovery")
-        topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{}, opts)
+        podSpecOpts := []testutils.PodSpecOption{
+            testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+            testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
+        }
+        topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
     })
 
     It("noderesourcetopology should not advertise the memory resource", func() {

@@ -111,8 +111,10 @@ func DeletePodSyncByName(f *framework.Framework, podName string) {
     f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
 }
 
+type PodSpecOption func(spec *corev1.PodSpec)
+
 // NFDMasterPod provide NFD master pod definition
-func NFDMasterPod(image string, onMasterNode bool) *corev1.Pod {
+func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod {
     p := &corev1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             GenerateName: "nfd-master-",
@@ -122,7 +124,6 @@ func NFDMasterPod(image string, onMasterNode bool) *corev1.Pod {
         Containers: []corev1.Container{
             {
                 Name:            "node-feature-discovery",
-                Image:           image,
                 ImagePullPolicy: pullPolicy(),
                 Command:         []string{"nfd-master"},
                 Env: []corev1.EnvVar{
@@ -141,27 +142,20 @@ func NFDMasterPod(image string, onMasterNode bool) *corev1.Pod {
             RestartPolicy: corev1.RestartPolicyNever,
         },
     }
-    if onMasterNode {
-        p.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
-        p.Spec.Tolerations = []corev1.Toleration{
-            {
-                Key:      "node-role.kubernetes.io/master",
-                Operator: corev1.TolerationOpEqual,
-                Value:    "",
-                Effect:   corev1.TaintEffectNoSchedule,
-            },
-        }
-    }
+
+    for _, o := range opts {
+        o(&p.Spec)
+    }
     return p
 }
 
 // NFDWorkerPod provides NFD worker pod definition
-func NFDWorkerPod(image string, extraArgs []string) *corev1.Pod {
+func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod {
     p := &corev1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: "nfd-worker-" + string(uuid.NewUUID()),
         },
-        Spec: *nfdWorkerPodSpec(image, extraArgs),
+        Spec: *nfdWorkerPodSpec(opts...),
     }
 
     p.Spec.RestartPolicy = corev1.RestartPolicyNever
@@ -170,29 +164,58 @@ func NFDWorkerPod(image string, extraArgs []string) *corev1.Pod {
 }
 
 // NFDWorkerDaemonSet provides the NFD daemon set worker definition
-func NFDWorkerDaemonSet(image string, extraArgs []string) *appsv1.DaemonSet {
-    podSpec := nfdWorkerPodSpec(image, extraArgs)
-    return newDaemonSet("nfd-worker", podSpec)
+func NFDWorkerDaemonSet(opts ...PodSpecOption) *appsv1.DaemonSet {
+    return newDaemonSet("nfd-worker", nfdWorkerPodSpec(opts...))
 }
 
 // NFDTopologyUpdaterDaemonSet provides the NFD daemon set topology updater
-func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, image string, extraArgs []string, options ...func(spec *corev1.PodSpec)) *appsv1.DaemonSet {
-    podSpec := nfdTopologyUpdaterPodSpec(kc, image, extraArgs)
-    for _, o := range options {
-        o(podSpec)
-    }
-    return newDaemonSet("nfd-topology-updater", podSpec)
+func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, opts ...PodSpecOption) *appsv1.DaemonSet {
+    return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterPodSpec(kc, opts...))
 }
 
-func SpecWithConfigMap(cmName, volumeName, mountPath string) func(spec *corev1.PodSpec) {
+// SpecWithContainerImage returns a PodSpecOption that sets the image used by the first container.
+func SpecWithContainerImage(image string) PodSpecOption {
+    return func(spec *corev1.PodSpec) {
+        // NOTE: we might want to make the container number a parameter
+        cnt := &spec.Containers[0]
+        cnt.Image = image
+    }
+}
+
+// SpecWithContainerExtraArgs returns a PodSpecOption that adds extra args to the first container.
+func SpecWithContainerExtraArgs(args ...string) PodSpecOption {
+    return func(spec *corev1.PodSpec) {
+        // NOTE: we might want to make the container number a parameter
+        cnt := &spec.Containers[0]
+        cnt.Args = append(cnt.Args, args...)
+    }
+}
+
+// SpecWithMasterNodeSelector returns a PodSpecOption that modifies the pod to
+// be run on a control plane node of the cluster.
+func SpecWithMasterNodeSelector(args ...string) PodSpecOption {
+    return func(spec *corev1.PodSpec) {
+        spec.NodeSelector["node-role.kubernetes.io/control-plane"] = ""
+        spec.Tolerations = append(spec.Tolerations,
+            corev1.Toleration{
+                Key:      "node-role.kubernetes.io/control-plane",
+                Operator: corev1.TolerationOpEqual,
+                Value:    "",
+                Effect:   corev1.TaintEffectNoSchedule,
+            })
+    }
+}
+
+// SpecWithConfigMap returns a PodSpecOption that mounts a configmap to the first container.
+func SpecWithConfigMap(name, mountPath string) PodSpecOption {
     return func(spec *corev1.PodSpec) {
         spec.Volumes = append(spec.Volumes,
             corev1.Volume{
-                Name: volumeName,
+                Name: name,
                 VolumeSource: corev1.VolumeSource{
                     ConfigMap: &corev1.ConfigMapVolumeSource{
                         LocalObjectReference: corev1.LocalObjectReference{
-                            Name: cmName,
+                            Name: name,
                         },
                     },
                 },
@@ -200,7 +223,7 @@ func SpecWithConfigMap(cmName, volumeName, mountPath string) func(spec *corev1.PodSpec) {
         cnt := &spec.Containers[0]
         cnt.VolumeMounts = append(cnt.VolumeMounts,
             corev1.VolumeMount{
-                Name:      volumeName,
+                Name:      name,
                 ReadOnly:  true,
                 MountPath: mountPath,
             })
@@ -228,17 +251,16 @@ func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
     }
 }
 
-func nfdWorkerPodSpec(image string, extraArgs []string) *corev1.PodSpec {
+func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
     yes := true
     no := false
-    return &corev1.PodSpec{
+    p := &corev1.PodSpec{
         Containers: []corev1.Container{
             {
                 Name:            "node-feature-discovery",
-                Image:           image,
                 ImagePullPolicy: pullPolicy(),
                 Command:         []string{"nfd-worker"},
-                Args:            append([]string{"-server=nfd-master-e2e:8080"}, extraArgs...),
+                Args:            []string{"-server=nfd-master-e2e:8080"},
                 Env: []corev1.EnvVar{
                     {
                         Name: "NODE_NAME",
@@ -337,23 +359,26 @@ func nfdWorkerPodSpec(image string, extraArgs []string) *corev1.PodSpec {
             },
         },
     }
+
+    for _, o := range opts {
+        o(p)
+    }
+    return p
 }
 
-func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []string) *corev1.PodSpec {
-    return &corev1.PodSpec{
+func nfdTopologyUpdaterPodSpec(kc KubeletConfig, opts ...PodSpecOption) *corev1.PodSpec {
+    p := &corev1.PodSpec{
         Containers: []corev1.Container{
             {
                 Name:            "node-topology-updater",
-                Image:           image,
                 ImagePullPolicy: pullPolicy(),
                 Command:         []string{"nfd-topology-updater"},
-                Args: append([]string{
+                Args: []string{
                     "--kubelet-config-uri=file:///podresources/config.yaml",
                     "--podresources-socket=unix:///podresources/kubelet.sock",
                     "--sleep-interval=3s",
                     "--watch-namespace=rte",
-                    "--server=nfd-master-e2e:8080",
-                }, extraArgs...),
+                    "--server=nfd-master-e2e:8080"},
                 Env: []corev1.EnvVar{
                     {
                         Name: "NODE_NAME",
@@ -420,6 +445,11 @@ func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []string) *corev1.PodSpec {
             },
         },
     }
+
+    for _, o := range opts {
+        o(p)
+    }
+    return p
 }
 
 func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType {
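Usage note (a sketch, not part of the commit): with the refactored helpers a test composes the built-in options and, when needed, an inline mutator, instead of threading extra parameters through the generators. The import path and the inline priority-class tweak below are illustrative assumptions.

package e2e

import (
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"

    testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils" // import path assumed
)

// newFakeWorkerDaemonSet mirrors the call sites in the test diffs above.
func newFakeWorkerDaemonSet(repo, tag string) *appsv1.DaemonSet {
    podSpecOpts := []testutils.PodSpecOption{
        testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", repo, tag)),
        testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
        // A one-off, test-specific customization can be an inline mutator
        // (hypothetical example) rather than a new generator argument:
        func(spec *corev1.PodSpec) {
            spec.PriorityClassName = "system-node-critical"
        },
    }
    return testutils.NFDWorkerDaemonSet(podSpecOpts...)
}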