Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git (synced 2025-03-31 04:04:51 +00:00)
Merge pull request #967 from k8stopologyawareschedwg/move_pod_utils
e2e: move pod utils to a separate package
Commit c22604ebb8
4 changed files with 154 additions and 108 deletions
@@ -37,10 +37,13 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

nfdv1alpha1 "sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1"
nfdclient "sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned"
"sigs.k8s.io/node-feature-discovery/source/custom"
testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
+ testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
+ testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

var (
@@ -111,8 +114,8 @@ var _ = SIGDescribe("Node Feature Discovery", func() {

// Launch nfd-master
By("Creating nfd master pod and nfd-master service")
- imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
- masterPod = f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
+ imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
+ masterPod = f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))

// Create nfd-master service
nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -154,11 +157,11 @@ var _ = SIGDescribe("Node Feature Discovery", func() {

// Launch nfd-worker
By("Creating a nfd worker pod")
- podSpecOpts := []testutils.PodSpecOption{
- testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
- testutils.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
+ podSpecOpts := []testpod.SpecOption{
+ testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+ testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
}
- workerPod := testutils.NFDWorkerPod(podSpecOpts...)
+ workerPod := testpod.NFDWorker(podSpecOpts...)
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

@@ -204,13 +207,13 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
fConf := cfg.DefaultFeatures

By("Creating nfd-worker daemonset")
- podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
- workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
+ podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
+ workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
- Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
+ Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

By("Getting node objects")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
@@ -334,18 +337,18 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
Expect(err).NotTo(HaveOccurred())

By("Creating nfd-worker daemonset with configmap mounted")
- podSpecOpts := []testutils.PodSpecOption{
- testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
- testutils.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
- testutils.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
+ podSpecOpts := []testpod.SpecOption{
+ testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+ testpod.SpecWithConfigMap(cm1.Name, filepath.Join(custom.Directory, "cm1")),
+ testpod.SpecWithConfigMap(cm2.Name, filepath.Join(custom.Directory, "cm2")),
}
- workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
+ workerDS := testds.NFDWorker(podSpecOpts...)

workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
- Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
+ Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

By("Getting target node and checking labels")
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
@@ -417,16 +420,16 @@ core:
Expect(err).NotTo(HaveOccurred())

By("Creating nfd-worker daemonset")
- podSpecOpts := []testutils.PodSpecOption{
- testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
- testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
+ podSpecOpts := []testpod.SpecOption{
+ testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+ testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
- workerDS := testutils.NFDWorkerDaemonSet(podSpecOpts...)
+ workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
- Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
+ Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

expected := map[string]string{
"feature.node.kubernetes.io/e2e-flag-test-1": "true",
@@ -19,6 +19,7 @@ package e2e
import (
"context"
"fmt"
+ "k8s.io/apimachinery/pkg/api/resource"
"time"

. "github.com/onsi/ginkgo/v2"
@@ -39,6 +40,8 @@ import (
admissionapi "k8s.io/pod-security-admission/api"

testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
+ testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
+ testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
@@ -71,8 +74,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {

Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())

- imageOpt := testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
- f.PodClient().CreateSync(testutils.NFDMasterPod(imageOpt))
+ imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
+ f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))

// Create nfd-master service
masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
@@ -86,7 +89,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
Expect(err).NotTo(HaveOccurred())

By("Waiting for daemonset pods to be ready")
- Expect(testutils.WaitForPodsReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
+ Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
@@ -119,8 +122,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))

- podSpecOpts := []testutils.PodSpecOption{testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
- topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
+ podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
+ topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("should fill the node resource topologies CR with the data", func() {
@@ -133,12 +136,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
- sleeperPod := testutils.BestEffortSleeperPod()
+ sleeperPod := testpod.BestEffortSleeper()

podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
- defer testutils.DeletePodsAsync(f, podMap)
+ defer testpod.DeleteAsync(f, podMap)

cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -173,12 +176,17 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
- sleeperPod := testutils.GuaranteedSleeperPod("500m")
+ sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+ corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ // any random reasonable amount is fine
+ corev1.ResourceMemory: resource.MustParse("100Mi"),
+ }))

podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
- defer testutils.DeletePodsAsync(f, podMap)
+ defer testpod.DeleteAsync(f, podMap)

cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
@@ -219,7 +227,12 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming exclusive CPUs")
- sleeperPod := testutils.GuaranteedSleeperPod("1000m")
+ sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
+ corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1000m"),
+ // any random reasonable amount is fine
+ corev1.ResourceMemory: resource.MustParse("100Mi"),
+ }))
// in case there is more than a single node in the cluster
// we need to set the node name, so we'll have certainty about
// which node we need to examine
@@ -228,7 +241,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
podMap := make(map[string]*corev1.Pod)
pod := f.PodClient().CreateSync(sleeperPod)
podMap[pod.Name] = pod
- defer testutils.DeletePodsAsync(f, podMap)
+ defer testpod.DeleteAsync(f, podMap)

By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha1.NodeResourceTopology
@@ -274,11 +287,11 @@ excludeList:
kcfg := cfg.GetKubeletConfig()
By(fmt.Sprintf("Using config (%#v)", kcfg))

- podSpecOpts := []testutils.PodSpecOption{
- testutils.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
- testutils.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
+ podSpecOpts := []testpod.SpecOption{
+ testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
+ testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
}
- topologyUpdaterDaemonSet = testutils.NFDTopologyUpdaterDaemonSet(kcfg, podSpecOpts...)
+ topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})

It("noderesourcetopology should not advertise the memory resource", func() {
test/e2e/utils/daemonset/daemonset.go (new file, 58 lines)
@@ -0,0 +1,58 @@
+ /*
+ Copyright 2022 The Kubernetes Authors.
+ 
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ 
+ http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+ 
+ package daemonset
+ 
+ import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/uuid"
+ 
+ "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
+ "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
+ )
+ 
+ // NFDWorker provides the NFD daemon set worker definition
+ func NFDWorker(opts ...pod.SpecOption) *appsv1.DaemonSet {
+ return new("nfd-worker", &pod.NFDWorker(opts...).Spec)
+ }
+ 
+ // NFDTopologyUpdater provides the NFD daemon set topology updater
+ func NFDTopologyUpdater(kc utils.KubeletConfig, opts ...pod.SpecOption) *appsv1.DaemonSet {
+ return new("nfd-topology-updater", pod.NFDTopologyUpdaterSpec(kc, opts...))
+ }
+ 
+ // new provide the new daemon set
+ func new(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
+ return &appsv1.DaemonSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name + "-" + string(uuid.NewUUID()),
+ },
+ Spec: appsv1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"name": name},
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"name": name},
+ },
+ Spec: *podSpec,
+ },
+ MinReadySeconds: 5,
+ },
+ }
+ }
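For orientation, a minimal sketch (not part of the diff; createWorkerDS and its arguments are hypothetical) of how a test would combine the new pod and daemonset helpers, using only calls that appear in the hunks above:

package e2e

import (
    "fmt"

    appsv1 "k8s.io/api/apps/v1"

    testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
    testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// createWorkerDS (hypothetical helper) mirrors the pattern used in the e2e
// tests: pod.SpecOption values tweak the generated PodSpec, and
// daemonset.NFDWorker wraps that spec into a DaemonSet with a unique name.
func createWorkerDS(repo, tag string) *appsv1.DaemonSet {
    opts := []testpod.SpecOption{
        testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", repo, tag)),
        testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
    }
    return testds.NFDWorker(opts...)
}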
@@ -1,5 +1,5 @@
/*
- Copyright 2018-2022 The Kubernetes Authors.
+ Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

- package utils
+ package pod

import (
"context"
@@ -24,9 +24,7 @@ import (

"github.com/onsi/ginkgo/v2"

- appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -35,6 +33,8 @@ import (
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/utils/pointer"
+ 
+ "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
)

var pullIfNotPresent = flag.Bool("nfd.pull-if-not-present", false, "Pull Images if not present - not always")
@@ -43,9 +43,9 @@ const (
PauseImage = "registry.k8s.io/pause"
)

- // GuarenteedSleeperPod makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
- func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod {
- return &corev1.Pod{
+ // GuaranteedSleeper makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
+ func GuaranteedSleeper(opts ...func(pod *corev1.Pod)) *corev1.Pod {
+ p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "sleeper-gu-pod",
},
@@ -53,24 +53,27 @@ func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod {
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
- Name: "sleeper-gu-cnt",
- Image: PauseImage,
- Resources: corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- // we use 1 core because that's the minimal meaningful quantity
- corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit),
- // any random reasonable amount is fine
- corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
- },
- },
+ Name: "sleeper-gu-cnt",
+ Image: PauseImage,
+ Resources: corev1.ResourceRequirements{},
},
},
},
}
+ for _, o := range opts {
+ o(p)
+ }
+ return p
}

- // BestEffortSleeperPod makes a Best Effort QoS class Pod object which sleeps long enough
- func BestEffortSleeperPod() *corev1.Pod {
+ func WithLimits(list corev1.ResourceList) func(p *corev1.Pod) {
+ return func(p *corev1.Pod) {
+ p.Spec.Containers[0].Resources.Limits = list
+ }
+ }
+ 
+ // BestEffortSleeper makes a Best Effort QoS class Pod object which sleeps long enough
+ func BestEffortSleeper() *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "sleeper-be-pod",
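A short sketch (not from the diff; guaranteedSleeper is a hypothetical wrapper) of the new functional-option style that replaces the old GuaranteedSleeperPod(cpuLimit) signature:

package e2e

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"

    testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// guaranteedSleeper (hypothetical) builds a sleeper pod with explicit limits;
// with limits set and no requests, the requests default to the limits, which
// keeps the pod in the Guaranteed QoS class as the tests above rely on.
func guaranteedSleeper(cpu string) *corev1.Pod {
    return testpod.GuaranteedSleeper(testpod.WithLimits(corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse(cpu),
        corev1.ResourceMemory: resource.MustParse("100Mi"),
    }))
}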
@@ -87,8 +90,8 @@ func BestEffortSleeperPod() *corev1.Pod {
}
}

- // DeletePodsAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
- func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
+ // DeleteAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
+ func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
var wg sync.WaitGroup
for _, pod := range podMap {
wg.Add(1)
@@ -96,14 +99,14 @@ func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()

- DeletePodSyncByName(f, podName)
+ DeleteSyncByName(f, podName)
}(pod.Namespace, pod.Name)
}
wg.Wait()
}

- // DeletePodSyncByName deletes the pod identified by `podName` in the current namespace
- func DeletePodSyncByName(f *framework.Framework, podName string) {
+ // DeleteSyncByName deletes the pod identified by `podName` in the current namespace
+ func DeleteSyncByName(f *framework.Framework, podName string) {
gp := int64(0)
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
@@ -111,10 +114,10 @@ func DeletePodSyncByName(f *framework.Framework, podName string) {
f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
}

- type PodSpecOption func(spec *corev1.PodSpec)
+ type SpecOption func(spec *corev1.PodSpec)

- // NFDMasterPod provide NFD master pod definition
- func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod {
+ // NFDMaster provide NFD master pod definition
+ func NFDMaster(opts ...SpecOption) *corev1.Pod {
yes := true
no := false
p := &corev1.Pod{
@@ -163,13 +166,13 @@ func NFDMasterPod(opts ...PodSpecOption) *corev1.Pod {
return p
}

- // NFDWorkerPod provides NFD worker pod definition
- func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod {
+ // NFDWorker provides NFD worker pod definition
+ func NFDWorker(opts ...SpecOption) *corev1.Pod {
p := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-" + string(uuid.NewUUID()),
},
- Spec: *nfdWorkerPodSpec(opts...),
+ Spec: *nfdWorkerSpec(opts...),
}

p.Spec.RestartPolicy = corev1.RestartPolicyNever
@@ -177,18 +180,8 @@ func NFDWorkerPod(opts ...PodSpecOption) *corev1.Pod {
return p
}

- // NFDWorkerDaemonSet provides the NFD daemon set worker definition
- func NFDWorkerDaemonSet(opts ...PodSpecOption) *appsv1.DaemonSet {
- return newDaemonSet("nfd-worker", nfdWorkerPodSpec(opts...))
- }
- 
- // NFDTopologyUpdaterDaemonSet provides the NFD daemon set topology updater
- func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, opts ...PodSpecOption) *appsv1.DaemonSet {
- return newDaemonSet("nfd-topology-updater", nfdTopologyUpdaterPodSpec(kc, opts...))
- }
- 
- // SpecWithContainerImage returns a PodSpecOption that sets the image used by the first container.
- func SpecWithContainerImage(image string) PodSpecOption {
+ // SpecWithContainerImage returns a SpecOption that sets the image used by the first container.
+ func SpecWithContainerImage(image string) SpecOption {
return func(spec *corev1.PodSpec) {
// NOTE: we might want to make the container number a parameter
cnt := &spec.Containers[0]
@@ -196,8 +189,8 @@ func SpecWithContainerImage(image string) PodSpecOption {
}
}

- // SpecWithContainerExtraArgs returns a PodSpecOption that adds extra args to the first container.
- func SpecWithContainerExtraArgs(args ...string) PodSpecOption {
+ // SpecWithContainerExtraArgs returns a SpecOption that adds extra args to the first container.
+ func SpecWithContainerExtraArgs(args ...string) SpecOption {
return func(spec *corev1.PodSpec) {
// NOTE: we might want to make the container number a parameter
cnt := &spec.Containers[0]
@@ -205,9 +198,9 @@ func SpecWithContainerExtraArgs(args ...string) PodSpecOption {
}
}

- // SpecWithMasterNodeSelector returns a PodSpecOption that modifies the pod to
+ // SpecWithMasterNodeSelector returns a SpecOption that modifies the pod to
// be run on a control plane node of the cluster.
- func SpecWithMasterNodeSelector(args ...string) PodSpecOption {
+ func SpecWithMasterNodeSelector(args ...string) SpecOption {
return func(spec *corev1.PodSpec) {
spec.NodeSelector["node-role.kubernetes.io/control-plane"] = ""
spec.Tolerations = append(spec.Tolerations,
@@ -220,8 +213,8 @@ func SpecWithMasterNodeSelector(args ...string) PodSpecOption {
}
}

- // SpecWithConfigMap returns a PodSpecOption that mounts a configmap to the first container.
- func SpecWithConfigMap(name, mountPath string) PodSpecOption {
+ // SpecWithConfigMap returns a SpecOption that mounts a configmap to the first container.
+ func SpecWithConfigMap(name, mountPath string) SpecOption {
return func(spec *corev1.PodSpec) {
spec.Volumes = append(spec.Volumes,
corev1.Volume{
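A brief sketch (not part of the diff; workerPodWithConfig and the ConfigMap name are placeholders) of composing several SpecOption values into a worker pod, as the configmap tests above do:

package e2e

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"

    testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// workerPodWithConfig (hypothetical) shows that each SpecOption mutates the
// generated PodSpec in place, so options compose freely and apply in order.
func workerPodWithConfig(repo, tag, configMapName string) *corev1.Pod {
    opts := []testpod.SpecOption{
        testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", repo, tag)),
        testpod.SpecWithConfigMap(configMapName, "/etc/kubernetes/node-feature-discovery"),
    }
    return testpod.NFDWorker(opts...)
}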
@@ -244,28 +237,7 @@ func SpecWithConfigMap(name, mountPath string) PodSpecOption {
}
}

- // newDaemonSet provide the new daemon set
- func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
- return &appsv1.DaemonSet{
- ObjectMeta: metav1.ObjectMeta{
- Name: name + "-" + string(uuid.NewUUID()),
- },
- Spec: appsv1.DaemonSetSpec{
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{"name": name},
- },
- Template: corev1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{"name": name},
- },
- Spec: *podSpec,
- },
- MinReadySeconds: 5,
- },
- }
- }
- 
- func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
+ func nfdWorkerSpec(opts ...SpecOption) *corev1.PodSpec {
yes := true
no := false
p := &corev1.PodSpec{
@@ -380,7 +352,7 @@ func nfdWorkerPodSpec(opts ...PodSpecOption) *corev1.PodSpec {
return p
}

- func nfdTopologyUpdaterPodSpec(kc KubeletConfig, opts ...PodSpecOption) *corev1.PodSpec {
+ func NFDTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.PodSpec {
p := &corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -472,10 +444,10 @@ func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType {
return hostPathType
}

- // WaitForPodsReady waits for the pods to become ready.
+ // WaitForReady waits for the pods to become ready.
// NOTE: copied from k8s v1.22 after which is was removed from there.
// Convenient for checking that all pods of a daemonset are ready.
- func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
+ func WaitForReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
const poll = 2 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
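Finally, a hedged sketch (waitForWorker is a hypothetical helper) of how the renamed WaitForReady is called after creating a worker DaemonSet, matching the call sites in the tests above:

package e2e

import (
    appsv1 "k8s.io/api/apps/v1"
    clientset "k8s.io/client-go/kubernetes"

    testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

// waitForWorker (hypothetical) blocks until every pod carrying the DaemonSet's
// "name" label has been ready for at least 5 seconds; WaitForReady polls the
// pod list every 2 seconds and returns an error on timeout.
func waitForWorker(c clientset.Interface, ns string, ds *appsv1.DaemonSet) error {
    return testpod.WaitForReady(c, ns, ds.Spec.Template.Labels["name"], 5)
}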