
Merge pull request #1318 from marquiz/devel/release-0.13-k8s-1.28

[release-0.13] Bump kubernetes to v1.28.1
Kubernetes Prow Robot, 2023-08-30 09:44:47 -07:00 (committed by GitHub)
commit 9be9259263
15 changed files with 787 additions and 661 deletions

go.mod (246 changed lines)
View file

@ -5,115 +5,117 @@ go 1.20
require (
github.com/fsnotify/fsnotify v1.6.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golang/protobuf v1.5.3
github.com/google/go-cmp v0.5.9
github.com/jaypipes/ghw v0.8.1-0.20210827132705-c7224150a17e
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.0
github.com/k8stopologyawareschedwg/podfingerprint v0.1.2
github.com/klauspost/cpuid/v2 v2.2.4
github.com/onsi/ginkgo/v2 v2.4.0
github.com/onsi/gomega v1.23.0
github.com/opencontainers/runc v1.1.6
github.com/onsi/ginkgo/v2 v2.9.4
github.com/onsi/gomega v1.27.6
github.com/opencontainers/runc v1.1.7
github.com/smartystreets/assertions v1.2.0
github.com/smartystreets/goconvey v1.6.4
github.com/stretchr/testify v1.8.0
github.com/stretchr/testify v1.8.2
github.com/vektra/errors v0.0.0-20140903201135-c64d83aba85a
golang.org/x/exp v0.0.0-20230307190834-24139beb5833
golang.org/x/net v0.8.0
google.golang.org/grpc v1.49.0
google.golang.org/protobuf v1.28.1
k8s.io/api v0.26.7
k8s.io/apiextensions-apiserver v0.26.7
k8s.io/apimachinery v0.26.7
k8s.io/client-go v0.26.7
k8s.io/klog/v2 v2.80.1
k8s.io/kubectl v0.26.7
k8s.io/kubelet v0.26.7
k8s.io/kubernetes v1.26.7
k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
golang.org/x/net v0.13.0
google.golang.org/grpc v1.54.0
google.golang.org/protobuf v1.30.0
k8s.io/api v0.28.1
k8s.io/apiextensions-apiserver v0.28.1
k8s.io/apimachinery v0.28.1
k8s.io/client-go v0.28.1
k8s.io/klog/v2 v2.100.1
k8s.io/kubectl v0.28.1
k8s.io/kubelet v0.28.1
k8s.io/kubernetes v1.28.1
k8s.io/pod-security-admission v0.28.1
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go v0.97.0 // indirect
github.com/Azure/azure-sdk-for-go v55.0.0+incompatible // indirect
cloud.google.com/go/compute v1.19.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.1.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b // indirect
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.22 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.8.25 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
github.com/aws/aws-sdk-go v1.44.116 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
github.com/cilium/ebpf v0.7.0 // indirect
github.com/container-storage-interface/spec v1.7.0 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/container-storage-interface/spec v1.8.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/containerd/ttrpc v1.2.2 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.0.6 // indirect
github.com/gofrs/uuid v4.0.0+incompatible // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/google/cadvisor v0.46.1 // indirect
github.com/google/cel-go v0.12.6 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/cadvisor v0.47.3 // indirect
github.com/google/cel-go v0.16.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
github.com/googleapis/gax-go/v2 v2.7.1 // indirect
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jaypipes/pcidb v0.6.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
github.com/libopenstorage/openstorage v1.0.0 // indirect
github.com/lithammer/dedent v1.1.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/ipvs v1.0.1 // indirect
github.com/moby/ipvs v1.1.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@ -123,32 +125,32 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect
github.com/opencontainers/selinux v1.10.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/cobra v1.6.0 // indirect
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/vishvananda/netlink v1.1.0 // indirect
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vmware/govmomi v0.30.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/v3 v3.5.5 // indirect
go.opencensus.io v0.23.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
@ -157,41 +159,45 @@ require (
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/api v0.60.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/api v0.114.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
gopkg.in/gcfg.v1 v1.2.0 // indirect
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
gopkg.in/gcfg.v1 v1.2.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/warnings.v0 v0.1.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
k8s.io/apiserver v0.26.7 // indirect
k8s.io/cloud-provider v0.26.7 // indirect
k8s.io/component-base v0.26.7 // indirect
k8s.io/component-helpers v0.26.7 // indirect
k8s.io/cri-api v0.0.0 // indirect
k8s.io/csi-translation-lib v0.26.7 // indirect
k8s.io/dynamic-resource-allocation v0.26.7 // indirect
k8s.io/kms v0.26.7 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/kube-proxy v0.0.0 // indirect
k8s.io/apiserver v0.28.1 // indirect
k8s.io/cloud-provider v0.28.1 // indirect
k8s.io/component-base v0.28.1 // indirect
k8s.io/component-helpers v0.28.1 // indirect
k8s.io/controller-manager v0.28.1 // indirect
k8s.io/cri-api v0.28.1 // indirect
k8s.io/csi-translation-lib v0.28.1 // indirect
k8s.io/dynamic-resource-allocation v0.28.1 // indirect
k8s.io/kms v0.28.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/kube-scheduler v0.0.0 // indirect
k8s.io/legacy-cloud-providers v0.0.0 // indirect
k8s.io/mount-utils v0.25.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
@ -199,29 +205,29 @@ require (
// need to override with commits (corresponding their kubernetes-* tags)
replace (
github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
k8s.io/api => k8s.io/api v0.26.7
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.7
k8s.io/apimachinery => k8s.io/apimachinery v0.26.7
k8s.io/apiserver => k8s.io/apiserver v0.26.7
k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.7
k8s.io/client-go => k8s.io/client-go v0.26.7
k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.7
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.7
k8s.io/code-generator => k8s.io/code-generator v0.26.7
k8s.io/component-base => k8s.io/component-base v0.26.7
k8s.io/component-helpers => k8s.io/component-helpers v0.26.7
k8s.io/controller-manager => k8s.io/controller-manager v0.26.7
k8s.io/cri-api => k8s.io/cri-api v0.26.7
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.7
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.7
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.7
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.7
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.7
k8s.io/kubectl => k8s.io/kubectl v0.26.7
k8s.io/kubelet => k8s.io/kubelet v0.26.7
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.7
k8s.io/metrics => k8s.io/metrics v0.26.7
k8s.io/mount-utils => k8s.io/mount-utils v0.26.7
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.7
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.7
k8s.io/api => k8s.io/api v0.28.1
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.1
k8s.io/apimachinery => k8s.io/apimachinery v0.28.1
k8s.io/apiserver => k8s.io/apiserver v0.28.1
k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.1
k8s.io/client-go => k8s.io/client-go v0.28.1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.1
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.1
k8s.io/code-generator => k8s.io/code-generator v0.28.1
k8s.io/component-base => k8s.io/component-base v0.28.1
k8s.io/component-helpers => k8s.io/component-helpers v0.28.1
k8s.io/controller-manager => k8s.io/controller-manager v0.28.1
k8s.io/cri-api => k8s.io/cri-api v0.28.1
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.1
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.1
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.1
k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.1
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.1
k8s.io/kubectl => k8s.io/kubectl v0.28.1
k8s.io/kubelet => k8s.io/kubelet v0.28.1
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.1
k8s.io/metrics => k8s.io/metrics v0.28.1
k8s.io/mount-utils => k8s.io/mount-utils v0.28.1
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.1
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.1
)

go.sum (511 changed lines)

File diff suppressed because it is too large.

View file

@ -16,6 +16,8 @@ limitations under the License.
package podres
//go:generate mockery --srcpkg=k8s.io/kubelet/pkg/apis/podresources/v1 --name PodResourcesListerClient
import (
"fmt"
"log"

View file

@ -1,78 +0,0 @@
// Code generated by mockery v2.4.0-beta. DO NOT EDIT.
package podres
import (
context "context"
grpc "google.golang.org/grpc"
mock "github.com/stretchr/testify/mock"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// MockPodResourcesListerClient is an autogenerated mock type for the PodResourcesListerClient type
type MockPodResourcesListerClient struct {
mock.Mock
}
// GetAllocatableResources provides a mock function with given fields: ctx, in, opts
func (_m *MockPodResourcesListerClient) GetAllocatableResources(ctx context.Context, in *v1.AllocatableResourcesRequest, opts ...grpc.CallOption) (*v1.AllocatableResourcesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *v1.AllocatableResourcesResponse
if rf, ok := ret.Get(0).(func(context.Context, *v1.AllocatableResourcesRequest, ...grpc.CallOption) *v1.AllocatableResourcesResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.AllocatableResourcesResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1.AllocatableResourcesRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// List provides a mock function with given fields: ctx, in, opts
func (_m *MockPodResourcesListerClient) List(ctx context.Context, in *v1.ListPodResourcesRequest, opts ...grpc.CallOption) (*v1.ListPodResourcesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *v1.ListPodResourcesResponse
if rf, ok := ret.Get(0).(func(context.Context, *v1.ListPodResourcesRequest, ...grpc.CallOption) *v1.ListPodResourcesResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.ListPodResourcesResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *v1.ListPodResourcesRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}

View file

@ -0,0 +1,132 @@
// Code generated by mockery v2.20.0. DO NOT EDIT.
package mocks
import (
context "context"
grpc "google.golang.org/grpc"
mock "github.com/stretchr/testify/mock"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// PodResourcesListerClient is an autogenerated mock type for the PodResourcesListerClient type
type PodResourcesListerClient struct {
mock.Mock
}
// Get provides a mock function with given fields: ctx, in, opts
func (_m *PodResourcesListerClient) Get(ctx context.Context, in *v1.GetPodResourcesRequest, opts ...grpc.CallOption) (*v1.GetPodResourcesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *v1.GetPodResourcesResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *v1.GetPodResourcesRequest, ...grpc.CallOption) (*v1.GetPodResourcesResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *v1.GetPodResourcesRequest, ...grpc.CallOption) *v1.GetPodResourcesResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.GetPodResourcesResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *v1.GetPodResourcesRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetAllocatableResources provides a mock function with given fields: ctx, in, opts
func (_m *PodResourcesListerClient) GetAllocatableResources(ctx context.Context, in *v1.AllocatableResourcesRequest, opts ...grpc.CallOption) (*v1.AllocatableResourcesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *v1.AllocatableResourcesResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *v1.AllocatableResourcesRequest, ...grpc.CallOption) (*v1.AllocatableResourcesResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *v1.AllocatableResourcesRequest, ...grpc.CallOption) *v1.AllocatableResourcesResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.AllocatableResourcesResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *v1.AllocatableResourcesRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// List provides a mock function with given fields: ctx, in, opts
func (_m *PodResourcesListerClient) List(ctx context.Context, in *v1.ListPodResourcesRequest, opts ...grpc.CallOption) (*v1.ListPodResourcesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *v1.ListPodResourcesResponse
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *v1.ListPodResourcesRequest, ...grpc.CallOption) (*v1.ListPodResourcesResponse, error)); ok {
return rf(ctx, in, opts...)
}
if rf, ok := ret.Get(0).(func(context.Context, *v1.ListPodResourcesRequest, ...grpc.CallOption) *v1.ListPodResourcesResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.ListPodResourcesResponse)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *v1.ListPodResourcesRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewPodResourcesListerClient interface {
mock.TestingT
Cleanup(func())
}
// NewPodResourcesListerClient creates a new instance of PodResourcesListerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewPodResourcesListerClient(t mockConstructorTestingTNewPodResourcesListerClient) *PodResourcesListerClient {
mock := &PodResourcesListerClient{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
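
For orientation, a mock generated by mockery v2.20.0 like the one above is driven through the standard testify expectation API. The sketch below is illustrative only and not part of this commit; it assumes the mocks import path introduced in this change and a plain *testing.T test.

package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	v1 "k8s.io/kubelet/pkg/apis/podresources/v1"

	"sigs.k8s.io/node-feature-discovery/pkg/podres/mocks"
)

func TestListSketch(t *testing.T) {
	// The generated constructor wires the mock to t and registers a cleanup
	// hook that asserts all expectations when the test finishes.
	m := mocks.NewPodResourcesListerClient(t)

	// Expect exactly one List call (any context, any request) and return
	// an empty response with no error.
	m.On("List", mock.Anything, mock.Anything).
		Return(&v1.ListPodResourcesResponse{}, nil).
		Once()

	resp, err := m.List(context.TODO(), &v1.ListPodResourcesRequest{})
	if err != nil || resp == nil {
		t.Fatalf("unexpected result: resp=%v err=%v", resp, err)
	}
}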

View file

@ -33,7 +33,7 @@ import (
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
"sigs.k8s.io/node-feature-discovery/pkg/apihelper"
"sigs.k8s.io/node-feature-discovery/pkg/podres"
mockv1 "sigs.k8s.io/node-feature-discovery/pkg/podres/mocks"
)
func TestPodScanner(t *testing.T) {
@ -54,7 +54,7 @@ func TestPodScanner(t *testing.T) {
}
Convey("When I scan for pod resources using fake client and no namespace", t, func() {
mockPodResClient := new(podres.MockPodResourcesListerClient)
mockPodResClient := new(mockv1.PodResourcesListerClient)
mockAPIHelper := new(apihelper.MockAPIHelpers)
mockClient := &k8sclient.Clientset{}
computePodFingerprint := true
@ -676,7 +676,7 @@ func TestPodScanner(t *testing.T) {
})
Convey("When I scan for pod resources using fake client and given namespace", t, func() {
mockPodResClient := new(podres.MockPodResourcesListerClient)
mockPodResClient := new(mockv1.PodResourcesListerClient)
mockAPIHelper := new(apihelper.MockAPIHelpers)
mockClient := &k8sclient.Clientset{}
computePodFingerprint := false

View file

@ -51,10 +51,10 @@ import (
// cleanupNode deletes all NFD-related metadata from the Node object, i.e.
// labels and annotations
func cleanupNode(cs clientset.Interface) {
func cleanupNode(ctx context.Context, cs clientset.Interface) {
// Per-node cleanup function
cleanup := func(nodeName string) error {
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
update := false
@ -114,14 +114,14 @@ func cleanupNode(cs clientset.Interface) {
if updateStatus {
By("Deleting NFD extended resources from node " + nodeName)
if _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
if _, err := cs.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil {
return err
}
}
if update {
By("Deleting NFD labels, annotations and taints from node " + node.Name)
if _, err := cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
if _, err := cs.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}); err != nil {
return err
}
}
@ -129,7 +129,7 @@ func cleanupNode(cs clientset.Interface) {
}
// Cleanup all nodes
nodeList, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodeList, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, n := range nodeList.Items {
@ -144,23 +144,23 @@ func cleanupNode(cs clientset.Interface) {
}
}
func cleanupCRs(cli *nfdclient.Clientset, namespace string) {
func cleanupCRs(ctx context.Context, cli *nfdclient.Clientset, namespace string) {
// Drop NodeFeatureRule objects
nfrs, err := cli.NfdV1alpha1().NodeFeatureRules().List(context.TODO(), metav1.ListOptions{})
nfrs, err := cli.NfdV1alpha1().NodeFeatureRules().List(ctx, metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
By("Deleting NodeFeatureRule objects from the cluster")
for _, nfr := range nfrs.Items {
err = cli.NfdV1alpha1().NodeFeatureRules().Delete(context.TODO(), nfr.Name, metav1.DeleteOptions{})
err = cli.NfdV1alpha1().NodeFeatureRules().Delete(ctx, nfr.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
nfs, err := cli.NfdV1alpha1().NodeFeatures(namespace).List(context.TODO(), metav1.ListOptions{})
nfs, err := cli.NfdV1alpha1().NodeFeatures(namespace).List(ctx, metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
By("Deleting NodeFeature objects from namespace " + namespace)
for _, nf := range nfs.Items {
err = cli.NfdV1alpha1().NodeFeatures(namespace).Delete(context.TODO(), nf.Name, metav1.DeleteOptions{})
err = cli.NfdV1alpha1().NodeFeatures(namespace).Delete(ctx, nf.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
@ -185,8 +185,8 @@ var _ = SIGDescribe("NFD master and worker", func() {
extraMasterPodSpecOpts []testpod.SpecOption
)
checkNodeFeatureObject := func(name string) {
_, err := nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
checkNodeFeatureObject := func(ctx context.Context, name string) {
_, err := nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Get(ctx, name, metav1.GetOptions{})
if useNodeFeatureApi {
By(fmt.Sprintf("Check that NodeFeature object for the node %q was created", name))
Expect(err).NotTo(HaveOccurred())
@ -196,39 +196,39 @@ var _ = SIGDescribe("NFD master and worker", func() {
}
}
BeforeAll(func() {
BeforeAll(func(ctx context.Context) {
// Create clients for apiextensions and our CRD api
extClient = extclient.NewForConfigOrDie(f.ClientConfig())
nfdClient = nfdclient.NewForConfigOrDie(f.ClientConfig())
By("Creating NFD CRDs")
var err error
crds, err = testutils.CreateNfdCRDs(extClient)
crds, err = testutils.CreateNfdCRDs(ctx, extClient)
Expect(err).NotTo(HaveOccurred())
})
AfterAll(func() {
AfterAll(func(ctx context.Context) {
for _, crd := range crds {
err := extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
err := extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
JustBeforeEach(func() {
JustBeforeEach(func(ctx context.Context) {
// Drop the pod security admission label as nfd-worker needs host mounts
if _, ok := f.Namespace.Labels[admissionapi.EnforceLevelLabel]; ok {
e2elog.Logf("Deleting %s label from the test namespace", admissionapi.EnforceLevelLabel)
delete(f.Namespace.Labels, admissionapi.EnforceLevelLabel)
_, err := f.ClientSet.CoreV1().Namespaces().Update(context.TODO(), f.Namespace, metav1.UpdateOptions{})
_, err := f.ClientSet.CoreV1().Namespaces().Update(ctx, f.Namespace, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
}
err := testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)
err := testutils.ConfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
// Remove pre-existing stale annotations and labels etc and CRDs
cleanupCRs(nfdClient, f.Namespace.Name)
cleanupNode(f.ClientSet)
cleanupCRs(ctx, nfdClient, f.Namespace.Name)
cleanupNode(ctx, f.ClientSet)
// Launch nfd-master
By("Creating nfd master pod and nfd-master service")
@ -237,33 +237,33 @@ var _ = SIGDescribe("NFD master and worker", func() {
testpod.SpecWithContainerImage(dockerImage()),
)...)
masterPod := e2epod.NewPodClient(f).CreateSync(testpod.NFDMaster(podSpecOpts...))
masterPod := e2epod.NewPodClient(f).CreateSync(ctx, testpod.NFDMaster(podSpecOpts...))
// Create nfd-master service
nfdSvc, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
nfdSvc, err := testutils.CreateService(ctx, f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-master pod to be running")
Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())
Expect(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())
By("Verifying the node where nfd-master is running")
// Get updated masterPod object (we want to know where it was scheduled)
masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), masterPod.Name, metav1.GetOptions{})
masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, masterPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Node running nfd-master should have master version annotation
masterPodNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), masterPod.Spec.NodeName, metav1.GetOptions{})
masterPodNode, err := f.ClientSet.CoreV1().Nodes().Get(ctx, masterPod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(masterPodNode.Annotations).To(HaveKey(nfdv1alpha1.AnnotationNs + "/master.version"))
By("Waiting for the nfd-master service to be up")
Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
Expect(e2enetwork.WaitForService(ctx, f.ClientSet, f.Namespace.Name, nfdSvc.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
})
AfterEach(func() {
Expect(testutils.DeconfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
AfterEach(func(ctx context.Context) {
Expect(testutils.DeconfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
cleanupNode(f.ClientSet)
cleanupCRs(nfdClient, f.Namespace.Name)
cleanupNode(ctx, f.ClientSet)
cleanupCRs(ctx, nfdClient, f.Namespace.Name)
extraMasterPodSpecOpts = nil
})
@ -271,7 +271,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
// Simple test with only the fake source enabled
//
Context("and a single worker pod with fake source enabled", func() {
It("it should decorate the node with the fake feature labels", func() {
It("it should decorate the node with the fake feature labels", func(ctx context.Context) {
fakeFeatureLabels := map[string]string{
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature1": "true",
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature2": "true",
@ -286,16 +286,16 @@ var _ = SIGDescribe("NFD master and worker", func() {
testpod.SpecWithContainerExtraArgs("-oneshot", "-label-sources=fake"),
)
workerPod := testpod.NFDWorker(podSpecOpts...)
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, workerPod, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-worker pod to succeed")
Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.Name, f.Namespace.Name)).NotTo(HaveOccurred())
workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), workerPod.Name, metav1.GetOptions{})
Expect(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, workerPod.Name, f.Namespace.Name)).NotTo(HaveOccurred())
workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, workerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), workerPod.Spec.NodeName, metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, workerPod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for k, v := range fakeFeatureLabels {
Expect(node.Labels[k]).To(Equal(v))
@ -308,10 +308,10 @@ var _ = SIGDescribe("NFD master and worker", func() {
}
}
checkNodeFeatureObject(node.Name)
checkNodeFeatureObject(ctx, node.Name)
By("Deleting the node-feature-discovery worker pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), workerPod.Name, metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, workerPod.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
})
})
@ -320,7 +320,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
// More comprehensive test when --e2e-node-config is enabled
//
Context("and nfd-workers as a daemonset with default sources enabled", func() {
It("the node labels and annotations listed in the e2e config should be present", func() {
It("the node labels and annotations listed in the e2e config should be present", func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())
@ -337,14 +337,14 @@ var _ = SIGDescribe("NFD master and worker", func() {
testpod.SpecWithContainerImage(dockerImage()),
)
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for worker daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
By("Getting node objects")
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(len(nodeList.Items)).ToNot(BeZero())
@ -398,12 +398,12 @@ var _ = SIGDescribe("NFD master and worker", func() {
}
// Check existence of NodeFeature object
checkNodeFeatureObject(node.Name)
checkNodeFeatureObject(ctx, node.Name)
}
By("Deleting nfd-worker daemonset")
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.Name, metav1.DeleteOptions{})
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(ctx, workerDS.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
})
})
@ -412,11 +412,11 @@ var _ = SIGDescribe("NFD master and worker", func() {
// Test custom nodename source configured in 2 additional ConfigMaps
//
Context("and nfd-workers as a daemonset with 2 additional configmaps for the custom source configured", func() {
It("the nodename matching features listed in the configmaps should be present", func() {
It("the nodename matching features listed in the configmaps should be present", func(ctx context.Context) {
By("Getting a worker node")
// We need a valid nodename for the configmap
nodes, err := getNonControlPlaneNodes(f.ClientSet)
nodes, err := getNonControlPlaneNodes(ctx, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
targetNodeName := nodes[0].Name
@ -443,7 +443,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
- ` + targetNodeName
cm1 := testutils.NewConfigMap("custom-config-extra-1", "custom.conf", data1)
cm1, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm1, metav1.CreateOptions{})
cm1, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm1, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
data2 := `
@ -458,7 +458,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
- "thisNameShouldNeverMatch"`
cm2 := testutils.NewConfigMap("custom-config-extra-2", "custom.conf", data2)
cm2, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm2, metav1.CreateOptions{})
cm2, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm2, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating nfd-worker daemonset with configmap mounted")
@ -469,14 +469,14 @@ var _ = SIGDescribe("NFD master and worker", func() {
)
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for worker daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
By("Getting target node and checking labels")
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
targetNode, err := f.ClientSet.CoreV1().Nodes().Get(ctx, targetNodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
labelFound := false
@ -503,7 +503,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
Expect(labelNegativeFound).To(BeFalse(), "label for not existing nodename found!")
By("Deleting nfd-worker daemonset")
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.Name, metav1.DeleteOptions{})
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(ctx, workerDS.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
})
})
@ -512,7 +512,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
// Test NodeFeature
//
Context("and NodeFeature objects deployed", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
extraMasterPodSpecOpts = []testpod.SpecOption{
testpod.SpecWithContainerExtraArgs(
"-deny-label-ns=*.denied.ns,random.unwanted.ns,*.vendor.io",
@ -520,20 +520,20 @@ var _ = SIGDescribe("NFD master and worker", func() {
),
}
})
It("labels from the NodeFeature objects should be created", func() {
It("labels from the NodeFeature objects should be created", func(ctx context.Context) {
if !useNodeFeatureApi {
Skip("NodeFeature API not enabled")
}
// We pick one node targeted for our NodeFeature objects
nodes, err := getNonControlPlaneNodes(f.ClientSet)
nodes, err := getNonControlPlaneNodes(ctx, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
targetNodeName := nodes[0].Name
Expect(targetNodeName).ToNot(BeEmpty(), "No suitable worker node found")
By("Creating NodeFeature object")
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(nfdClient, "nodefeature-1.yaml", f.Namespace.Name, targetNodeName)
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(ctx, nfdClient, "nodefeature-1.yaml", f.Namespace.Name, targetNodeName)
Expect(err).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeature object #1")
@ -544,15 +544,15 @@ var _ = SIGDescribe("NFD master and worker", func() {
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature3": "overridden",
},
}
Expect(checkForNodeLabels(f.ClientSet,
Expect(checkForNodeLabels(ctx, f.ClientSet,
expectedLabels, nodes,
)).NotTo(HaveOccurred())
By("Deleting NodeFeature object")
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(context.TODO(), nodeFeatures[0], metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(ctx, nodeFeatures[0], metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeature object were removed")
Expect(checkForNodeLabels(f.ClientSet,
Expect(checkForNodeLabels(ctx, f.ClientSet,
nil, nodes,
)).NotTo(HaveOccurred())
@ -562,11 +562,11 @@ var _ = SIGDescribe("NFD master and worker", func() {
testpod.SpecWithContainerExtraArgs("-label-sources=fake"),
)
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for worker daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
By("Verifying node labels from nfd-worker")
expectedLabels = map[string]k8sLabels{
@ -576,12 +576,12 @@ var _ = SIGDescribe("NFD master and worker", func() {
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature3": "true",
},
}
Expect(checkForNodeLabels(f.ClientSet,
Expect(checkForNodeLabels(ctx, f.ClientSet,
expectedLabels, nodes,
)).NotTo(HaveOccurred())
By("Re-creating NodeFeature object")
_, err = testutils.CreateOrUpdateNodeFeaturesFromFile(nfdClient, "nodefeature-1.yaml", f.Namespace.Name, targetNodeName)
_, err = testutils.CreateOrUpdateNodeFeaturesFromFile(ctx, nfdClient, "nodefeature-1.yaml", f.Namespace.Name, targetNodeName)
Expect(err).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeature object #1 are created")
@ -592,43 +592,43 @@ var _ = SIGDescribe("NFD master and worker", func() {
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature2": "true",
nfdv1alpha1.FeatureLabelNs + "/fake-fakefeature3": "overridden",
}
Expect(checkForNodeLabels(f.ClientSet,
Expect(checkForNodeLabels(ctx, f.ClientSet,
expectedLabels, nodes,
)).NotTo(HaveOccurred())
By("Creating extra namespace")
extraNs, err := f.CreateNamespace("node-feature-discvery-extra-ns", nil)
extraNs, err := f.CreateNamespace(ctx, "node-feature-discvery-extra-ns", nil)
Expect(err).NotTo(HaveOccurred())
By("Create NodeFeature object in the extra namespace")
nodeFeatures, err = testutils.CreateOrUpdateNodeFeaturesFromFile(nfdClient, "nodefeature-2.yaml", extraNs.Name, targetNodeName)
nodeFeatures, err = testutils.CreateOrUpdateNodeFeaturesFromFile(ctx, nfdClient, "nodefeature-2.yaml", extraNs.Name, targetNodeName)
Expect(err).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeature object #2 are created")
expectedLabels[targetNodeName][nfdv1alpha1.FeatureLabelNs+"/e2e-nodefeature-test-1"] = "overridden-from-obj-2"
expectedLabels[targetNodeName][nfdv1alpha1.FeatureLabelNs+"/e2e-nodefeature-test-3"] = "obj-2"
Expect(checkForNodeLabels(f.ClientSet, expectedLabels, nodes)).NotTo(HaveOccurred())
Expect(checkForNodeLabels(ctx, f.ClientSet, expectedLabels, nodes)).NotTo(HaveOccurred())
By("Deleting NodeFeature object from the extra namespace")
err = nfdClient.NfdV1alpha1().NodeFeatures(extraNs.Name).Delete(context.TODO(), nodeFeatures[0], metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatures(extraNs.Name).Delete(ctx, nodeFeatures[0], metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeature object were removed")
expectedLabels[targetNodeName][nfdv1alpha1.FeatureLabelNs+"/e2e-nodefeature-test-1"] = "obj-1"
delete(expectedLabels[targetNodeName], nfdv1alpha1.FeatureLabelNs+"/e2e-nodefeature-test-3")
Expect(checkForNodeLabels(f.ClientSet, expectedLabels, nodes)).NotTo(HaveOccurred())
Expect(checkForNodeLabels(f.ClientSet,
Expect(checkForNodeLabels(ctx, f.ClientSet, expectedLabels, nodes)).NotTo(HaveOccurred())
Expect(checkForNodeLabels(ctx, f.ClientSet,
expectedLabels,
nodes,
)).NotTo(HaveOccurred())
})
It("denied labels should not be created by the NodeFeature object", func() {
It("denied labels should not be created by the NodeFeature object", func(ctx context.Context) {
if !useNodeFeatureApi {
Skip("NodeFeature API not enabled")
}
nodes, err := getNonControlPlaneNodes(f.ClientSet)
nodes, err := getNonControlPlaneNodes(ctx, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
targetNodeName := nodes[0].Name
@ -636,7 +636,7 @@ var _ = SIGDescribe("NFD master and worker", func() {
// Apply Node Feature object
By("Create NodeFeature object")
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(nfdClient, "nodefeature-3.yaml", f.Namespace.Name, targetNodeName)
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(ctx, nfdClient, "nodefeature-3.yaml", f.Namespace.Name, targetNodeName)
Expect(err).NotTo(HaveOccurred())
// Verify that denied label was not added
@ -647,17 +647,17 @@ var _ = SIGDescribe("NFD master and worker", func() {
"custom.vendor.io/e2e-nodefeature-test-3": "vendor-ns",
},
}
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
expectedLabels,
nodes,
)).NotTo(HaveOccurred())
By("Deleting NodeFeature object")
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(context.TODO(), nodeFeatures[0], metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(ctx, nodeFeatures[0], metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
nil,
nodes,
@ -690,14 +690,14 @@ var _ = SIGDescribe("NFD master and worker", func() {
Effect: "NoExecute",
},
}
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
extraMasterPodSpecOpts = []testpod.SpecOption{
testpod.SpecWithContainerExtraArgs("-enable-taints"),
testpod.SpecWithTolerations(testTolerations),
}
})
It("custom labels from the NodeFeatureRule rules should be created", func() {
nodes, err := getNonControlPlaneNodes(f.ClientSet)
It("custom labels from the NodeFeatureRule rules should be created", func(ctx context.Context) {
nodes, err := getNonControlPlaneNodes(ctx, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
targetNodeName := nodes[0].Name
@ -710,7 +710,7 @@ core:
featureSources: ["fake"]
labelSources: []
`)
cm, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{})
cm, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating nfd-worker daemonset")
podSpecOpts := createPodSpecOpts(
@ -719,11 +719,11 @@ core:
testpod.SpecWithTolerations(testTolerations),
)
workerDS := testds.NFDWorker(podSpecOpts...)
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, workerDS, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for worker daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 2)).NotTo(HaveOccurred())
expected := map[string]k8sLabels{
"*": {
@ -734,17 +734,17 @@ core:
}
By("Creating NodeFeatureRules #1")
Expect(testutils.CreateNodeFeatureRulesFromFile(nfdClient, "nodefeaturerule-1.yaml")).NotTo(HaveOccurred())
Expect(testutils.CreateNodeFeatureRulesFromFile(ctx, nfdClient, "nodefeaturerule-1.yaml")).NotTo(HaveOccurred())
By("Verifying node labels from NodeFeatureRules #1")
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
expected,
nodes,
)).NotTo(HaveOccurred())
By("Creating NodeFeatureRules #2")
Expect(testutils.CreateNodeFeatureRulesFromFile(nfdClient, "nodefeaturerule-2.yaml")).NotTo(HaveOccurred())
Expect(testutils.CreateNodeFeatureRulesFromFile(ctx, nfdClient, "nodefeaturerule-2.yaml")).NotTo(HaveOccurred())
// Add features from NodeFeatureRule #2
expected["*"][nfdv1alpha1.FeatureLabelNs+"/e2e-matchany-test-1"] = "true"
@ -752,7 +752,7 @@ core:
expected["*"][nfdv1alpha1.FeatureLabelNs+"/e2e-template-test-1-instance_2"] = "found"
By("Verifying node labels from NodeFeatureRules #1 and #2")
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
expected,
nodes,
@ -760,7 +760,7 @@ core:
// Add features from NodeFeatureRule #3
By("Creating NodeFeatureRules #3")
Expect(testutils.CreateNodeFeatureRulesFromFile(nfdClient, "nodefeaturerule-3.yaml")).NotTo(HaveOccurred())
Expect(testutils.CreateNodeFeatureRulesFromFile(ctx, nfdClient, "nodefeaturerule-3.yaml")).NotTo(HaveOccurred())
By("Verifying node taints and annotation from NodeFeatureRules #3")
expectedTaints := []corev1.Taint{
@ -782,11 +782,11 @@ core:
}
expectedAnnotation := map[string]string{
"nfd.node.kubernetes.io/taints": "feature.node.kubernetes.io/fake-special-node=exists:PreferNoSchedule,feature.node.kubernetes.io/fake-dedicated-node=true:NoExecute,feature.node.kubernetes.io/performance-optimized-node=true:NoExecute"}
Expect(waitForNfdNodeTaints(f.ClientSet, expectedTaints, nodes)).NotTo(HaveOccurred())
Expect(waitForNfdNodeAnnotations(f.ClientSet, expectedAnnotation)).NotTo(HaveOccurred())
Expect(waitForNfdNodeTaints(ctx, f.ClientSet, expectedTaints, nodes)).NotTo(HaveOccurred())
Expect(waitForNfdNodeAnnotations(ctx, f.ClientSet, expectedAnnotation)).NotTo(HaveOccurred())
By("Re-applying NodeFeatureRules #3 with updated taints")
Expect(testutils.UpdateNodeFeatureRulesFromFile(nfdClient, "nodefeaturerule-3-updated.yaml")).NotTo(HaveOccurred())
Expect(testutils.UpdateNodeFeatureRulesFromFile(ctx, nfdClient, "nodefeaturerule-3-updated.yaml")).NotTo(HaveOccurred())
expectedTaintsUpdated := []corev1.Taint{
{
Key: "feature.node.kubernetes.io/fake-special-node",
@ -803,11 +803,11 @@ core:
"nfd.node.kubernetes.io/taints": "feature.node.kubernetes.io/fake-special-node=exists:PreferNoSchedule,feature.node.kubernetes.io/foo=true:NoExecute"}
By("Verifying updated node taints and annotation from NodeFeatureRules #3")
Expect(waitForNfdNodeTaints(f.ClientSet, expectedTaintsUpdated, nodes)).NotTo(HaveOccurred())
Expect(waitForNfdNodeAnnotations(f.ClientSet, expectedAnnotationUpdated)).NotTo(HaveOccurred())
Expect(waitForNfdNodeTaints(ctx, f.ClientSet, expectedTaintsUpdated, nodes)).NotTo(HaveOccurred())
Expect(waitForNfdNodeAnnotations(ctx, f.ClientSet, expectedAnnotationUpdated)).NotTo(HaveOccurred())
By("Deleting NodeFeatureRule object")
err = nfdClient.NfdV1alpha1().NodeFeatureRules().Delete(context.TODO(), "e2e-test-3", metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatureRules().Delete(ctx, "e2e-test-3", metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
expectedERAnnotation := map[string]string{
@ -820,45 +820,45 @@ core:
}
By("Creating NodeFeatureRules #4")
Expect(testutils.CreateNodeFeatureRulesFromFile(nfdClient, "nodefeaturerule-4.yaml")).NotTo(HaveOccurred())
Expect(testutils.CreateNodeFeatureRulesFromFile(ctx, nfdClient, "nodefeaturerule-4.yaml")).NotTo(HaveOccurred())
By("Verifying node annotations from NodeFeatureRules #4")
Expect(waitForNfdNodeAnnotations(f.ClientSet, expectedERAnnotation)).NotTo(HaveOccurred())
Expect(waitForNfdNodeAnnotations(ctx, f.ClientSet, expectedERAnnotation)).NotTo(HaveOccurred())
By("Verfiying node status capacity from NodeFeatureRules #4")
Expect(waitForCapacity(f.ClientSet, expectedCapacity, nodes)).NotTo(HaveOccurred())
Expect(waitForCapacity(ctx, f.ClientSet, expectedCapacity, nodes)).NotTo(HaveOccurred())
By("Deleting NodeFeatureRule object")
err = nfdClient.NfdV1alpha1().NodeFeatureRules().Delete(context.TODO(), "e2e-extened-resource-test", metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatureRules().Delete(ctx, "e2e-extened-resource-test", metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verfiying node status capacity from NodeFeatureRules #4")
Expect(waitForCapacity(f.ClientSet, nil, nodes)).NotTo(HaveOccurred())
Expect(waitForCapacity(ctx, f.ClientSet, nil, nodes)).NotTo(HaveOccurred())
By("Deleting nfd-worker daemonset")
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.Name, metav1.DeleteOptions{})
err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(ctx, workerDS.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
})
})
Context("and check whether master config passed successfully or not", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
extraMasterPodSpecOpts = []testpod.SpecOption{
testpod.SpecWithConfigMap("nfd-master-conf", "/etc/kubernetes/node-feature-discovery"),
}
cm := testutils.NewConfigMap("nfd-master-conf", "nfd-master.conf", `
denyLabelNs: ["*.denied.ns","random.unwanted.ns"]
`)
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{})
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
})
It("master configuration should take place", func() {
It("master configuration should take place", func(ctx context.Context) {
// deploy node feature object
if !useNodeFeatureApi {
Skip("NodeFeature API not enabled")
}
nodes, err := getNonControlPlaneNodes(f.ClientSet)
nodes, err := getNonControlPlaneNodes(ctx, f.ClientSet)
Expect(err).NotTo(HaveOccurred())
targetNodeName := nodes[0].Name
@ -866,7 +866,7 @@ denyLabelNs: ["*.denied.ns","random.unwanted.ns"]
// Apply Node Feature object
By("Create NodeFeature object")
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(nfdClient, "nodefeature-3.yaml", f.Namespace.Name, targetNodeName)
nodeFeatures, err := testutils.CreateOrUpdateNodeFeaturesFromFile(ctx, nfdClient, "nodefeature-3.yaml", f.Namespace.Name, targetNodeName)
Expect(err).NotTo(HaveOccurred())
// Verify that denied label was not added
@ -877,20 +877,20 @@ denyLabelNs: ["*.denied.ns","random.unwanted.ns"]
"custom.vendor.io/e2e-nodefeature-test-3": "vendor-ns",
},
}
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
expectedLabels,
nodes,
)).NotTo(HaveOccurred())
By("Deleting NodeFeature object")
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(context.TODO(), nodeFeatures[0], metav1.DeleteOptions{})
err = nfdClient.NfdV1alpha1().NodeFeatures(f.Namespace.Name).Delete(ctx, nodeFeatures[0], metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
// TODO: Find a better way to handle the timeout that happens to reflect the configmap changes
Skip("Testing the master dynamic configuration")
// Verify that config changes were applied
By("Updating the master config")
Expect(testutils.UpdateConfigMap(f.ClientSet, "nfd-master-conf", f.Namespace.Name, "nfd-master.conf", `
Expect(testutils.UpdateConfigMap(ctx, f.ClientSet, "nfd-master-conf", f.Namespace.Name, "nfd-master.conf", `
denyLabelNs: []
`))
By("Verifying that denied labels were removed")
@@ -902,7 +902,7 @@ denyLabelNs: []
"random.unwanted.ns/e2e-nodefeature-test-2": "unwanted-ns",
},
}
Expect(checkForNodeLabels(
Expect(checkForNodeLabels(ctx,
f.ClientSet,
expectedLabels,
nodes,
@@ -936,9 +936,9 @@ func simplePoll(poll func() error, wait time.Duration) error {
}
// waitForCapacity waits for the capacity to be updated in the node status
func waitForCapacity(cli clientset.Interface, expectedNewERs corev1.ResourceList, oldNodes []corev1.Node) error {
func waitForCapacity(ctx context.Context, cli clientset.Interface, expectedNewERs corev1.ResourceList, oldNodes []corev1.Node) error {
poll := func() error {
nodes, err := getNonControlPlaneNodes(cli)
nodes, err := getNonControlPlaneNodes(ctx, cli)
if err != nil {
return err
}
@@ -960,9 +960,9 @@ func waitForCapacity(cli clientset.Interface, expectedNewERs corev1.ResourceList
}
// waitForNfdNodeAnnotations waits for node to be annotated as expected.
func waitForNfdNodeAnnotations(cli clientset.Interface, expected map[string]string) error {
func waitForNfdNodeAnnotations(ctx context.Context, cli clientset.Interface, expected map[string]string) error {
poll := func() error {
nodes, err := getNonControlPlaneNodes(cli)
nodes, err := getNonControlPlaneNodes(ctx, cli)
if err != nil {
return err
}
@@ -982,10 +982,10 @@ func waitForNfdNodeAnnotations(cli clientset.Interface, expected map[string]stri
type k8sLabels map[string]string
// checkForNodeLabels waits and checks that the node is labeled as expected.
func checkForNodeLabels(cli clientset.Interface, expectedNewLabels map[string]k8sLabels, oldNodes []corev1.Node) error {
func checkForNodeLabels(ctx context.Context, cli clientset.Interface, expectedNewLabels map[string]k8sLabels, oldNodes []corev1.Node) error {
poll := func() error {
nodes, err := getNonControlPlaneNodes(cli)
nodes, err := getNonControlPlaneNodes(ctx, cli)
if err != nil {
return err
}
@@ -1013,9 +1013,9 @@ func checkForNodeLabels(cli clientset.Interface, expectedNewLabels map[string]k8
}
// waitForNfdNodeTaints waits for node to be tainted as expected.
func waitForNfdNodeTaints(cli clientset.Interface, expectedNewTaints []corev1.Taint, oldNodes []corev1.Node) error {
func waitForNfdNodeTaints(ctx context.Context, cli clientset.Interface, expectedNewTaints []corev1.Taint, oldNodes []corev1.Node) error {
poll := func() error {
nodes, err := getNonControlPlaneNodes(cli)
nodes, err := getNonControlPlaneNodes(ctx, cli)
if err != nil {
return err
}
@@ -1035,8 +1035,8 @@ func waitForNfdNodeTaints(cli clientset.Interface, expectedNewTaints []corev1.Ta
}
// getNonControlPlaneNodes gets the nodes that are not tainted for exclusive control-plane usage
func getNonControlPlaneNodes(cli clientset.Interface) ([]corev1.Node, error) {
nodeList, err := cli.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
func getNonControlPlaneNodes(ctx context.Context, cli clientset.Interface) ([]corev1.Node, error) {
nodeList, err := cli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
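The change above is the pattern that repeats through the rest of this diff: each helper gains a ctx context.Context parameter supplied by the Ginkgo node and forwards it to the client-go call in place of context.TODO(). A minimal, self-contained sketch of the convention (the helper name listReadyNodes is illustrative, not part of this repository):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listReadyNodes accepts the caller's context and threads it into the API
// call, so cancelling the spec context also aborts the request.
func listReadyNodes(ctx context.Context, cli clientset.Interface) ([]corev1.Node, error) {
	nodeList, err := cli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	ready := make([]corev1.Node, 0, len(nodeList.Items))
	for _, node := range nodeList.Items {
		for _, cond := range node.Status.Conditions {
			if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
				ready = append(ready, node)
				break
			}
		}
	}
	return ready, nil
}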

View file

@@ -21,7 +21,6 @@ import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -37,7 +36,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/kubelet"
e2ekubeletconfig "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
admissionapi "k8s.io/pod-security-admission/api"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -58,7 +57,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
f := framework.NewDefaultFramework("node-topology-updater")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
JustBeforeEach(func() {
JustBeforeEach(func(ctx context.Context) {
var err error
if extClient == nil {
@@ -72,43 +71,43 @@ var _ = SIGDescribe("NFD topology updater", func() {
}
By("Creating the node resource topologies CRD")
Expect(testutils.CreateNodeResourceTopologies(extClient)).ToNot(BeNil())
Expect(testutils.CreateNodeResourceTopologies(ctx, extClient)).ToNot(BeNil())
By("Configuring RBAC")
Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
Expect(testutils.ConfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
By("Creating nfd-topology-updater daemonset")
topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), topologyUpdaterDaemonSet, metav1.CreateOptions{})
topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, topologyUpdaterDaemonSet, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for daemonset pods to be ready")
Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
Expect(testpod.WaitForReady(ctx, f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: label.String()})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).ToNot(BeEmpty())
topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(context.TODO(), pods.Items[0].Spec.NodeName, metav1.GetOptions{})
topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(ctx, pods.Items[0].Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
kubeletConfig, err = kubelet.GetCurrentKubeletConfig(topologyUpdaterNode.Name, "", true)
kubeletConfig, err = e2ekubeletconfig.GetCurrentKubeletConfig(ctx, topologyUpdaterNode.Name, "", true, false)
Expect(err).NotTo(HaveOccurred())
workerNodes, err = testutils.GetWorkerNodes(f)
workerNodes, err = testutils.GetWorkerNodes(ctx, f)
Expect(err).NotTo(HaveOccurred())
})
ginkgo.AfterEach(func() {
AfterEach(func(ctx context.Context) {
framework.Logf("Node Feature Discovery topology updater CRD and RBAC removal")
err := testutils.DeconfigureRBAC(f.ClientSet, f.Namespace.Name)
err := testutils.DeconfigureRBAC(ctx, f.ClientSet, f.Namespace.Name)
if err != nil {
framework.Failf("AfterEach: Failed to delete RBAC resources: %v", err)
}
})
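These setup and teardown changes lean on Ginkgo v2's context-aware nodes: a BeforeEach, AfterEach, It (or their dot-imported forms used here) declared as func(ctx context.Context) receives a per-spec context that Ginkgo cancels when the spec is interrupted or times out, which is what lets ctx flow into the helpers above. A minimal sketch under that assumption, with setUp/tearDown as hypothetical stand-ins for the RBAC helpers:

package example

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Hypothetical helpers standing in for ConfigureRBAC/DeconfigureRBAC above.
func setUp(ctx context.Context) error    { return nil }
func tearDown(ctx context.Context) error { return nil }

var _ = Describe("context-aware nodes", func() {
	// Ginkgo passes a SpecContext (which satisfies context.Context) to any
	// node function declared with a context parameter.
	BeforeEach(func(ctx context.Context) {
		Expect(setUp(ctx)).To(Succeed())
	})

	AfterEach(func(ctx context.Context) {
		Expect(tearDown(ctx)).To(Succeed())
	})

	It("propagates ctx to helpers", func(ctx context.Context) {
		Expect(ctx.Err()).NotTo(HaveOccurred())
	})
})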
Context("with topology-updater daemonset running", func() {
ginkgo.BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())
@@ -118,29 +117,29 @@ var _ = SIGDescribe("NFD topology updater", func() {
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("should fill the node resource topologies CR with the data", func() {
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
It("should fill the node resource topologies CR with the data", func(ctx context.Context) {
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
isValid := testutils.IsValidNodeTopology(nodeTopology, kubeletConfig)
Expect(isValid).To(BeTrue(), "received invalid topology: %v", nodeTopology)
})
It("it should not account for any cpus if a container doesn't request exclusive cpus (best effort QOS)", func() {
It("it should not account for any cpus if a container doesn't request exclusive cpus (best effort QOS)", func(ctx context.Context) {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
sleeperPod := testpod.BestEffortSleeper()
podMap := make(map[string]*corev1.Pod)
pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
defer testpod.DeleteAsync(f, podMap)
defer testpod.DeleteAsync(ctx, f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
// the object, hence the resource version must NOT change, so we can only sleep
time.Sleep(cooldown)
By("checking the changes in the updated topology - expecting none")
finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
finalNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
@@ -164,9 +163,9 @@ var _ = SIGDescribe("NFD topology updater", func() {
Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
})
It("it should not account for any cpus if a container doesn't request exclusive cpus (guaranteed QOS, nonintegral cpu request)", func() {
It("it should not account for any cpus if a container doesn't request exclusive cpus (guaranteed QOS, nonintegral cpu request)", func(ctx context.Context) {
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
@@ -176,16 +175,16 @@ var _ = SIGDescribe("NFD topology updater", func() {
}))
podMap := make(map[string]*corev1.Pod)
pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
defer testpod.DeleteAsync(f, podMap)
defer testpod.DeleteAsync(ctx, f, podMap)
cooldown := 30 * time.Second
By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
// the object, hence the resource version must NOT change, so we can only sleep
time.Sleep(cooldown)
By("checking the changes in the updated topology - expecting none")
finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
finalNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
@@ -209,7 +208,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
})
It("it should account for containers requesting exclusive cpus", func() {
It("it should account for containers requesting exclusive cpus", func(ctx context.Context) {
nodes, err := testutils.FilterNodesWithEnoughCores(workerNodes, "1000m")
Expect(err).NotTo(HaveOccurred())
if len(nodes) < 1 {
@@ -217,7 +216,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
}
By("getting the initial topology information")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating a pod consuming exclusive CPUs")
sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
corev1.ResourceList{
@@ -231,14 +230,14 @@ var _ = SIGDescribe("NFD topology updater", func() {
sleeperPod.Spec.NodeName = topologyUpdaterNode.Name
podMap := make(map[string]*corev1.Pod)
pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
defer testpod.DeleteAsync(f, podMap)
defer testpod.DeleteAsync(ctx, f, podMap)
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
@@ -264,7 +263,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
})
When("sleep interval disabled", func() {
ginkgo.BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())
@@ -273,7 +272,7 @@ var _ = SIGDescribe("NFD topology updater", func() {
podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(dockerImage()), testpod.SpecWithContainerExtraArgs("-sleep-interval=0s")}
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("should still create CRs using a reactive updates", func() {
It("should still create CRs using a reactive updates", func(ctx context.Context) {
nodes, err := testutils.FilterNodesWithEnoughCores(workerNodes, "1000m")
Expect(err).NotTo(HaveOccurred())
if len(nodes) < 1 {
@@ -293,12 +292,12 @@ var _ = SIGDescribe("NFD topology updater", func() {
sleeperPod.Spec.NodeName = topologyUpdaterNode.Name
podMap := make(map[string]*corev1.Pod)
pod := e2epod.NewPodClient(f).CreateSync(sleeperPod)
pod := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod)
podMap[pod.Name] = pod
defer testpod.DeleteAsync(f, podMap)
defer testpod.DeleteAsync(ctx, f, podMap)
By("checking initial CR created")
initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
initialNodeTopo := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
By("creating additional pod consuming exclusive CPUs")
sleeperPod2 := testpod.GuaranteedSleeper(testpod.WithLimits(
@@ -313,13 +312,13 @@ var _ = SIGDescribe("NFD topology updater", func() {
// which node we need to examine
sleeperPod2.Spec.NodeName = topologyUpdaterNode.Name
sleeperPod2.Name = sleeperPod2.Name + "2"
pod2 := e2epod.NewPodClient(f).CreateSync(sleeperPod2)
pod2 := e2epod.NewPodClient(f).CreateSync(ctx, sleeperPod2)
podMap[pod2.Name] = pod2
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
@@ -348,12 +347,12 @@ var _ = SIGDescribe("NFD topology updater", func() {
})
When("topology-updater configure to exclude memory", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cm := testutils.NewConfigMap("nfd-topology-updater-conf", "nfd-topology-updater.conf", `
excludeList:
'*': [memory]
`)
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{})
cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
cfg, err := testutils.GetConfig()
@@ -369,10 +368,10 @@ excludeList:
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("noderesourcetopology should not advertise the memory resource", func() {
It("noderesourcetopology should not advertise the memory resource", func(ctx context.Context) {
Eventually(func() bool {
memoryFound := false
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
for _, zone := range nodeTopology.Zones {
for _, res := range zone.Resources {
if res.Name == string(corev1.ResourceMemory) {
@@ -387,7 +386,7 @@ excludeList:
})
})
When("topology-updater configure to compute pod fingerprint", func() {
BeforeEach(func() {
BeforeEach(func(ctx context.Context) {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())
@@ -400,10 +399,10 @@ excludeList:
}
topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
})
It("noderesourcetopology should advertise pod fingerprint in top-level attribute", func() {
It("noderesourcetopology should advertise pod fingerprint in top-level attribute", func(ctx context.Context) {
Eventually(func() bool {
// get node topology
nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
nodeTopology := testutils.GetNodeTopology(ctx, topologyClient, topologyUpdaterNode.Name)
// look for attribute
podFingerprintAttribute, err := findAttribute(nodeTopology.Attributes, podfingerprint.Attribute)
@@ -412,7 +411,7 @@ excludeList:
return false
}
// get pods in node
pods, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{FieldSelector: "spec.nodeName=" + topologyUpdaterNode.Name})
pods, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{FieldSelector: "spec.nodeName=" + topologyUpdaterNode.Name})
if err != nil {
framework.Logf("podFingerprint error while recovering %q node pods: %v", topologyUpdaterNode.Name, err)
return false

View file

@@ -36,13 +36,13 @@ func NewConfigMap(name, key, data string) *corev1.ConfigMap {
}
// UpdateConfigMap is a helper for updating a ConfigMap object.
func UpdateConfigMap(c clientset.Interface, name, namespace, key, data string) error {
cm, err := c.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
func UpdateConfigMap(ctx context.Context, c clientset.Interface, name, namespace, key, data string) error {
cm, err := c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("configmap %s is not found", name)
}
cm.Data[key] = data
_, err = c.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
_, err = c.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("error while updating configmap with name %s", name)
}

View file

@@ -40,7 +40,7 @@ import (
var packagePath string
// CreateNfdCRDs creates the NodeFeatureRule CRD in the API server.
func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDefinition, error) {
func CreateNfdCRDs(ctx context.Context, cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDefinition, error) {
crds, err := crdsFromFile(filepath.Join(packagePath, "..", "..", "..", "deployment", "base", "nfd-crds", "nfd-api-crds.yaml"))
if err != nil {
return nil, err
@@ -49,13 +49,13 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
newCRDs := make([]*apiextensionsv1.CustomResourceDefinition, len(crds))
for i, crd := range crds {
// Delete the existing CRD (if any); this also gets rid of stale objects
err = cli.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
err = cli.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete %q CRD: %w", crd.Name, err)
} else if err == nil {
// Wait for CRD deletion to complete before trying to re-create it
err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
_, err = cli.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, false, func(ctx context.Context) (bool, error) {
_, err = cli.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
if err == nil {
return false, nil
} else if errors.IsNotFound(err) {
@@ -68,7 +68,7 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
}
}
newCRDs[i], err = cli.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{})
newCRDs[i], err = cli.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{})
if err != nil {
return nil, err
}
@@ -77,7 +77,7 @@ func CreateNfdCRDs(cli extclient.Interface) ([]*apiextensionsv1.CustomResourceDe
}
// CreateOrUpdateNodeFeaturesFromFile creates/updates NodeFeature objects from a given file located under the test data directory.
func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, namespace, nodename string) ([]string, error) {
func CreateOrUpdateNodeFeaturesFromFile(ctx context.Context, cli nfdclientset.Interface, filename, namespace, nodename string) ([]string, error) {
objs, err := nodeFeaturesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return nil, err
@@ -91,13 +91,13 @@ func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, na
}
obj.Labels[nfdv1alpha1.NodeFeatureObjNodeNameLabel] = nodename
if oldObj, err := cli.NfdV1alpha1().NodeFeatures(namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}); errors.IsNotFound(err) {
if _, err := cli.NfdV1alpha1().NodeFeatures(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}); err != nil {
if oldObj, err := cli.NfdV1alpha1().NodeFeatures(namespace).Get(ctx, obj.Name, metav1.GetOptions{}); errors.IsNotFound(err) {
if _, err := cli.NfdV1alpha1().NodeFeatures(namespace).Create(ctx, obj, metav1.CreateOptions{}); err != nil {
return names, fmt.Errorf("failed to create NodeFeature %w", err)
}
} else if err == nil {
obj.SetResourceVersion(oldObj.GetResourceVersion())
if _, err = cli.NfdV1alpha1().NodeFeatures(namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatures(namespace).Update(ctx, obj, metav1.UpdateOptions{}); err != nil {
return names, fmt.Errorf("failed to update NodeFeature object: %w", err)
}
} else {
@@ -109,14 +109,14 @@ func CreateOrUpdateNodeFeaturesFromFile(cli nfdclientset.Interface, filename, na
}
// CreateNodeFeatureRulesFromFile creates NodeFeatureRule objects from a given file located under the test data directory.
func CreateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string) error {
func CreateNodeFeatureRulesFromFile(ctx context.Context, cli nfdclientset.Interface, filename string) error {
objs, err := nodeFeatureRulesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return err
}
for _, obj := range objs {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Create(context.TODO(), obj, metav1.CreateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Create(ctx, obj, metav1.CreateOptions{}); err != nil {
return err
}
}
@@ -124,7 +124,7 @@ func CreateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string)
}
// UpdateNodeFeatureRulesFromFile updates existing NodeFeatureRule objects from a given file located under the test data directory.
func UpdateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string) error {
func UpdateNodeFeatureRulesFromFile(ctx context.Context, cli nfdclientset.Interface, filename string) error {
objs, err := nodeFeatureRulesFromFile(filepath.Join(packagePath, "..", "data", filename))
if err != nil {
return err
@@ -132,12 +132,12 @@ func UpdateNodeFeatureRulesFromFile(cli nfdclientset.Interface, filename string)
for _, obj := range objs {
var nfr *nfdv1alpha1.NodeFeatureRule
if nfr, err = cli.NfdV1alpha1().NodeFeatureRules().Get(context.TODO(), obj.Name, metav1.GetOptions{}); err != nil {
if nfr, err = cli.NfdV1alpha1().NodeFeatureRules().Get(ctx, obj.Name, metav1.GetOptions{}); err != nil {
return fmt.Errorf("failed to get NodeFeatureRule %w", err)
}
obj.SetResourceVersion(nfr.GetResourceVersion())
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Update(context.TODO(), obj, metav1.UpdateOptions{}); err != nil {
if _, err = cli.NfdV1alpha1().NodeFeatureRules().Update(ctx, obj, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update NodeFeatureRule %w", err)
}
}

View file

@@ -40,22 +40,22 @@ const (
)
// GetWorkerNodes returns all nodes labeled as worker
func GetWorkerNodes(f *framework.Framework) ([]corev1.Node, error) {
return GetNodesByRole(f, RoleWorker)
func GetWorkerNodes(ctx context.Context, f *framework.Framework) ([]corev1.Node, error) {
return GetNodesByRole(ctx, f, RoleWorker)
}
// GetNodesByRole returns all nodes with the specified role
func GetNodesByRole(f *framework.Framework, role string) ([]corev1.Node, error) {
func GetNodesByRole(ctx context.Context, f *framework.Framework, role string) ([]corev1.Node, error) {
selector, err := labels.Parse(fmt.Sprintf("%s/%s=", LabelRole, role))
if err != nil {
return nil, err
}
return GetNodesBySelector(f, selector)
return GetNodesBySelector(ctx, f, selector)
}
// GetNodesBySelector returns all nodes with the specified selector
func GetNodesBySelector(f *framework.Framework, selector labels.Selector) ([]corev1.Node, error) {
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
func GetNodesBySelector(ctx context.Context, f *framework.Framework, selector labels.Selector) ([]corev1.Node, error) {
nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, err
}

View file

@@ -78,21 +78,21 @@ func NewNodeResourceTopologies() (*apiextensionsv1.CustomResourceDefinition, err
// CreateNodeResourceTopologies creates the NodeResourceTopology CRD in the cluster if it doesn't exist already.
// Returns the CRD golang object present in the cluster.
func CreateNodeResourceTopologies(extClient extclient.Interface) (*apiextensionsv1.CustomResourceDefinition, error) {
func CreateNodeResourceTopologies(ctx context.Context, extClient extclient.Interface) (*apiextensionsv1.CustomResourceDefinition, error) {
crd, err := NewNodeResourceTopologies()
if err != nil {
return nil, err
}
// Delete the existing CRD (if any); this also gets rid of stale objects
err = extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{})
err = extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return nil, fmt.Errorf("failed to delete NodeResourceTopology CRD: %w", err)
}
// The delete operation takes time; wait until the CRD is completely gone
if err = wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) {
_, err = extClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
if err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
_, err = extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})
if err == nil {
return false, nil
}
@@ -104,15 +104,15 @@ func CreateNodeResourceTopologies(extClient extclient.Interface) (*apiextensions
}); err != nil {
return nil, fmt.Errorf("failed to get NodeResourceTopology CRD: %w", err)
}
return extClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{})
return extClient.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{})
}
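The polling calls above also move off wait.Poll/wait.PollImmediate onto the context-aware wait.PollUntilContextTimeout, whose fourth argument decides whether the condition runs immediately (the old PollImmediate behaviour, true) or only after the first interval (the old Poll behaviour, false). A small sketch of the conversion, with waitForCRDGone as a hypothetical wrapper:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCRDGone polls check until it reports done, wired to the caller's
// context so the wait stops as soon as the spec is cancelled.
func waitForCRDGone(ctx context.Context, check func(ctx context.Context) (done bool, err error)) error {
	// immediate=true mirrors wait.PollImmediate; pass false to mirror wait.Poll.
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, check)
}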
// GetNodeTopology returns the NodeResourceTopology data for the node identified by `nodeName`.
func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {
func GetNodeTopology(ctx context.Context, topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {
var nodeTopology *v1alpha2.NodeResourceTopology
var err error
gomega.EventuallyWithOffset(1, func() bool {
nodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), nodeName, metav1.GetOptions{})
nodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false

View file

@@ -93,7 +93,7 @@ func BestEffortSleeper() *corev1.Pod {
}
// DeleteAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longest operation ends.
func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
func DeleteAsync(ctx context.Context, f *framework.Framework, podMap map[string]*corev1.Pod) {
var wg sync.WaitGroup
for _, pod := range podMap {
wg.Add(1)
@@ -101,19 +101,19 @@ func DeleteAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
DeleteSyncByName(f, podName)
DeleteSyncByName(ctx, f, podName)
}(pod.Namespace, pod.Name)
}
wg.Wait()
}
// DeleteSyncByName deletes the pod identified by `podName` in the current namespace
func DeleteSyncByName(f *framework.Framework, podName string) {
func DeleteSyncByName(ctx context.Context, f *framework.Framework, podName string) {
gp := int64(0)
delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp,
}
e2epod.NewPodClient(f).DeleteSync(podName, delOpts, e2epod.DefaultPodDeletionTimeout)
e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, e2epod.DefaultPodDeletionTimeout)
}
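The e2e framework's pod client goes through the same migration: CreateSync and DeleteSync now take the spec's context as their first argument, exactly as used above. A minimal sketch of the create/delete round trip (runSleeperPod is an illustrative helper, not part of this repository):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runSleeperPod creates the given pod synchronously and then deletes it,
// forwarding the spec's context to both framework calls.
func runSleeperPod(ctx context.Context, f *framework.Framework, pod *corev1.Pod) {
	created := e2epod.NewPodClient(f).CreateSync(ctx, pod)

	gp := int64(0)
	e2epod.NewPodClient(f).DeleteSync(ctx, created.Name,
		metav1.DeleteOptions{GracePeriodSeconds: &gp}, e2epod.DefaultPodDeletionTimeout)
}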
type SpecOption func(spec *corev1.PodSpec)
@@ -464,12 +464,12 @@ func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType {
// WaitForReady waits for the pods to become ready.
// NOTE: copied from k8s v1.22, after which it was removed from there.
// Convenient for checking that all pods of a daemonset are ready.
func WaitForReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
func WaitForReady(ctx context.Context, c clientset.Interface, ns, name string, minReadySeconds int) error {
const poll = 2 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
return wait.PollUntilContextTimeout(ctx, poll, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return false, nil
}

View file

@@ -31,48 +31,48 @@ var (
)
// ConfigureRBAC creates required RBAC configuration
func ConfigureRBAC(cs clientset.Interface, ns string) error {
_, err := createServiceAccount(cs, "nfd-master-e2e", ns)
func ConfigureRBAC(ctx context.Context, cs clientset.Interface, ns string) error {
_, err := createServiceAccount(ctx, cs, "nfd-master-e2e", ns)
if err != nil {
return err
}
_, err = createServiceAccount(cs, "nfd-worker-e2e", ns)
_, err = createServiceAccount(ctx, cs, "nfd-worker-e2e", ns)
if err != nil {
return err
}
_, err = createServiceAccount(cs, "nfd-topology-updater-e2e", ns)
_, err = createServiceAccount(ctx, cs, "nfd-topology-updater-e2e", ns)
if err != nil {
return err
}
_, err = createClusterRoleMaster(cs)
_, err = createClusterRoleMaster(ctx, cs)
if err != nil {
return err
}
_, err = createRoleWorker(cs, ns)
_, err = createRoleWorker(ctx, cs, ns)
if err != nil {
return err
}
_, err = createClusterRoleTopologyUpdater(cs)
_, err = createClusterRoleTopologyUpdater(ctx, cs)
if err != nil {
return err
}
_, err = createClusterRoleBindingMaster(cs, ns)
_, err = createClusterRoleBindingMaster(ctx, cs, ns)
if err != nil {
return err
}
_, err = createRoleBindingWorker(cs, ns)
_, err = createRoleBindingWorker(ctx, cs, ns)
if err != nil {
return err
}
_, err = createClusterRoleBindingTopologyUpdater(cs, ns)
_, err = createClusterRoleBindingTopologyUpdater(ctx, cs, ns)
if err != nil {
return err
}
@@ -81,40 +81,40 @@ func ConfigureRBAC(cs clientset.Interface, ns string) error {
}
// DeconfigureRBAC removes RBAC configuration
func DeconfigureRBAC(cs clientset.Interface, ns string) error {
err := cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nfd-topology-updater-e2e", metav1.DeleteOptions{})
func DeconfigureRBAC(ctx context.Context, cs clientset.Interface, ns string) error {
err := cs.RbacV1().ClusterRoleBindings().Delete(ctx, "nfd-topology-updater-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
err = cs.RbacV1().ClusterRoleBindings().Delete(ctx, "nfd-master-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.RbacV1().RoleBindings(ns).Delete(context.TODO(), "nfd-worker-e2e", metav1.DeleteOptions{})
err = cs.RbacV1().RoleBindings(ns).Delete(ctx, "nfd-worker-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.RbacV1().ClusterRoles().Delete(context.TODO(), "nfd-topology-updater-e2e", metav1.DeleteOptions{})
err = cs.RbacV1().ClusterRoles().Delete(ctx, "nfd-topology-updater-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.RbacV1().ClusterRoles().Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
err = cs.RbacV1().ClusterRoles().Delete(ctx, "nfd-master-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.RbacV1().Roles(ns).Delete(context.TODO(), "nfd-worker-e2e", metav1.DeleteOptions{})
err = cs.RbacV1().Roles(ns).Delete(ctx, "nfd-worker-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-topology-updater-e2e", metav1.DeleteOptions{})
err = cs.CoreV1().ServiceAccounts(ns).Delete(ctx, "nfd-topology-updater-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-master-e2e", metav1.DeleteOptions{})
err = cs.CoreV1().ServiceAccounts(ns).Delete(ctx, "nfd-master-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
err = cs.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), "nfd-worker-e2e", metav1.DeleteOptions{})
err = cs.CoreV1().ServiceAccounts(ns).Delete(ctx, "nfd-worker-e2e", metav1.DeleteOptions{})
if err != nil {
return err
}
@@ -122,18 +122,18 @@ func DeconfigureRBAC(cs clientset.Interface, ns string) error {
}
// Configure service account
func createServiceAccount(cs clientset.Interface, name, ns string) (*corev1.ServiceAccount, error) {
func createServiceAccount(ctx context.Context, cs clientset.Interface, name, ns string) (*corev1.ServiceAccount, error) {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
}
return cs.CoreV1().ServiceAccounts(ns).Create(context.TODO(), sa, metav1.CreateOptions{})
return cs.CoreV1().ServiceAccounts(ns).Create(ctx, sa, metav1.CreateOptions{})
}
// Configure cluster role required by NFD Master
func createClusterRoleMaster(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
func createClusterRoleMaster(ctx context.Context, cs clientset.Interface) (*rbacv1.ClusterRole, error) {
cr := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-master-e2e",
@@ -161,11 +161,11 @@ func createClusterRoleMaster(cs clientset.Interface) (*rbacv1.ClusterRole, error
Verbs: []string{"use"},
})
}
return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
return cs.RbacV1().ClusterRoles().Update(ctx, cr, metav1.UpdateOptions{})
}
// Configure role required by NFD Worker
func createRoleWorker(cs clientset.Interface, ns string) (*rbacv1.Role, error) {
func createRoleWorker(ctx context.Context, cs clientset.Interface, ns string) (*rbacv1.Role, error) {
cr := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-e2e",
@@ -179,11 +179,11 @@ func createRoleWorker(cs clientset.Interface, ns string) (*rbacv1.Role, error) {
},
},
}
return cs.RbacV1().Roles(ns).Update(context.TODO(), cr, metav1.UpdateOptions{})
return cs.RbacV1().Roles(ns).Update(ctx, cr, metav1.UpdateOptions{})
}
// Configure cluster role required by NFD Topology Updater
func createClusterRoleTopologyUpdater(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
func createClusterRoleTopologyUpdater(ctx context.Context, cs clientset.Interface) (*rbacv1.ClusterRole, error) {
cr := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-topology-updater-e2e",
@@ -218,11 +218,11 @@ func createClusterRoleTopologyUpdater(cs clientset.Interface) (*rbacv1.ClusterRo
Verbs: []string{"use"},
})
}
return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
return cs.RbacV1().ClusterRoles().Update(ctx, cr, metav1.UpdateOptions{})
}
// Configure cluster role binding required by NFD Master
func createClusterRoleBindingMaster(cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
func createClusterRoleBindingMaster(ctx context.Context, cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
crb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-master-e2e",
@@ -241,11 +241,11 @@ func createClusterRoleBindingMaster(cs clientset.Interface, ns string) (*rbacv1.
},
}
return cs.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
return cs.RbacV1().ClusterRoleBindings().Update(ctx, crb, metav1.UpdateOptions{})
}
// Configure role binding required by NFD Worker
func createRoleBindingWorker(cs clientset.Interface, ns string) (*rbacv1.RoleBinding, error) {
func createRoleBindingWorker(ctx context.Context, cs clientset.Interface, ns string) (*rbacv1.RoleBinding, error) {
crb := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-worker-e2e",
@@ -265,11 +265,11 @@ func createRoleBindingWorker(cs clientset.Interface, ns string) (*rbacv1.RoleBin
},
}
return cs.RbacV1().RoleBindings(ns).Update(context.TODO(), crb, metav1.UpdateOptions{})
return cs.RbacV1().RoleBindings(ns).Update(ctx, crb, metav1.UpdateOptions{})
}
// Configure cluster role binding required by NFD Topology Updater
func createClusterRoleBindingTopologyUpdater(cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
func createClusterRoleBindingTopologyUpdater(ctx context.Context, cs clientset.Interface, ns string) (*rbacv1.ClusterRoleBinding, error) {
crb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-topology-updater-e2e",
@@ -288,5 +288,5 @@ func createClusterRoleBindingTopologyUpdater(cs clientset.Interface, ns string)
},
}
return cs.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
return cs.RbacV1().ClusterRoleBindings().Update(ctx, crb, metav1.UpdateOptions{})
}

View file

@@ -25,7 +25,7 @@ import (
)
// CreateService creates nfd-master Service
func CreateService(cs clientset.Interface, ns string) (*corev1.Service, error) {
func CreateService(ctx context.Context, cs clientset.Interface, ns string) (*corev1.Service, error) {
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "nfd-master-e2e",
@@ -41,5 +41,5 @@ func CreateService(cs clientset.Interface, ns string) (*corev1.Service, error) {
Type: corev1.ServiceTypeClusterIP,
},
}
return cs.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})
return cs.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{})
}