
nfd-topology-updater: update NodeResourceTopology objects directly

Drop the gRPC communication to nfd-master and connect to the Kubernetes
API server directly when updating NodeResourceTopology objects.
Topology-updater already has a connection to the API server for listing
Pods, so this is not that dramatic a change. It also simplifies the code
considerably, as there is no longer any need for the NFD gRPC client or for
managing TLS certs/keys.

This change aligns nfd-topology-updater with the future direction of
nfd-worker where the gRPC API is being dropped and replaced by a
CRD-based API.

This patch also updates the deployment files and documentation to reflect
this change.
Markus Lehtonen 2022-12-02 11:47:56 +02:00
parent 9f68f6c93a
commit f13ed2d91c
23 changed files with 101 additions and 993 deletions
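
At its core, the new flow replaces the gRPC round-trip with a get-or-create-or-update of the per-node NodeResourceTopology object through the generated topology clientset; the actual implementation is in the nfd-topology-updater diff below. A minimal standalone sketch of that pattern, assuming the clientset package path `github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned` and a rest.Config obtained from a kubeconfig or in-cluster credentials:

```go
package example

import (
	"context"
	"fmt"

	v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// createOrUpdateNRT mirrors the pattern this commit introduces in
// nfd-topology-updater: look up the node's NodeResourceTopology object and
// either create it or update its zone list in place.
func createOrUpdateNRT(ctx context.Context, cfg *rest.Config, nodeName, tmPolicy string, zones v1alpha1.ZoneList) error {
	cli, err := topologyclientset.NewForConfig(cfg)
	if err != nil {
		return err
	}
	nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// Object does not exist yet for this node: create it.
		nrtNew := v1alpha1.NodeResourceTopology{
			ObjectMeta:       metav1.ObjectMeta{Name: nodeName},
			Zones:            zones,
			TopologyPolicies: []string{tmPolicy},
		}
		if _, err := cli.TopologyV1alpha1().NodeResourceTopologies().Create(ctx, &nrtNew, metav1.CreateOptions{}); err != nil {
			return fmt.Errorf("failed to create NodeResourceTopology: %w", err)
		}
		return nil
	} else if err != nil {
		return err
	}
	// Object exists: refresh its zone list.
	nrtMutated := nrt.DeepCopy()
	nrtMutated.Zones = zones
	if _, err := cli.TopologyV1alpha1().NodeResourceTopologies().Update(ctx, nrtMutated, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("failed to update NodeResourceTopology: %w", err)
	}
	return nil
}
```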

View file

@ -93,10 +93,7 @@ func main() {
klog.Infof("detected kubelet Topology Manager policy %q", tmPolicy)
// Get new TopologyUpdater instance
instance, err := topology.NewTopologyUpdater(*args, *resourcemonitorArgs, tmPolicy)
if err != nil {
klog.Exitf("failed to initialize TopologyUpdater instance: %v", err)
}
instance := topology.NewTopologyUpdater(*args, *resourcemonitorArgs, tmPolicy)
if err = instance.Run(); err != nil {
klog.Exit(err)
@ -129,12 +126,6 @@ func initFlags(flagset *flag.FlagSet) (*topology.Args, *resourcemonitor.Args) {
args := &topology.Args{}
resourcemonitorArgs := &resourcemonitor.Args{}
flagset.StringVar(&args.CaFile, "ca-file", "",
"Root certificate for verifying connections")
flagset.StringVar(&args.CertFile, "cert-file", "",
"Certificate used for authenticating connections")
flagset.StringVar(&args.KeyFile, "key-file", "",
"Private key matching -cert-file")
flagset.BoolVar(&args.Oneshot, "oneshot", false,
"Update once and exit")
flagset.BoolVar(&args.NoPublish, "no-publish", false,
@ -151,10 +142,6 @@ func initFlags(flagset *flag.FlagSet) (*topology.Args, *resourcemonitor.Args) {
"API auth token file path. It is used to request kubelet configz endpoint, only takes effect when kubelet-config-uri is https. Default to /var/run/secrets/kubernetes.io/serviceaccount/token.")
flagset.StringVar(&resourcemonitorArgs.PodResourceSocketPath, "podresources-socket", hostpath.VarDir.Path("lib/kubelet/pod-resources/kubelet.sock"),
"Pod Resource Socket path to use.")
flagset.StringVar(&args.Server, "server", "localhost:8080",
"NFD server address to connecto to.")
flagset.StringVar(&args.ServerNameOverride, "server-name-override", "",
"Hostname expected from server certificate, useful in testing")
flagset.StringVar(&args.ConfigFile, "config", "/etc/kubernetes/node-feature-discovery/nfd-topology-updater.conf",
"Config file to use.")

View file

@ -87,16 +87,10 @@ func TestArgsParse(t *testing.T) {
"-no-publish",
"-sleep-interval=30s",
"-kubelet-config-uri=file:///path/testconfig.yaml",
"-podresources-socket=/path/testkubelet.sock",
"-ca-file=ca",
"-cert-file=crt",
"-key-file=key")
"-podresources-socket=/path/testkubelet.sock")
Convey("-no-publish is set and args.sources is set to appropriate values", func() {
So(args.NoPublish, ShouldBeTrue)
So(args.CaFile, ShouldEqual, "ca")
So(args.CertFile, ShouldEqual, "crt")
So(args.KeyFile, ShouldEqual, "key")
So(finderArgs.SleepInterval, ShouldEqual, 30*time.Second)
So(finderArgs.KubeletConfigURI, ShouldEqual, "file:///path/testconfig.yaml")
So(finderArgs.PodResourceSocketPath, ShouldEqual, "/path/testkubelet.sock")

View file

@ -22,3 +22,11 @@ rules:
- pods
verbs:
- get
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update

View file

@ -12,14 +12,6 @@ rules:
- patch
- update
- list
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update
- apiGroups:
- nfd.k8s-sigs.io
resources:

View file

@ -21,5 +21,4 @@ spec:
imagePullPolicy: Always
command:
- "nfd-topology-updater"
args:
- "-server=nfd-master:8080"
args: []

View file

@ -32,16 +32,6 @@ rules:
- get
- list
- watch
{{- if .Values.topologyUpdater.enable }}
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update
{{- end }}
{{- end }}
---
@ -66,4 +56,12 @@ rules:
- pods
verbs:
- get
- apiGroups:
- topology.node.k8s.io
resources:
- noderesourcetopologies
verbs:
- create
- get
- update
{{- end }}

View file

@ -40,7 +40,6 @@ spec:
command:
- "nfd-topology-updater"
args:
- "--server={{ include "node-feature-discovery.fullname" . }}-master:{{ .Values.master.service.port }}"
{{- if .Values.topologyUpdater.updateInterval | empty | not }}
- "--sleep-interval={{ .Values.topologyUpdater.updateInterval }}"
{{- else }}

View file

@ -4,9 +4,7 @@ kind: Kustomization
namespace: node-feature-discovery
bases:
- ../../base/rbac
- ../../base/rbac-topologyupdater
- ../../base/master
- ../../base/noderesourcetopologies-crd
- ../../base/topologyupdater-daemonset

View file

@ -101,9 +101,9 @@ node(s) will run extra job instance(s) to satisfy the request.
### Master Worker Topologyupdater
NFD Master, NFD worker and NFD Topologyupdater can be configured to be deployed
as separate pods. The `master-worker-topologyupdater` overlay may be used to
achieve this:
NFD-Master, nfd-worker and nfd-topology-updater can be configured to be
deployed as separate pods. The `master-worker-topologyupdater` overlay may be
used to achieve this:
```bash
kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/master-worker-topologyupdater?ref={{ site.release }}
@ -112,7 +112,7 @@ kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deplo
### Topologyupdater
In order to deploy just NFD master and NFD Topologyupdater (without nfd-worker)
In order to deploy just nfd-topology-updater (without nfd-master and nfd-worker)
use the `topologyupdater` overlay:
```bash
@ -120,10 +120,9 @@ kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deplo
```
NFD Topologyupdater can be configured along with the `default` overlay
(which deploys NFD worker and NFD master) where all the software components
are deployed as separate pods. The `topologyupdater` overlay may be used
along with `default` overlay to achieve this:
NFD-Topology-Updater can be configured along with the `default` overlay
(which deploys nfd-worker and nfd-master) where all the software components
are deployed as separate pods:
```bash

View file

@ -42,9 +42,9 @@ instance of nfd-worker is supposed to be running on each node of the cluster,
NFD-Topology-Updater is a daemon responsible for examining allocated
resources on a worker node to account for resources available to be allocated
to new pod on a per-zone basis (where a zone can be a NUMA node). It then
communicates the information to nfd-master which does the
[NodeResourceTopology CR](#noderesourcetopology-cr) creation corresponding
to all the nodes in the cluster. One instance of nfd-topology-updater is
creates or updates a
[NodeResourceTopology](../usage/custom-resources#noderesourcetopology) custom
resource object specific to this node. One instance of nfd-topology-updater is
supposed to be running on each node of the cluster.
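
To make the per-node object concrete, here is roughly what nfd-topology-updater writes, expressed with the topology v1alpha1 Go types; the values are purely illustrative and only the fields used by the updater are shown:

```go
package example

import (
	v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleNRT sketches the shape of a NodeResourceTopology object: one Zone
// per NUMA node, each carrying per-resource capacity/allocatable/available.
var exampleNRT = v1alpha1.NodeResourceTopology{
	ObjectMeta:       metav1.ObjectMeta{Name: "worker-1"},
	TopologyPolicies: []string{"single-numa-node"},
	Zones: v1alpha1.ZoneList{
		{
			Name: "node-0",
			Type: "Node",
			Resources: v1alpha1.ResourceInfoList{
				{
					Name:        "cpu",
					Capacity:    resource.MustParse("4"),
					Allocatable: resource.MustParse("4"),
					Available:   resource.MustParse("2"),
				},
			},
		},
	},
}
```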
## Feature Discovery

View file

@ -78,8 +78,8 @@ feature-dependent-pod 1/1 Running 0 23s 10.36.0.4 node-2
### Deploy nfd-topology-updater
In order to deploy nfd-master and nfd-topology-updater daemons
use `topologyupdater` kustomize overlay.
In order to deploy nfd-topology-updater use the `topologyupdater` kustomize
overlay.
```bash
kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/topologyupdater?ref={{ site.release }}
@ -87,16 +87,13 @@ kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deplo
### Verify nfd-topology-updater
Wait until NFD topologyupdater (and NFD master) are running.
Wait until nfd-topology-updater is running.
```bash
$ kubectl -n node-feature-discovery get ds,deploy
$ kubectl -n node-feature-discovery get ds
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/nfd-topology-updater 2 2 2 2 2 <none> 5s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nfd-master 1/1 1 1 17s
```
Check that the NodeResourceTopology objects are created
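
The same check can be done programmatically. A hedged sketch that lists NodeResourceTopology objects with the generated topology clientset (package path assumed, as in the sketch above), roughly what `kubectl get noderesourcetopologies` shows:

```go
package example

import (
	"context"
	"fmt"

	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

// listNRTs prints the name and zone count of every NodeResourceTopology
// object in the cluster, using the given kubeconfig path.
func listNRTs(kubeconfig string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	cli, err := topologyclientset.NewForConfig(cfg)
	if err != nil {
		return err
	}
	nrts, err := cli.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, nrt := range nrts.Items {
		fmt.Printf("%s: %d zone(s)\n", nrt.Name, len(nrt.Zones))
	}
	return nil
}
```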

View file

@ -46,84 +46,6 @@ Example:
nfd-topology-updater -config=/opt/nfd/nfd-topology-updater.conf
```
### -server
The `-server` flag specifies the address of the nfd-master endpoint where to
connect to.
Default: localhost:8080
Example:
```bash
nfd-topology-updater -server=nfd-master.nfd.svc.cluster.local:443
```
### -ca-file
The `-ca-file` is one of the three flags (together with `-cert-file` and
`-key-file`) controlling the mutual TLS authentication on the topology-updater side.
This flag specifies the TLS root certificate that is used for verifying the
authenticity of nfd-master.
Default: *empty*
Note: Must be specified together with `-cert-file` and `-key-file`
Example:
```bash
nfd-topology-updater -ca-file=/opt/nfd/ca.crt -cert-file=/opt/nfd/updater.crt -key-file=/opt/nfd/updater.key
```
### -cert-file
The `-cert-file` is one of the three flags (together with `-ca-file` and
`-key-file`) controlling mutual TLS authentication on the topology-updater
side. This flag specifies the TLS certificate presented for authenticating
outgoing requests.
Default: *empty*
Note: Must be specified together with `-ca-file` and `-key-file`
Example:
```bash
nfd-topology-updater -cert-file=/opt/nfd/updater.crt -key-file=/opt/nfd/updater.key -ca-file=/opt/nfd/ca.crt
```
### -key-file
The `-key-file` is one of the three flags (together with `-ca-file` and
`-cert-file`) controlling the mutual TLS authentication on topology-updater
side. This flag specifies the private key corresponding the given certificate file
(`-cert-file`) that is used for authenticating outgoing requests.
Default: *empty*
Note: Must be specified together with `-cert-file` and `-ca-file`
Example:
```bash
nfd-topology-updater -key-file=/opt/nfd/updater.key -cert-file=/opt/nfd/updater.crt -ca-file=/opt/nfd/ca.crt
```
### -server-name-override
The `-server-name-override` flag specifies the common name (CN) which to
expect from the nfd-master TLS certificate. This flag is mostly intended for
development and debugging purposes.
Default: *empty*
Example:
```bash
nfd-topology-updater -server-name-override=localhost
```
### -no-publish
The `-no-publish` flag disables all communication with the nfd-master, making

View file

@ -10,11 +10,11 @@ sort: 5
---
NFD-Topology-Updater is preferably run as a Kubernetes DaemonSet. This assures
re-examination (and CR updates) on regular intervals capturing changes in
the allocated resources and hence the allocatable resources on a per zone
basis. It makes sure that more CR instances are created as new nodes get
added to the cluster. Topology-Updater connects to the nfd-master service
to create CR instances corresponding to nodes.
re-examination on regular intervals, capturing changes in the allocated
resources and hence the allocatable resources on a per zone basis by updating
[NodeResourceTopology](custom-resources#noderesourcetopology) custom resources.
It makes sure that new NodeResourceTopology instances are created for each
new node that gets added to the cluster.
When run as a daemonset, nodes are re-examined for the allocated resources
(to determine the information of the allocatable resources on a per zone basis

View file

@ -22,16 +22,16 @@ import (
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog/v2"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/node-feature-discovery/pkg/apihelper"
nfdclient "sigs.k8s.io/node-feature-discovery/pkg/nfd-client"
"sigs.k8s.io/node-feature-discovery/pkg/podres"
"sigs.k8s.io/node-feature-discovery/pkg/resourcemonitor"
pb "sigs.k8s.io/node-feature-discovery/pkg/topologyupdater"
"sigs.k8s.io/node-feature-discovery/pkg/utils"
"sigs.k8s.io/node-feature-discovery/pkg/version"
"sigs.k8s.io/yaml"
@ -39,11 +39,12 @@ import (
// Args are the command line arguments
type Args struct {
nfdclient.Args
NoPublish bool
Oneshot bool
KubeConfigFile string
ConfigFile string
Klog map[string]*utils.KlogFlagVal
}
// NFDConfig contains the configuration settings of NFDTopologyUpdater.
@ -52,38 +53,32 @@ type NFDConfig struct {
}
type NfdTopologyUpdater interface {
nfdclient.NfdClient
Update(v1alpha1.ZoneList) error
Run() error
Stop()
}
type staticNodeInfo struct {
nodeName string
tmPolicy string
}
type nfdTopologyUpdater struct {
nfdclient.NfdBaseClient
nodeInfo *staticNodeInfo
args Args
apihelper apihelper.APIHelpers
resourcemonitorArgs resourcemonitor.Args
certWatch *utils.FsWatcher
client pb.NodeTopologyClient
stop chan struct{} // channel for signaling stop
configFilePath string
config *NFDConfig
}
// NewTopologyUpdater creates a new NfdTopologyUpdater instance.
func NewTopologyUpdater(args Args, resourcemonitorArgs resourcemonitor.Args, policy string) (NfdTopologyUpdater, error) {
base, err := nfdclient.NewNfdBaseClient(&args.Args)
if err != nil {
return nil, err
}
func NewTopologyUpdater(args Args, resourcemonitorArgs resourcemonitor.Args, policy string) NfdTopologyUpdater {
nfd := &nfdTopologyUpdater{
NfdBaseClient: base,
args: args,
resourcemonitorArgs: resourcemonitorArgs,
nodeInfo: &staticNodeInfo{
nodeName: os.Getenv("NODE_NAME"),
tmPolicy: policy,
},
stop: make(chan struct{}, 1),
@ -92,27 +87,26 @@ func NewTopologyUpdater(args Args, resourcemonitorArgs resourcemonitor.Args, pol
if args.ConfigFile != "" {
nfd.configFilePath = filepath.Clean(args.ConfigFile)
}
return nfd, nil
return nfd
}
// Run nfdTopologyUpdater client. Returns if a fatal error is encountered, or, after
// Run nfdTopologyUpdater. Returns if a fatal error is encountered, or, after
// one request if OneShot is set to 'true' in the updater args.
func (w *nfdTopologyUpdater) Run() error {
klog.Infof("Node Feature Discovery Topology Updater %s", version.Get())
klog.Infof("NodeName: '%s'", nfdclient.NodeName())
klog.Infof("NodeName: '%s'", w.nodeInfo.nodeName)
podResClient, err := podres.GetPodResClient(w.resourcemonitorArgs.PodResourceSocketPath)
if err != nil {
return fmt.Errorf("failed to get PodResource Client: %w", err)
}
var kubeApihelper apihelper.K8sHelpers
if !w.args.NoPublish {
kubeconfig, err := apihelper.GetKubeconfig(w.args.KubeConfigFile)
if err != nil {
return err
}
kubeApihelper = apihelper.K8sHelpers{Kubeconfig: kubeconfig}
w.apihelper = apihelper.K8sHelpers{Kubeconfig: kubeconfig}
}
if err := w.configure(); err != nil {
return fmt.Errorf("faild to configure Node Feature Discovery Topology Updater: %w", err)
@ -120,7 +114,7 @@ func (w *nfdTopologyUpdater) Run() error {
var resScan resourcemonitor.ResourcesScanner
resScan, err = resourcemonitor.NewPodResourcesScanner(w.resourcemonitorArgs.Namespace, podResClient, kubeApihelper)
resScan, err = resourcemonitor.NewPodResourcesScanner(w.resourcemonitorArgs.Namespace, podResClient, w.apihelper)
if err != nil {
return fmt.Errorf("failed to initialize ResourceMonitor instance: %w", err)
}
@ -131,7 +125,7 @@ func (w *nfdTopologyUpdater) Run() error {
// zonesChannel := make(chan v1alpha1.ZoneList)
var zones v1alpha1.ZoneList
excludeList := resourcemonitor.NewExcludeResourceList(w.config.ExcludeList, nfdclient.NodeName())
excludeList := resourcemonitor.NewExcludeResourceList(w.config.ExcludeList, w.nodeInfo.nodeName)
resAggr, err := resourcemonitor.NewResourcesAggregator(podResClient, excludeList)
if err != nil {
return fmt.Errorf("failed to obtain node resource information: %w", err)
@ -139,12 +133,6 @@ func (w *nfdTopologyUpdater) Run() error {
klog.V(2).Infof("resAggr is: %v\n", resAggr)
// Create watcher for TLS certificates
w.certWatch, err = utils.CreateFsWatcher(time.Second, w.args.CaFile, w.args.CertFile, w.args.KeyFile)
if err != nil {
return err
}
crTrigger := time.NewTicker(w.resourcemonitorArgs.SleepInterval)
for {
select {
@ -158,49 +146,24 @@ func (w *nfdTopologyUpdater) Run() error {
}
zones = resAggr.Aggregate(podResources)
utils.KlogDump(1, "After aggregating resources identified zones are", " ", zones)
if err = w.Update(zones); err != nil {
return err
if !w.args.NoPublish {
if err = w.updateNodeResourceTopology(zones); err != nil {
return err
}
}
if w.args.Oneshot {
return nil
}
case <-w.certWatch.Events:
klog.Infof("TLS certificate update, renewing connection to nfd-master")
w.Disconnect()
if err := w.Connect(); err != nil {
return err
}
case <-w.stop:
klog.Infof("shutting down nfd-topology-updater")
w.certWatch.Close()
return nil
}
}
}
func (w *nfdTopologyUpdater) Update(zones v1alpha1.ZoneList) error {
// Connect to NFD master
err := w.Connect()
if err != nil {
return fmt.Errorf("failed to connect: %w", err)
}
defer w.Disconnect()
if w.client == nil {
return nil
}
err = advertiseNodeTopology(w.client, zones, w.nodeInfo.tmPolicy, nfdclient.NodeName())
if err != nil {
return fmt.Errorf("failed to advertise node topology: %w", err)
}
return nil
}
// Stop NFD Topology Updater
func (w *nfdTopologyUpdater) Stop() {
select {
@ -209,59 +172,39 @@ func (w *nfdTopologyUpdater) Stop() {
}
}
// connect creates a client connection to the NFD master
func (w *nfdTopologyUpdater) Connect() error {
// Return a dummy connection in case of dry-run
if w.args.NoPublish {
return nil
}
if err := w.NfdBaseClient.Connect(); err != nil {
return err
}
w.client = pb.NewNodeTopologyClient(w.ClientConn())
return nil
}
// disconnect closes the connection to NFD master
func (w *nfdTopologyUpdater) Disconnect() {
w.NfdBaseClient.Disconnect()
w.client = nil
}
// advertiseNodeTopology advertises the topology CR to a Kubernetes node
// via the NFD server.
func advertiseNodeTopology(client pb.NodeTopologyClient, zoneInfo v1alpha1.ZoneList, tmPolicy string, nodeName string) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
zones := make([]*v1alpha1.Zone, len(zoneInfo))
// TODO: Avoid copying of data to allow returning the zone info
// directly in a compatible data type (i.e. []*v1alpha1.Zone).
for i, zone := range zoneInfo {
zones[i] = &v1alpha1.Zone{
Name: zone.Name,
Type: zone.Type,
Parent: zone.Parent,
Resources: zone.Resources,
Costs: zone.Costs,
}
}
topologyReq := &pb.NodeTopologyRequest{
Zones: zones,
NfdVersion: version.Get(),
NodeName: nodeName,
TopologyPolicies: []string{tmPolicy},
}
utils.KlogDump(1, "Sending NodeTopologyRequest to nfd-master:", " ", topologyReq)
_, err := client.UpdateNodeTopology(ctx, topologyReq)
func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneList) error {
cli, err := w.apihelper.GetTopologyClient()
if err != nil {
return err
}
nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), w.nodeInfo.nodeName, metav1.GetOptions{})
if errors.IsNotFound(err) {
nrtNew := v1alpha1.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: w.nodeInfo.nodeName,
},
Zones: zoneInfo,
TopologyPolicies: []string{w.nodeInfo.tmPolicy},
}
_, err := cli.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create NodeResourceTopology: %w", err)
}
return nil
} else if err != nil {
return err
}
nrtMutated := nrt.DeepCopy()
nrtMutated.Zones = zoneInfo
nrtUpdated, err := cli.TopologyV1alpha1().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update NodeResourceTopology: %w", err)
}
utils.KlogDump(4, "CR instance updated resTopo:", " ", nrtUpdated)
return nil
}
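
Note that updateNodeResourceTopology does a plain read-modify-write and does not retry on resource-version conflicts; with a single writer per node that is normally fine. If conflicts ever became a concern, the update could be wrapped with client-go's conflict-retry helper. A sketch of that variant (not part of this commit, function name hypothetical):

```go
package example

import (
	"context"

	v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
)

// updateZonesWithRetry is a hypothetical variant that re-reads the object and
// re-applies the zone list whenever the Update call hits a conflict.
func updateZonesWithRetry(ctx context.Context, cli topologyclientset.Interface, nodeName string, zones v1alpha1.ZoneList) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		nrtMutated := nrt.DeepCopy()
		nrtMutated.Zones = zones
		_, err = cli.TopologyV1alpha1().NodeResourceTopologies().Update(ctx, nrtMutated, metav1.UpdateOptions{})
		return err
	})
}
```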

View file

@ -1,169 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologyupdater_test
import (
"fmt"
"os"
"testing"
"time"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
. "github.com/smartystreets/goconvey/convey"
"k8s.io/apimachinery/pkg/api/resource"
nfdclient "sigs.k8s.io/node-feature-discovery/pkg/nfd-client"
u "sigs.k8s.io/node-feature-discovery/pkg/nfd-client/topology-updater"
nfdmaster "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
"sigs.k8s.io/node-feature-discovery/pkg/resourcemonitor"
"sigs.k8s.io/node-feature-discovery/test/data"
)
type testContext struct {
master nfdmaster.NfdMaster
errs chan error
}
func setupTest(args *nfdmaster.Args) testContext {
// Fixed port and no-publish, for convenience
args.NoPublish = true
args.Port = 8192
m, err := nfdmaster.NewNfdMaster(args)
if err != nil {
fmt.Printf("Test setup failed: %v\n", err)
os.Exit(1)
}
ctx := testContext{master: m, errs: make(chan error)}
// Run nfd-master instance, intended to be used as the server counterpart
go func() {
ctx.errs <- ctx.master.Run()
close(ctx.errs)
}()
ready := ctx.master.WaitForReady(5 * time.Second)
if !ready {
fmt.Println("Test setup failed: timeout while waiting for nfd-master")
os.Exit(1)
}
return ctx
}
func teardownTest(ctx testContext) {
ctx.master.Stop()
for e := range ctx.errs {
if e != nil {
fmt.Printf("Error in test context: %v\n", e)
os.Exit(1)
}
}
}
func TestNewTopologyUpdater(t *testing.T) {
Convey("When initializing new NfdTopologyUpdater instance", t, func() {
Convey("When one of -cert-file, -key-file or -ca-file is missing", func() {
tmPolicy := "fake-topology-manager-policy"
_, err := u.NewTopologyUpdater(u.Args{Args: nfdclient.Args{CertFile: "crt", KeyFile: "key"}}, resourcemonitor.Args{}, tmPolicy)
_, err2 := u.NewTopologyUpdater(u.Args{Args: nfdclient.Args{KeyFile: "key", CaFile: "ca"}}, resourcemonitor.Args{}, tmPolicy)
_, err3 := u.NewTopologyUpdater(u.Args{Args: nfdclient.Args{CertFile: "crt", CaFile: "ca"}}, resourcemonitor.Args{}, tmPolicy)
Convey("An error should be returned", func() {
So(err, ShouldNotBeNil)
So(err2, ShouldNotBeNil)
So(err3, ShouldNotBeNil)
})
})
})
}
func TestUpdate(t *testing.T) {
ctx := setupTest(&nfdmaster.Args{})
resourceInfo := v1alpha1.ResourceInfoList{
v1alpha1.ResourceInfo{
Name: "cpu",
Available: resource.MustParse("2"),
Allocatable: resource.MustParse("4"),
Capacity: resource.MustParse("4"),
},
}
zones := v1alpha1.ZoneList{
v1alpha1.Zone{
Name: "node-0",
Type: "Node",
Resources: resourceInfo,
},
}
defer teardownTest(ctx)
Convey("When running nfd-topology-updater against nfd-master", t, func() {
Convey("When running as a Oneshot job with Zones", func() {
args := u.Args{
Oneshot: true,
Args: nfdclient.Args{
Server: "localhost:8192"},
}
updater, _ := u.NewTopologyUpdater(args, resourcemonitor.Args{}, "fake-topology-manager-policy")
err := updater.Update(zones)
Convey("No error should be returned", func() {
So(err, ShouldBeNil)
})
})
})
}
func TestRunTls(t *testing.T) {
masterArgs := &nfdmaster.Args{
CaFile: data.FilePath("ca.crt"),
CertFile: data.FilePath("nfd-test-master.crt"),
KeyFile: data.FilePath("nfd-test-master.key"),
VerifyNodeName: false,
}
ctx := setupTest(masterArgs)
defer teardownTest(ctx)
Convey("When running nfd-worker against nfd-master with mutual TLS auth enabled", t, func() {
Convey("When publishing CRs obtained from Zones", func() {
resourceInfo := v1alpha1.ResourceInfoList{
v1alpha1.ResourceInfo{
Name: "cpu",
Available: resource.MustParse("2"),
Allocatable: resource.MustParse("4"),
Capacity: resource.MustParse("4"),
},
}
zones := v1alpha1.ZoneList{
v1alpha1.Zone{
Name: "node-0",
Type: "Node",
Resources: resourceInfo,
},
}
updaterArgs := u.Args{
Args: nfdclient.Args{
CaFile: data.FilePath("ca.crt"),
CertFile: data.FilePath("nfd-test-topology-updater.crt"),
KeyFile: data.FilePath("nfd-test-topology-updater.key"),
Server: "localhost:8192",
ServerNameOverride: "nfd-test-master",
},
Oneshot: true,
}
updater, _ := u.NewTopologyUpdater(updaterArgs, resourcemonitor.Args{}, "fake-topology-manager-policy")
err := updater.Update(zones)
Convey("No error should be returned", func() {
So(err, ShouldBeNil)
})
})
})
}

View file

@ -29,7 +29,6 @@ import (
"strings"
"time"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@ -37,8 +36,6 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/peer"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
label "k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
@ -49,7 +46,6 @@ import (
"sigs.k8s.io/node-feature-discovery/pkg/apihelper"
nfdv1alpha1 "sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1"
pb "sigs.k8s.io/node-feature-discovery/pkg/labeler"
topologypb "sigs.k8s.io/node-feature-discovery/pkg/topologyupdater"
"sigs.k8s.io/node-feature-discovery/pkg/utils"
"sigs.k8s.io/node-feature-discovery/pkg/version"
)
@ -220,7 +216,6 @@ func (m *nfdMaster) runGrpcServer(errChan chan<- error) {
m.server = grpc.NewServer(serverOpts...)
pb.RegisterLabelerServer(m.server, m)
grpc_health_v1.RegisterHealthServer(m.server, health.NewServer())
topologypb.RegisterNodeTopologyServer(m.server, m)
klog.Infof("gRPC server serving on port: %d", m.args.Port)
// Run gRPC server
@ -574,27 +569,6 @@ func authorizeClient(c context.Context, checkNodeName bool, nodeName string) err
return nil
}
func (m *nfdMaster) UpdateNodeTopology(c context.Context, r *topologypb.NodeTopologyRequest) (*topologypb.NodeTopologyResponse, error) {
err := authorizeClient(c, m.args.VerifyNodeName, r.NodeName)
if err != nil {
return &topologypb.NodeTopologyResponse{}, err
}
if klog.V(1).Enabled() {
klog.Infof("REQUEST Node: %s NFD-version: %s Topology Policy: %s", r.NodeName, r.NfdVersion, r.TopologyPolicies)
utils.KlogDump(1, "Zones received:", " ", r.Zones)
} else {
klog.Infof("received CR updation request for node %q", r.NodeName)
}
if !m.args.NoPublish {
err := m.updateCR(r.NodeName, r.TopologyPolicies, r.Zones)
if err != nil {
klog.Errorf("failed to advertise NodeResourceTopology: %v", err)
return &topologypb.NodeTopologyResponse{}, err
}
}
return &topologypb.NodeTopologyResponse{}, nil
}
func (m *nfdMaster) processNodeFeatureRule(r *pb.SetLabelsRequest) (map[string]string, []corev1.Taint) {
if m.nfdController == nil {
return nil, nil
@ -806,60 +780,6 @@ func stringToNsNames(cslist, ns string) []string {
return names
}
func modifyCR(topoUpdaterZones []*v1alpha1.Zone) []v1alpha1.Zone {
zones := make([]v1alpha1.Zone, len(topoUpdaterZones))
// TODO: Avoid copying of data to allow returning the zone info
// directly in a compatible data type (i.e. []*v1alpha1.Zone).
for i, zone := range topoUpdaterZones {
zones[i] = v1alpha1.Zone{
Name: zone.Name,
Type: zone.Type,
Parent: zone.Parent,
Costs: zone.Costs,
Resources: zone.Resources,
}
}
return zones
}
func (m *nfdMaster) updateCR(hostname string, tmpolicy []string, topoUpdaterZones []*v1alpha1.Zone) error {
cli, err := m.apihelper.GetTopologyClient()
if err != nil {
return err
}
zones := modifyCR(topoUpdaterZones)
nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), hostname, metav1.GetOptions{})
if errors.IsNotFound(err) {
nrtNew := v1alpha1.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: hostname,
},
Zones: zones,
TopologyPolicies: tmpolicy,
}
_, err := cli.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create v1alpha1.NodeResourceTopology!:%w", err)
}
return nil
} else if err != nil {
return err
}
nrtMutated := nrt.DeepCopy()
nrtMutated.Zones = zones
nrtUpdated, err := cli.TopologyV1alpha1().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update v1alpha1.NodeResourceTopology!:%w", err)
}
utils.KlogDump(2, "CR instance updated resTopo:", " ", nrtUpdated)
return nil
}
func (m *nfdMaster) instanceAnnotation(name string) string {
if m.args.Instance == "" {
return name

View file

@ -1,20 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologyupdater
//go:generate protoc --go_opt=paths=source_relative --go_out=plugins=grpc:. -I . -I ../../vendor/ topology-updater.proto
//go:generate mockery --name=NodeTopologyClient --inpackage

View file

@ -1,61 +0,0 @@
// Code generated by mockery v2.13.0. DO NOT EDIT.
package topologyupdater
import (
context "context"
grpc "google.golang.org/grpc"
mock "github.com/stretchr/testify/mock"
)
// MockNodeTopologyClient is an autogenerated mock type for the NodeTopologyClient type
type MockNodeTopologyClient struct {
mock.Mock
}
// UpdateNodeTopology provides a mock function with given fields: ctx, in, opts
func (_m *MockNodeTopologyClient) UpdateNodeTopology(ctx context.Context, in *NodeTopologyRequest, opts ...grpc.CallOption) (*NodeTopologyResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *NodeTopologyResponse
if rf, ok := ret.Get(0).(func(context.Context, *NodeTopologyRequest, ...grpc.CallOption) *NodeTopologyResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*NodeTopologyResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *NodeTopologyRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type NewMockNodeTopologyClientT interface {
mock.TestingT
Cleanup(func())
}
// NewMockNodeTopologyClient creates a new instance of MockNodeTopologyClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockNodeTopologyClient(t NewMockNodeTopologyClientT) *MockNodeTopologyClient {
mock := &MockNodeTopologyClient{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View file

@ -1,353 +0,0 @@
//
//Copyright 2021 The Kubernetes Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.23.0
// protoc v3.17.3
// source: topology-updater.proto
package topologyupdater
import (
context "context"
proto "github.com/golang/protobuf/proto"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type NodeTopologyRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NfdVersion string `protobuf:"bytes,1,opt,name=nfd_version,json=nfdVersion,proto3" json:"nfd_version,omitempty"`
NodeName string `protobuf:"bytes,2,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
TopologyPolicies []string `protobuf:"bytes,3,rep,name=topology_policies,json=topologyPolicies,proto3" json:"topology_policies,omitempty"`
Zones []*v1alpha1.Zone `protobuf:"bytes,4,rep,name=zones,proto3" json:"zones,omitempty"`
}
func (x *NodeTopologyRequest) Reset() {
*x = NodeTopologyRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_topology_updater_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeTopologyRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeTopologyRequest) ProtoMessage() {}
func (x *NodeTopologyRequest) ProtoReflect() protoreflect.Message {
mi := &file_topology_updater_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeTopologyRequest.ProtoReflect.Descriptor instead.
func (*NodeTopologyRequest) Descriptor() ([]byte, []int) {
return file_topology_updater_proto_rawDescGZIP(), []int{0}
}
func (x *NodeTopologyRequest) GetNfdVersion() string {
if x != nil {
return x.NfdVersion
}
return ""
}
func (x *NodeTopologyRequest) GetNodeName() string {
if x != nil {
return x.NodeName
}
return ""
}
func (x *NodeTopologyRequest) GetTopologyPolicies() []string {
if x != nil {
return x.TopologyPolicies
}
return nil
}
func (x *NodeTopologyRequest) GetZones() []*v1alpha1.Zone {
if x != nil {
return x.Zones
}
return nil
}
type NodeTopologyResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *NodeTopologyResponse) Reset() {
*x = NodeTopologyResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_topology_updater_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeTopologyResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeTopologyResponse) ProtoMessage() {}
func (x *NodeTopologyResponse) ProtoReflect() protoreflect.Message {
mi := &file_topology_updater_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeTopologyResponse.ProtoReflect.Descriptor instead.
func (*NodeTopologyResponse) Descriptor() ([]byte, []int) {
return file_topology_updater_proto_rawDescGZIP(), []int{1}
}
var File_topology_updater_proto protoreflect.FileDescriptor
var file_topology_updater_proto_rawDesc = []byte{
0x0a, 0x16, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x2d, 0x75, 0x70, 0x64, 0x61, 0x74,
0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f,
0x67, 0x79, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x38, 0x73, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67,
0x79, 0x61, 0x77, 0x61, 0x72, 0x65, 0x73, 0x63, 0x68, 0x65, 0x64, 0x77, 0x67, 0x2f, 0x6e, 0x6f,
0x64, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f,
0x67, 0x79, 0x2d, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f,
0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0xa6, 0x01, 0x0a, 0x13, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f,
0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x66, 0x64,
0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x6e, 0x66, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f,
0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x6f, 0x70, 0x6f, 0x6c,
0x6f, 0x67, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
0x28, 0x09, 0x52, 0x10, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x05, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x04, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x5a,
0x6f, 0x6e, 0x65, 0x52, 0x05, 0x7a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x4e, 0x6f,
0x64, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x32, 0x71, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f,
0x67, 0x79, 0x12, 0x61, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65,
0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x24, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x6c,
0x6f, 0x67, 0x79, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x54,
0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72,
0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x73, 0x69, 0x67, 0x73, 0x2e, 0x6b, 0x38,
0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2d, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
0x65, 0x2d, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_topology_updater_proto_rawDescOnce sync.Once
file_topology_updater_proto_rawDescData = file_topology_updater_proto_rawDesc
)
func file_topology_updater_proto_rawDescGZIP() []byte {
file_topology_updater_proto_rawDescOnce.Do(func() {
file_topology_updater_proto_rawDescData = protoimpl.X.CompressGZIP(file_topology_updater_proto_rawDescData)
})
return file_topology_updater_proto_rawDescData
}
var file_topology_updater_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_topology_updater_proto_goTypes = []interface{}{
(*NodeTopologyRequest)(nil), // 0: topologyupdater.NodeTopologyRequest
(*NodeTopologyResponse)(nil), // 1: topologyupdater.NodeTopologyResponse
(*v1alpha1.Zone)(nil), // 2: v1alpha1.Zone
}
var file_topology_updater_proto_depIdxs = []int32{
2, // 0: topologyupdater.NodeTopologyRequest.zones:type_name -> v1alpha1.Zone
0, // 1: topologyupdater.NodeTopology.UpdateNodeTopology:input_type -> topologyupdater.NodeTopologyRequest
1, // 2: topologyupdater.NodeTopology.UpdateNodeTopology:output_type -> topologyupdater.NodeTopologyResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_topology_updater_proto_init() }
func file_topology_updater_proto_init() {
if File_topology_updater_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_topology_updater_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeTopologyRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_topology_updater_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeTopologyResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_topology_updater_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_topology_updater_proto_goTypes,
DependencyIndexes: file_topology_updater_proto_depIdxs,
MessageInfos: file_topology_updater_proto_msgTypes,
}.Build()
File_topology_updater_proto = out.File
file_topology_updater_proto_rawDesc = nil
file_topology_updater_proto_goTypes = nil
file_topology_updater_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// NodeTopologyClient is the client API for NodeTopology service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type NodeTopologyClient interface {
UpdateNodeTopology(ctx context.Context, in *NodeTopologyRequest, opts ...grpc.CallOption) (*NodeTopologyResponse, error)
}
type nodeTopologyClient struct {
cc grpc.ClientConnInterface
}
func NewNodeTopologyClient(cc grpc.ClientConnInterface) NodeTopologyClient {
return &nodeTopologyClient{cc}
}
func (c *nodeTopologyClient) UpdateNodeTopology(ctx context.Context, in *NodeTopologyRequest, opts ...grpc.CallOption) (*NodeTopologyResponse, error) {
out := new(NodeTopologyResponse)
err := c.cc.Invoke(ctx, "/topologyupdater.NodeTopology/UpdateNodeTopology", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// NodeTopologyServer is the server API for NodeTopology service.
type NodeTopologyServer interface {
UpdateNodeTopology(context.Context, *NodeTopologyRequest) (*NodeTopologyResponse, error)
}
// UnimplementedNodeTopologyServer can be embedded to have forward compatible implementations.
type UnimplementedNodeTopologyServer struct {
}
func (*UnimplementedNodeTopologyServer) UpdateNodeTopology(context.Context, *NodeTopologyRequest) (*NodeTopologyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateNodeTopology not implemented")
}
func RegisterNodeTopologyServer(s *grpc.Server, srv NodeTopologyServer) {
s.RegisterService(&_NodeTopology_serviceDesc, srv)
}
func _NodeTopology_UpdateNodeTopology_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NodeTopologyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(NodeTopologyServer).UpdateNodeTopology(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/topologyupdater.NodeTopology/UpdateNodeTopology",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(NodeTopologyServer).UpdateNodeTopology(ctx, req.(*NodeTopologyRequest))
}
return interceptor(ctx, in, info, handler)
}
var _NodeTopology_serviceDesc = grpc.ServiceDesc{
ServiceName: "topologyupdater.NodeTopology",
HandlerType: (*NodeTopologyServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "UpdateNodeTopology",
Handler: _NodeTopology_UpdateNodeTopology_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "topology-updater.proto",
}

View file

@ -1,35 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
option go_package = "sigs.k8s.io/node-feature-discovery/pkg/topologyupdater";
import "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1/generated.proto";
package topologyupdater;
service NodeTopology{
rpc UpdateNodeTopology(NodeTopologyRequest) returns (NodeTopologyResponse);
}
message NodeTopologyRequest {
string nfd_version = 1;
string node_name = 2;
repeated string topology_policies = 3;
repeated v1alpha1.Zone zones = 4;
}
message NodeTopologyResponse {
}

View file

@ -19,9 +19,10 @@ package e2e
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"time"
"k8s.io/apimachinery/pkg/api/resource"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@ -36,7 +37,6 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/kubelet"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
admissionapi "k8s.io/pod-security-admission/api"
testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
@ -72,18 +72,9 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
By("Creating the node resource topologies CRD")
Expect(testutils.CreateNodeResourceTopologies(extClient)).ToNot(BeNil())
By("Configuring RBAC")
Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())
imageOpt := testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))
f.PodClient().CreateSync(testpod.NFDMaster(imageOpt))
// Create nfd-master service
masterService, err := testutils.CreateService(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the nfd-master service to be up")
Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, masterService.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
By("Creating nfd-topology-updater daemonset")
topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), topologyUpdaterDaemonSet, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
@ -114,7 +105,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
}
})
Context("with single nfd-master pod", func() {
Context("with topology-updater daemonset running", func() {
BeforeEach(func() {
cfg, err := testutils.GetConfig()
Expect(err).ToNot(HaveOccurred())

View file

@ -367,8 +367,7 @@ func NFDTopologyUpdaterSpec(kc utils.KubeletConfig, opts ...SpecOption) *corev1.
"--kubelet-config-uri=file:///podresources/config.yaml",
"--podresources-socket=unix:///podresources/kubelet.sock",
"--sleep-interval=3s",
"--watch-namespace=rte",
"--server=nfd-master-e2e:8080"},
"--watch-namespace=rte"},
Env: []corev1.EnvVar{
{
Name: "NODE_NAME",

View file

@ -133,15 +133,6 @@ func createClusterRoleMaster(cs clientset.Interface) (*rbacv1.ClusterRole, error
Resources: []string{"nodefeaturerules"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"topology.node.k8s.io"},
Resources: []string{"noderesourcetopologies"},
Verbs: []string{
"create",
"get",
"update",
},
},
},
}
if *openShift {
@ -172,6 +163,15 @@ func createClusterRoleTopologyUpdater(cs clientset.Interface) (*rbacv1.ClusterRo
Resources: []string{"pods"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"topology.node.k8s.io"},
Resources: []string{"noderesourcetopologies"},
Verbs: []string{
"create",
"get",
"update",
},
},
},
}
if *openShift {