Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git, synced 2025-03-31 04:04:51 +00:00

Merge pull request #429 from slintes/configmap-hostname-labels

Added nodename rule to custom source

Commit d36500789e: 10 changed files with 534 additions and 104 deletions
Changed paths:
deployment/node-feature-discovery
docs/get-started
nfd-daemonset-combined.yaml.template
nfd-worker-daemonset.yaml.template
nfd-worker-job.yaml.template
nfd-worker.conf.example
source/custom
test/e2e
@@ -162,6 +162,10 @@ worker:
 #        vendor: ["15b3"]
 #        device: ["1014", "1017"]
 #        loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+#      - name: "feature.by.nodename"
+#        value: customValue
+#        matchOn:
+#        - nodename: ["worker-0", "my-.*-node"]
 ### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
 
   podSecurityContext: {}
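For orientation: per the custom source documentation updated later in this diff, a matched custom rule is surfaced as a `custom-`-prefixed node label, and `value` overrides the default label value of `"true"`. A sketch of the expected result once the commented-out rule above is enabled (label assembly assumed from the docs examples in this PR, not shown verbatim in this hunk):

```yaml
# Hypothetical resulting node label, assembled as
# feature.node.kubernetes.io/custom-<feature name>=<value>:
labels:
  feature.node.kubernetes.io/custom-feature.by.nodename: "customValue"
```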
@@ -134,6 +134,17 @@ examples how to set-up and manage the worker configuration.
 To aid in making Custom Features clearer, we define a general and a per rule
 nomenclature, keeping things as consistent as possible.
 
+#### Additional configuration directory
+
+In addition to the rules defined in the nfd-worker configuration file, the
+Custom feature can read more configuration files located in the
+`/etc/kubernetes/node-feature-discovery/custom.d/` directory. This makes
+dynamic and flexible configuration easier. The directory must be available
+inside the NFD worker container, so Volumes and VolumeMounts must be used for
+mounting e.g. ConfigMap(s). The example deployment manifests provide an example
+(commented out) for providing Custom configuration with an additional
+ConfigMap, mounted into the `custom.d` directory.
+
 #### General Nomenclature & Definitions
 
 ```
@@ -151,6 +162,7 @@ file.
 sources:
   custom:
   - name: <feature name>
+    value: <optional feature value, defaults to "true">
     matchOn:
     - <Rule-1>: <Rule-1 Input>
       [<Rule-2>: <Rule-2 Input>]
@@ -291,6 +303,26 @@ Matching is done by performing logical _AND_ for each provided Element, i.e. the
 Rule will match if all provided Elements (kernel config options) are enabled
 (`y` or `m`) or matching `=<value>` in the kernel.
 
+##### Nodename Rule
+
+###### Nomenclature
+
+```
+Element : A nodename regexp pattern
+```
+
+The Rule allows matching the node's name against a provided list of Elements.
+
+###### Format
+
+```yaml
+nodename: [ <nodename regexp pattern>, ... ]
+```
+
+Matching is done by performing logical _OR_ for each provided Element, i.e. the
+Rule will match if one of the provided Elements (nodename regexp pattern)
+matches the node's name.
+
 #### Example
 
 ```yaml
@@ -328,6 +360,10 @@ custom:
     matchOn:
       - kConfig: ["GCC_VERSION=100101"]
         loadedKMod: ["kmod1"]
+  - name: "my.datacenter"
+    value: "datacenter-1"
+    matchOn:
+      - nodename: [ "node-datacenter1-rack.*-server.*" ]
 ```
 
 __In the example above:__
@@ -360,6 +396,10 @@ __In the example above:__
   `feature.node.kubernetes.io/custom-my.kernel.modulecompiler=true` if the
   in-tree `kmod1` kernel module is loaded __AND__ it's built with
   `GCC_VERSION=100101`.
+- A node would contain the label:
+  `feature.node.kubernetes.io/custom-my.datacenter=datacenter-1` if the node's
+  name matches the `node-datacenter1-rack.*-server.*` pattern, e.g.
+  `node-datacenter1-rack2-server42`.
 
 #### Statically defined features
 
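One caveat worth noting when writing nodename patterns: the rule is implemented with Go's `regexp.MatchString` (see `nodename_rule.go` below), which searches for the pattern anywhere in the node name rather than requiring a full match. Anchor patterns explicitly when an exact match is intended; a sketch:

```yaml
# Unanchored, "worker-0" would also match "worker-01"; anchoring avoids that:
nodename: [ "^worker-0$", "^node-datacenter1-rack.*-server.*$" ]
```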
@@ -115,6 +115,11 @@ spec:
         - name: nfd-worker-conf
           mountPath: "/etc/kubernetes/node-feature-discovery"
           readOnly: true
+        ## Example for more custom configs in an additional configmap (1/3)
+        ## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
+        # - name: custom-source-extra-rules
+        #   mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
+        #   readOnly: true
       volumes:
       - name: host-boot
         hostPath:
@@ -134,6 +139,10 @@ spec:
       - name: nfd-worker-conf
         configMap:
           name: nfd-worker-conf
+      ## Example for more custom configs in an additional configmap (2/3)
+      # - name: custom-source-extra-rules
+      #   configMap:
+      #     name: custom-source-extra-rules
 ---
 apiVersion: v1
 kind: ConfigMap
@@ -232,4 +241,25 @@ data:
 #        vendor: ["15b3"]
 #        device: ["1014", "1017"]
 #        loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+#      - name: "feature.by.nodename"
+#        value: customValue
+#        matchOn:
+#        - nodename: ["worker-0", "my-.*-node"]
 ### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
+---
+## Example for more custom configs in an additional configmap (3/3)
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: custom-source-extra-rules
+#  namespace: node-feature-discovery
+#data:
+## Filenames don't matter, and there can be multiple files. They just need to be unique.
+#  custom.conf: |
+#    - name: "more.kernel.features"
+#      matchOn:
+#      - loadedKMod: ["example_kmod3"]
+#    - name: "more.features.by.nodename"
+#      value: customValue
+#      matchOn:
+#      - nodename: ["special-.*-node-.*"]
@@ -63,6 +63,11 @@ spec:
         - name: nfd-worker-conf
           mountPath: "/etc/kubernetes/node-feature-discovery"
           readOnly: true
+        ## Example for more custom configs in an additional configmap (1/3)
+        ## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
+        # - name: custom-source-extra-rules
+        #   mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
+        #   readOnly: true
         ## Enable TLS authentication (2/3)
         # - name: nfd-ca-cert
         #   mountPath: "/etc/kubernetes/node-feature-discovery/trust"
@@ -89,6 +94,10 @@ spec:
       - name: nfd-worker-conf
         configMap:
           name: nfd-worker-conf
+      ## Example for more custom configs in an additional configmap (2/3)
+      # - name: custom-source-extra-rules
+      #   configMap:
+      #     name: custom-source-extra-rules
       ## Enable TLS authentication (3/3)
       # - name: nfd-ca-cert
       #   configMap:
@@ -194,4 +203,25 @@ data:
 #        vendor: ["15b3"]
 #        device: ["1014", "1017"]
 #        loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+#      - name: "feature.by.nodename"
+#        value: customValue
+#        matchOn:
+#        - nodename: ["worker-0", "my-.*-node"]
 ### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
+---
+## Example for more custom configs in an additional configmap (3/3)
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: custom-source-extra-rules
+#  namespace: node-feature-discovery
+#data:
+## Filenames don't matter, and there can be multiple files. They just need to be unique.
+#  custom.conf: |
+#    - name: "more.kernel.features"
+#      matchOn:
+#      - loadedKMod: ["example_kmod3"]
+#    - name: "more.features.by.nodename"
+#      value: customValue
+#      matchOn:
+#      - nodename: ["special-.*-node-.*"]
@@ -72,6 +72,11 @@ spec:
         - name: nfd-worker-conf
           mountPath: "/etc/kubernetes/node-feature-discovery"
           readOnly: true
+        ## Example for more custom configs in an additional configmap (1/3)
+        ## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
+        # - name: custom-source-extra-rules
+        #   mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
+        #   readOnly: true
         ## Enable TLS authentication (2/3)
         # - name: nfd-ca-cert
         #   mountPath: "/etc/kubernetes/node-feature-discovery/trust"
@@ -99,6 +104,10 @@ spec:
       - name: nfd-worker-conf
         configMap:
           name: nfd-worker-conf
+      ## Example for more custom configs in an additional configmap (2/3)
+      # - name: custom-source-extra-rules
+      #   configMap:
+      #     name: custom-source-extra-rules
       ## Enable TLS authentication (3/3)
       # - name: nfd-ca-cert
       #   configMap:
@@ -204,4 +213,25 @@ data:
 #        vendor: ["15b3"]
 #        device: ["1014", "1017"]
 #        loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+#      - name: "feature.by.nodename"
+#        value: customValue
+#        matchOn:
+#        - nodename: ["worker-0", "my-.*-node"]
 ### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
+---
+## Example for more custom configs in an additional configmap (3/3)
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+#  name: custom-source-extra-rules
+#  namespace: node-feature-discovery
+#data:
+## Filenames don't matter, and there can be multiple files. They just need to be unique.
+#  custom.conf: |
+#    - name: "more.kernel.features"
+#      matchOn:
+#      - loadedKMod: ["example_kmod3"]
+#    - name: "more.features.by.nodename"
+#      value: customValue
+#      matchOn:
+#      - nodename: ["special-.*-node-.*"]
@@ -88,3 +88,7 @@
 #        vendor: ["15b3"]
 #        device: ["1014", "1017"]
 #        loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+#      - name: "feature.by.nodename"
+#        value: customValue
+#        matchOn:
+#        - nodename: ["worker-0", "my-.*-node"]
@@ -18,6 +18,7 @@ package custom
 
 import (
     "log"
+    "reflect"
 
     "sigs.k8s.io/node-feature-discovery/source"
     "sigs.k8s.io/node-feature-discovery/source/custom/rules"
@@ -30,10 +31,12 @@ type MatchRule struct {
     LoadedKMod *rules.LoadedKModRule `json:"loadedKMod,omitempty"`
     CpuID      *rules.CpuIDRule      `json:"cpuId,omitempty"`
     Kconfig    *rules.KconfigRule    `json:"kConfig,omitempty"`
+    Nodename   *rules.NodenameRule   `json:"nodename,omitempty"`
 }
 
 type FeatureSpec struct {
     Name    string      `json:"name"`
+    Value   *string     `json:"value"`
     MatchOn []MatchRule `json:"matchOn"`
 }
 
@@ -72,6 +75,7 @@ func (s *Source) SetConfig(conf source.Config) {
 func (s Source) Discover() (source.Features, error) {
     features := source.Features{}
     allFeatureConfig := append(getStaticFeatureConfig(), *s.config...)
+    allFeatureConfig = append(allFeatureConfig, getDirectoryFeatureConfig()...)
     log.Printf("INFO: Custom features: %+v", allFeatureConfig)
     // Iterate over features
     for _, customFeature := range allFeatureConfig {
@@ -81,7 +85,11 @@ func (s Source) Discover() (source.Features, error) {
             continue
         }
         if featureExist {
-            features[customFeature.Name] = true
+            var value interface{} = true
+            if customFeature.Value != nil {
+                value = *customFeature.Value
+            }
+            features[customFeature.Name] = value
         }
     }
     return features, nil
@@ -90,58 +98,37 @@ func (s Source) Discover() (source.Features, error) {
 // Process a single feature by Matching on the defined rules.
 // A feature is present if all defined Rules in a MatchRule return a match.
 func (s Source) discoverFeature(feature FeatureSpec) (bool, error) {
-    for _, rule := range feature.MatchOn {
-        // PCI ID rule
-        if rule.PciID != nil {
-            match, err := rule.PciID.Match()
-            if err != nil {
-                return false, err
-            }
-            if !match {
-                continue
-            }
-        }
-        // USB ID rule
-        if rule.UsbID != nil {
-            match, err := rule.UsbID.Match()
-            if err != nil {
-                return false, err
-            }
-            if !match {
-                continue
-            }
-        }
-        // Loaded kernel module rule
-        if rule.LoadedKMod != nil {
-            match, err := rule.LoadedKMod.Match()
-            if err != nil {
-                return false, err
-            }
-            if !match {
-                continue
-            }
-        }
-        // cpuid rule
-        if rule.CpuID != nil {
-            match, err := rule.CpuID.Match()
-            if err != nil {
-                return false, err
-            }
-            if !match {
-                continue
-            }
-        }
-        // kconfig rule
-        if rule.Kconfig != nil {
-            match, err := rule.Kconfig.Match()
-            if err != nil {
-                return false, err
-            }
-            if !match {
-                continue
-            }
-        }
-        return true, nil
+    for _, matchRules := range feature.MatchOn {
+
+        allRules := []rules.Rule{
+            matchRules.PciID,
+            matchRules.UsbID,
+            matchRules.LoadedKMod,
+            matchRules.CpuID,
+            matchRules.Kconfig,
+            matchRules.Nodename,
+        }
+
+        // return true, nil if all rules match
+        matchRules := func(rules []rules.Rule) (bool, error) {
+            for _, rule := range rules {
+                if reflect.ValueOf(rule).IsNil() {
+                    continue
+                }
+                if match, err := rule.Match(); err != nil {
+                    return false, err
+                } else if !match {
+                    return false, nil
+                }
+            }
+            return true, nil
+        }
+
+        if match, err := matchRules(allRules); err != nil {
+            return false, err
+        } else if match {
+            return true, nil
+        }
     }
     return false, nil
 }
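The refactored loop relies on two Go details that are easy to miss. First, all rule types share one small interface with a `Match() (bool, error)` method (asserted by `var _ Rule = NodenameRule{}` in `nodename_rule.go` below). Second, a typed nil pointer stored in an interface value does not compare equal to `nil`, which is why the code uses `reflect.ValueOf(rule).IsNil()` instead of `rule == nil` to skip unset rules. A minimal self-contained sketch, with a hypothetical `fakeRule` type, illustrating both points:

```go
package main

import (
	"fmt"
	"reflect"
)

// Rule mirrors the small interface the diff relies on: every rule type
// can report whether it matches on the current node.
type Rule interface {
	Match() (bool, error)
}

// fakeRule is a hypothetical stand-in for a concrete rule such as *LoadedKModRule.
type fakeRule struct{ matches bool }

func (r *fakeRule) Match() (bool, error) { return r.matches, nil }

func main() {
	var unset *fakeRule // typed nil, like an omitted field in a MatchRule

	for _, r := range []Rule{unset, &fakeRule{matches: true}} {
		// r == nil is false even for the typed-nil entry, because the
		// interface value still carries a *fakeRule type descriptor;
		// reflect is needed to see the nil pointer underneath.
		fmt.Println("interface nil:", r == nil, "| pointer nil:", reflect.ValueOf(r).IsNil())
	}
}
```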
source/custom/directory_features.go (new file, 88 lines)
@@ -0,0 +1,88 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package custom
+
+import (
+    "io/ioutil"
+    "log"
+    "os"
+    "path/filepath"
+    "strings"
+
+    "sigs.k8s.io/yaml"
+)
+
+const Directory = "/etc/kubernetes/node-feature-discovery/custom.d"
+
+// getDirectoryFeatureConfig returns features configured in the "/etc/kubernetes/node-feature-discovery/custom.d"
+// host directory and its 1st level subdirectories, which can be populated e.g. by ConfigMaps
+func getDirectoryFeatureConfig() []FeatureSpec {
+    features := readDir(Directory, true)
+    //log.Printf("DEBUG: all configmap based custom feature specs: %+v", features)
+    return features
+}
+
+func readDir(dirName string, recursive bool) []FeatureSpec {
+    features := make([]FeatureSpec, 0)
+
+    log.Printf("DEBUG: getting files in %s", dirName)
+    files, err := ioutil.ReadDir(dirName)
+    if err != nil {
+        if os.IsNotExist(err) {
+            log.Printf("DEBUG: custom config directory %q does not exist", dirName)
+        } else {
+            log.Printf("ERROR: unable to access custom config directory %q, %v", dirName, err)
+        }
+        return features
+    }
+
+    for _, file := range files {
+        fileName := filepath.Join(dirName, file.Name())
+
+        if file.IsDir() {
+            if recursive {
+                //log.Printf("DEBUG: going into dir %q", fileName)
+                features = append(features, readDir(fileName, false)...)
+                //} else {
+                //    log.Printf("DEBUG: skipping dir %q", fileName)
+            }
+            continue
+        }
+        if strings.HasPrefix(file.Name(), ".") {
+            //log.Printf("DEBUG: skipping hidden file %q", fileName)
+            continue
+        }
+        //log.Printf("DEBUG: processing file %q", fileName)
+
+        bytes, err := ioutil.ReadFile(fileName)
+        if err != nil {
+            log.Printf("ERROR: could not read custom config file %q, %v", fileName, err)
+            continue
+        }
+        //log.Printf("DEBUG: custom config rules raw: %s", string(bytes))
+
+        config := &[]FeatureSpec{}
+        err = yaml.UnmarshalStrict(bytes, config)
+        if err != nil {
+            log.Printf("ERROR: could not parse custom config file %q, %v", fileName, err)
+            continue
+        }
+
+        features = append(features, *config...)
+    }
+    return features
+}
source/custom/rules/nodename_rule.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+    "log"
+    "os"
+    "regexp"
+)
+
+var (
+    nodeName = os.Getenv("NODE_NAME")
+)
+
+// Rule that matches on nodenames configured in a ConfigMap
+type NodenameRule []string
+
+// Force implementation of Rule
+var _ Rule = NodenameRule{}
+
+func (n NodenameRule) Match() (bool, error) {
+    for _, nodenamePattern := range n {
+        log.Printf("DEBUG: matchNodename %s", nodenamePattern)
+        match, err := regexp.MatchString(nodenamePattern, nodeName)
+        if err != nil {
+            log.Printf("ERROR: nodename rule: invalid nodename regexp %q: %v", nodenamePattern, err)
+            continue
+        }
+        if !match {
+            //log.Printf("DEBUG: nodename rule: No match for pattern %q with node %q", nodenamePattern, nodeName)
+            continue
+        }
+        //log.Printf("DEBUG: nodename rule: Match for pattern %q with node %q", nodenamePattern, nodeName)
+        return true, nil
+    }
+    return false, nil
+}
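The rule reads the node's name from the `NODE_NAME` environment variable, so that variable must be set in the worker pod. The NFD worker manifests typically inject it via the Kubernetes downward API; a representative stanza (not part of this diff) looks like:

```yaml
env:
  - name: NODE_NAME
    valueFrom:
      fieldRef:
        fieldPath: spec.nodeName
```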
@@ -21,12 +21,13 @@ import (
     "flag"
     "fmt"
     "io/ioutil"
+    "path/filepath"
     "regexp"
     "strings"
     "time"
 
-    "github.com/onsi/ginkgo"
-    "github.com/onsi/gomega"
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
 
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
@@ -40,6 +41,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
     master "sigs.k8s.io/node-feature-discovery/pkg/nfd-master"
+    "sigs.k8s.io/node-feature-discovery/source/custom"
     "sigs.k8s.io/yaml"
 )
 
@@ -90,18 +92,18 @@ func readConfig() {
         return
     }
 
-    ginkgo.By("Reading end-to-end test configuration file")
+    By("Reading end-to-end test configuration file")
     data, err := ioutil.ReadFile(*e2eConfigFile)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    Expect(err).NotTo(HaveOccurred())
 
-    ginkgo.By("Parsing end-to-end test configuration data")
+    By("Parsing end-to-end test configuration data")
     err = yaml.Unmarshal(data, &conf)
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    Expect(err).NotTo(HaveOccurred())
 
     // Pre-compile node name matching regexps
     for name, nodeConf := range conf.DefaultFeatures.Nodes {
         nodeConf.nameRe, err = regexp.Compile(name)
-        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+        Expect(err).NotTo(HaveOccurred())
         conf.DefaultFeatures.Nodes[name] = nodeConf
     }
 }
@@ -166,6 +168,13 @@ func createClusterRole(cs clientset.Interface) (*rbacv1.ClusterRole, error) {
             Resources: []string{"nodes"},
             Verbs:     []string{"get", "patch", "update"},
         },
+        {
+            // needed on OpenShift clusters
+            APIGroups:     []string{"security.openshift.io"},
+            Resources:     []string{"securitycontextconstraints"},
+            ResourceNames: []string{"hostaccess"},
+            Verbs:         []string{"use"},
+        },
     }
     return cs.RbacV1().ClusterRoles().Update(context.TODO(), cr, metav1.UpdateOptions{})
@@ -373,14 +382,14 @@ func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
 // labels and annotations
 func cleanupNode(cs clientset.Interface) {
     nodeList, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-    gomega.Expect(err).NotTo(gomega.HaveOccurred())
+    Expect(err).NotTo(HaveOccurred())
 
     for _, n := range nodeList.Items {
         var err error
         var node *v1.Node
         for retry := 0; retry < 5; retry++ {
             node, err = cs.CoreV1().Nodes().Get(context.TODO(), n.Name, metav1.GetOptions{})
-            gomega.Expect(err).NotTo(gomega.HaveOccurred())
+            Expect(err).NotTo(HaveOccurred())
 
             update := false
             // Remove labels
@@ -403,7 +412,7 @@ func cleanupNode(cs clientset.Interface) {
                 break
             }
 
-            ginkgo.By("Deleting NFD labels and annotations from node " + node.Name)
+            By("Deleting NFD labels and annotations from node " + node.Name)
             _, err = cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
             if err != nil {
                 time.Sleep(100 * time.Millisecond)
@@ -412,7 +421,7 @@ func cleanupNode(cs clientset.Interface) {
             }
 
         }
-        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+        Expect(err).NotTo(HaveOccurred())
     }
 }
@@ -420,42 +429,42 @@ func cleanupNode(cs clientset.Interface) {
 var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
     f := framework.NewDefaultFramework("node-feature-discovery")
 
-    ginkgo.Context("when deploying a single nfd-master pod", func() {
+    Context("when deploying a single nfd-master pod", func() {
         var masterPod *v1.Pod
 
-        ginkgo.BeforeEach(func() {
+        BeforeEach(func() {
             err := configureRBAC(f.ClientSet, f.Namespace.Name)
-            gomega.Expect(err).NotTo(gomega.HaveOccurred())
+            Expect(err).NotTo(HaveOccurred())
 
             // Launch nfd-master
-            ginkgo.By("Creating nfd master pod and nfd-master service")
+            By("Creating nfd master pod and nfd-master service")
             image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
             masterPod = nfdMasterPod(image, false)
             masterPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), masterPod, metav1.CreateOptions{})
-            gomega.Expect(err).NotTo(gomega.HaveOccurred())
+            Expect(err).NotTo(HaveOccurred())
 
             // Create nfd-master service
             nfdSvc, err := createService(f.ClientSet, f.Namespace.Name)
-            gomega.Expect(err).NotTo(gomega.HaveOccurred())
+            Expect(err).NotTo(HaveOccurred())
 
-            ginkgo.By("Waiting for the nfd-master pod to be running")
-            gomega.Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(gomega.HaveOccurred())
+            By("Waiting for the nfd-master pod to be running")
+            Expect(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, masterPod.Name, masterPod.Namespace, time.Minute)).NotTo(HaveOccurred())
 
-            ginkgo.By("Waiting for the nfd-master service to be up")
-            gomega.Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(gomega.HaveOccurred())
+            By("Waiting for the nfd-master service to be up")
+            Expect(e2enetwork.WaitForService(f.ClientSet, f.Namespace.Name, nfdSvc.ObjectMeta.Name, true, time.Second, 10*time.Second)).NotTo(HaveOccurred())
         })
 
-        ginkgo.AfterEach(func() {
+        AfterEach(func() {
             err := deconfigureRBAC(f.ClientSet, f.Namespace.Name)
-            gomega.Expect(err).NotTo(gomega.HaveOccurred())
+            Expect(err).NotTo(HaveOccurred())
 
         })
 
         //
         // Simple test with only the fake source enabled
         //
-        ginkgo.Context("and a single worker pod with fake source enabled", func() {
-            ginkgo.It("it should decorate the node with the fake feature labels", func() {
+        Context("and a single worker pod with fake source enabled", func() {
+            It("it should decorate the node with the fake feature labels", func() {
 
                 fakeFeatureLabels := map[string]string{
                     master.LabelNs + "/fake-fakefeature1": "true",
@@ -467,34 +476,34 @@ var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
                 cleanupNode(f.ClientSet)
 
                 // Launch nfd-worker
-                ginkgo.By("Creating a nfd worker pod")
+                By("Creating a nfd worker pod")
                 image := fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)
                 workerPod := nfdWorkerPod(image, []string{"--oneshot", "--sources=fake"})
                 workerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), workerPod, metav1.CreateOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
 
-                ginkgo.By("Waiting for the nfd-worker pod to succeed")
-                gomega.Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(gomega.HaveOccurred())
+                By("Waiting for the nfd-worker pod to succeed")
+                Expect(e2epod.WaitForPodSuccessInNamespace(f.ClientSet, workerPod.ObjectMeta.Name, f.Namespace.Name)).NotTo(HaveOccurred())
                 workerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), workerPod.ObjectMeta.Name, metav1.GetOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
 
-                ginkgo.By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
+                By(fmt.Sprintf("Making sure '%s' was decorated with the fake feature labels", workerPod.Spec.NodeName))
                 node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), workerPod.Spec.NodeName, metav1.GetOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
                 for k, v := range fakeFeatureLabels {
-                    gomega.Expect(node.Labels[k]).To(gomega.Equal(v))
+                    Expect(node.Labels[k]).To(Equal(v))
                 }
 
                 // Check that there are no unexpected NFD labels
                 for k := range node.Labels {
                     if strings.HasPrefix(k, master.LabelNs) {
-                        gomega.Expect(fakeFeatureLabels).Should(gomega.HaveKey(k))
+                        Expect(fakeFeatureLabels).Should(HaveKey(k))
                     }
                 }
 
-                ginkgo.By("Deleting the node-feature-discovery worker pod")
+                By("Deleting the node-feature-discovery worker pod")
                 err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), workerPod.ObjectMeta.Name, metav1.DeleteOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
 
                 cleanupNode(f.ClientSet)
             })
@@ -503,31 +512,31 @@ var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
         //
         // More comprehensive test when --e2e-node-config is enabled
         //
-        ginkgo.Context("and nfd-workers as a daemonset with default sources enabled", func() {
-            ginkgo.It("the node labels and annotations listed in the e2e config should be present", func() {
+        Context("and nfd-workers as a daemonset with default sources enabled", func() {
+            It("the node labels and annotations listed in the e2e config should be present", func() {
                 readConfig()
                 if conf == nil {
-                    ginkgo.Skip("no e2e-config was specified")
+                    Skip("no e2e-config was specified")
                 }
                 if conf.DefaultFeatures == nil {
-                    ginkgo.Skip("no 'defaultFeatures' specified in e2e-config")
+                    Skip("no 'defaultFeatures' specified in e2e-config")
                 }
                 fConf := conf.DefaultFeatures
 
                 // Remove pre-existing stale annotations and labels
                 cleanupNode(f.ClientSet)
 
-                ginkgo.By("Creating nfd-worker daemonset")
+                By("Creating nfd-worker daemonset")
                 workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
                 workerDS, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
 
-                ginkgo.By("Waiting for daemonset pods to be ready")
-                gomega.Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(gomega.HaveOccurred())
+                By("Waiting for daemonset pods to be ready")
+                Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
 
-                ginkgo.By("Getting node objects")
+                By("Getting node objects")
                 nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-                gomega.Expect(err).NotTo(gomega.HaveOccurred())
+                Expect(err).NotTo(HaveOccurred())
 
                 for _, node := range nodeList.Items {
                     var nodeConf *nodeConfig
@@ -546,10 +555,10 @@ var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
                     // Check labels
                     e2elog.Logf("verifying labels of node %q...", node.Name)
                     for k, v := range nodeConf.ExpectedLabelValues {
-                        gomega.Expect(node.Labels).To(gomega.HaveKeyWithValue(k, v))
+                        Expect(node.Labels).To(HaveKeyWithValue(k, v))
                     }
                     for k := range nodeConf.ExpectedLabelKeys {
-                        gomega.Expect(node.Labels).To(gomega.HaveKey(k))
+                        Expect(node.Labels).To(HaveKey(k))
                     }
                     for k := range node.Labels {
                         if strings.HasPrefix(k, master.LabelNs) {
@@ -560,17 +569,17 @@ var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
                             continue
                         }
                         // Ignore if the label key was not whitelisted
-                        gomega.Expect(fConf.LabelWhitelist).NotTo(gomega.HaveKey(k))
+                        Expect(fConf.LabelWhitelist).NotTo(HaveKey(k))
                     }
                 }
 
                 // Check annotations
                 e2elog.Logf("verifying annotations of node %q...", node.Name)
                 for k, v := range nodeConf.ExpectedAnnotationValues {
-                    gomega.Expect(node.Annotations).To(gomega.HaveKeyWithValue(k, v))
+                    Expect(node.Annotations).To(HaveKeyWithValue(k, v))
                 }
                 for k := range nodeConf.ExpectedAnnotationKeys {
-                    gomega.Expect(node.Annotations).To(gomega.HaveKey(k))
+                    Expect(node.Annotations).To(HaveKey(k))
                 }
                 for k := range node.Annotations {
                     if strings.HasPrefix(k, master.AnnotationNsBase) {
@@ -581,23 +590,181 @@ var _ = framework.KubeDescribe("[NFD] Node Feature Discovery", func() {
                         continue
                     }
                     // Ignore if the annotation was not whitelisted
-                    gomega.Expect(fConf.AnnotationWhitelist).NotTo(gomega.HaveKey(k))
+                    Expect(fConf.AnnotationWhitelist).NotTo(HaveKey(k))
                 }
             }
 
             // Node running nfd-master should have master version annotation
             if node.Name == masterPod.Spec.NodeName {
-                gomega.Expect(node.Annotations).To(gomega.HaveKey(master.AnnotationNsBase + "master.version"))
+                Expect(node.Annotations).To(HaveKey(master.AnnotationNsBase + "master.version"))
             }
         }
 
-        ginkgo.By("Deleting nfd-worker daemonset")
+        By("Deleting nfd-worker daemonset")
         err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
-        gomega.Expect(err).NotTo(gomega.HaveOccurred())
+        Expect(err).NotTo(HaveOccurred())
 
         cleanupNode(f.ClientSet)
     })
 })
+
+//
+// Test custom nodename source configured in 2 additional ConfigMaps
+//
+Context("and nfd-workers as a daemonset with 2 additional configmaps for the custom source configured", func() {
+    It("the nodename matching features listed in the configmaps should be present", func() {
+        // Remove pre-existing stale annotations and labels
+        cleanupNode(f.ClientSet)
+
+        By("Getting a worker node")
+
+        // We need a valid nodename for the configmap
+        nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+        Expect(err).NotTo(HaveOccurred())
+        Expect(len(nodeList.Items)).ToNot(BeZero())
+
+        targetNodeName := ""
+        for _, node := range nodeList.Items {
+            if _, ok := node.Labels["node-role.kubernetes.io/master"]; !ok {
+                targetNodeName = node.Name
+                break
+            }
+        }
+        Expect(targetNodeName).ToNot(BeEmpty(), "No worker node found")
+
+        // create a wildcard name as well for this node
+        targetNodeNameWildcard := fmt.Sprintf("%s.*%s", targetNodeName[:2], targetNodeName[4:])
+
+        By("Creating the configmaps")
+        targetLabelName := "nodename-test"
+        targetLabelValue := "true"
+
+        targetLabelNameWildcard := "nodename-test-wildcard"
+        targetLabelValueWildcard := "customValue"
+
+        targetLabelNameNegative := "nodename-test-negative"
+
+        // create 2 configmaps
+        data1 := make(map[string]string)
+        data1["custom1.conf"] = `
+- name: ` + targetLabelName + `
+  matchOn:
+  # default value is true
+  - nodename:
+    - ` + targetNodeName
+
+        cm1 := &v1.ConfigMap{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "custom-config-extra-" + string(uuid.NewUUID()),
+            },
+            Data: data1,
+        }
+        cm1, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm1, metav1.CreateOptions{})
+        Expect(err).NotTo(HaveOccurred())
+
+        data2 := make(map[string]string)
+        data2["custom1.conf"] = `
+- name: ` + targetLabelNameWildcard + `
+  value: ` + targetLabelValueWildcard + `
+  matchOn:
+  - nodename:
+    - ` + targetNodeNameWildcard + `
+- name: ` + targetLabelNameNegative + `
+  matchOn:
+  - nodename:
+    - "thisNameShouldNeverMatch"`
+
+        cm2 := &v1.ConfigMap{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "custom-config-extra-" + string(uuid.NewUUID()),
+            },
+            Data: data2,
+        }
+        cm2, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm2, metav1.CreateOptions{})
+        Expect(err).NotTo(HaveOccurred())
+
+        By("Creating nfd-worker daemonset with configmap mounted")
+        workerDS := nfdWorkerDaemonSet(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag), []string{})
+
+        // add configmap mount config
+        volumeName1 := "custom-configs-extra1"
+        volumeName2 := "custom-configs-extra2"
+        workerDS.Spec.Template.Spec.Volumes = append(workerDS.Spec.Template.Spec.Volumes,
+            v1.Volume{
+                Name: volumeName1,
+                VolumeSource: v1.VolumeSource{
+                    ConfigMap: &v1.ConfigMapVolumeSource{
+                        LocalObjectReference: v1.LocalObjectReference{
+                            Name: cm1.Name,
+                        },
+                    },
+                },
+            },
+            v1.Volume{
+                Name: volumeName2,
+                VolumeSource: v1.VolumeSource{
+                    ConfigMap: &v1.ConfigMapVolumeSource{
+                        LocalObjectReference: v1.LocalObjectReference{
+                            Name: cm2.Name,
+                        },
+                    },
+                },
+            },
+        )
+        workerDS.Spec.Template.Spec.Containers[0].VolumeMounts = append(workerDS.Spec.Template.Spec.Containers[0].VolumeMounts,
+            v1.VolumeMount{
+                Name:      volumeName1,
+                ReadOnly:  true,
+                MountPath: filepath.Join(custom.Directory, "cm1"),
+            },
+            v1.VolumeMount{
+                Name:      volumeName2,
+                ReadOnly:  true,
+                MountPath: filepath.Join(custom.Directory, "cm2"),
+            },
+        )
+
+        workerDS, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), workerDS, metav1.CreateOptions{})
+        Expect(err).NotTo(HaveOccurred())
+
+        By("Waiting for daemonset pods to be ready")
+        Expect(e2epod.WaitForPodsReady(f.ClientSet, f.Namespace.Name, workerDS.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())
+
+        By("Getting target node and checking labels")
+        targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), targetNodeName, metav1.GetOptions{})
+        Expect(err).ToNot(HaveOccurred())
+
+        labelFound := false
+        labelWildcardFound := false
+        labelNegativeFound := false
+        for k := range targetNode.Labels {
+            if strings.Contains(k, targetLabelName) {
+                if targetNode.Labels[k] == targetLabelValue {
+                    labelFound = true
+                }
+            }
+            if strings.Contains(k, targetLabelNameWildcard) {
+                if targetNode.Labels[k] == targetLabelValueWildcard {
+                    labelWildcardFound = true
+                }
+            }
+            if strings.Contains(k, targetLabelNameNegative) {
+                labelNegativeFound = true
+            }
+        }
+
+        Expect(labelFound).To(BeTrue(), "label not found!")
+        Expect(labelWildcardFound).To(BeTrue(), "label for wildcard nodename not found!")
+        Expect(labelNegativeFound).To(BeFalse(), "label for not existing nodename found!")
+
+        By("Deleting nfd-worker daemonset")
+        err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Delete(context.TODO(), workerDS.ObjectMeta.Name, metav1.DeleteOptions{})
+        Expect(err).NotTo(HaveOccurred())
+
+        cleanupNode(f.ClientSet)
+    })
+})
 
 })
 
 })