
Drop deployment templates

Resort to kustomize, instead. Update docs and scripts, accordingly.

Bump cert-manager version in the deployment instructions to v1.5.1.
Markus Lehtonen 2021-08-13 17:35:22 +03:00
parent 1f8a6d7819
commit 63c1256d08
15 changed files with 91 additions and 1197 deletions

.gitignore

@@ -1,9 +1,3 @@
 bin/
 demo/helper-scripts/*.pdf
 demo/helper-scripts/*.log
-nfd-daemonset-combined.yaml
-nfd-master.yaml
-nfd-worker-daemonset.yaml
-nfd-worker-job.yaml
-nfd-prune.yaml
-nfd-cert-manager.yaml


@@ -57,12 +57,6 @@ E2E_TEST_CONFIG ?=
 LDFLAGS = -ldflags "-s -w -X sigs.k8s.io/node-feature-discovery/pkg/version.version=$(VERSION) -X sigs.k8s.io/node-feature-discovery/source.pathPrefix=$(HOSTMOUNT_PREFIX)"

-yaml_templates := $(wildcard *.yaml.template)
-# Let's treat values.yaml as template to sync configmap
-# and allow users to install without modifications
-yaml_templates := $(yaml_templates) deployment/helm/node-feature-discovery/values.yaml
-yaml_instances := $(patsubst %.yaml.template,%.yaml,$(yaml_templates))

 all: image

 build:
@@ -90,31 +84,18 @@ image: yamls
     $(foreach tag,$(IMAGE_EXTRA_TAGS),-t $(tag)-minimal) \
     $(IMAGE_BUILD_EXTRA_OPTS) ./

-yamls: $(yaml_instances)
+yamls:
+    @./scripts/kustomize.sh $(K8S_NAMESPACE) $(IMAGE_REPO) $(IMAGE_TAG_NAME)

-%.yaml: %.yaml.template .FORCE
-    @echo "$@: namespace: ${K8S_NAMESPACE}"
-    @echo "$@: image: ${IMAGE_TAG}"
-    @sed -E \
-        -e s',^(\s*)name: node-feature-discovery # NFD namespace,\1name: ${K8S_NAMESPACE},' \
-        -e s',^(\s*)image:.+$$,\1image: ${IMAGE_TAG},' \
-        -e s',^(\s*)namespace:.+$$,\1namespace: ${K8S_NAMESPACE},' \
-        -e s',^(\s*- |\s*- nfd-master.|\s*- nfd-worker.)node-feature-discovery,\1${K8S_NAMESPACE},' \
-        -e s',^(\s*)mountPath: "/host-,\1mountPath: "${CONTAINER_HOSTMOUNT_PREFIX},' \
-        -e '/nfd-worker.conf:/r nfd-worker.conf.tmp' \
-        $< > $@
-
-templates: $(yaml_templates)
+templates:
     @# Need to prepend each line in the sample config with spaces in order to
     @# fit correctly in the configmap spec.
     @sed s'/^/ /' deployment/components/worker-config/nfd-worker.conf.example > nfd-worker.conf.tmp
     @# The sed magic below replaces the block of text between the lines with start and end markers
-    @for f in $+; do \
-        start=NFD-WORKER-CONF-START-DO-NOT-REMOVE; \
-        end=NFD-WORKER-CONF-END-DO-NOT-REMOVE; \
-        sed -e "/$$start/,/$$end/{ /$$start/{ p; r nfd-worker.conf.tmp" \
-            -e "}; /$$end/p; d }" -i $$f; \
-    done
+    @start=NFD-WORKER-CONF-START-DO-NOT-REMOVE; \
+    end=NFD-WORKER-CONF-END-DO-NOT-REMOVE; \
+    sed -e "/$$start/,/$$end/{ /$$start/{ p; r nfd-worker.conf.tmp" \
+        -e "}; /$$end/p; d }" -i deployment/helm/node-feature-discovery/values.yaml
     @rm nfd-worker.conf.tmp

 mock:


@@ -54,32 +54,19 @@ attribute in the spec template(s) to the new location
 ### Deployment

-The `yamls` makefile generates deployment specs matching your locally built
-image. See [build customization](#customizing-the-build) below for
-configurability, e.g. changing the deployment namespace.
+The `yamls` makefile target generates a `kustomization.yaml` matching your
+locally built image and using the `deployment/overlays/default` deployment.
+See [build customization](#customizing-the-build) below for configurability,
+e.g. changing the deployment namespace.

 ```bash
 K8S_NAMESPACE=my-ns make yamls
-kubectl apply -f nfd-master.yaml
-kubectl apply -f nfd-worker-daemonset.yaml
+kubectl apply -k .
 ```

-Alternatively, deploying worker and master in the same pod:
-
-```bash
-K8S_NAMESPACE=my-ns make yamls
-kubectl apply -f nfd-master.yaml
-kubectl apply -f nfd-daemonset-combined.yaml
-```
-
-Or worker as a one-shot job:
-
-```bash
-K8S_NAMESPACE=my-ns make yamls
-kubectl apply -f nfd-master.yaml
-NUM_NODES=$(kubectl get no -o jsonpath='{.items[*].metadata.name}' | wc -w)
-sed s"/NUM_NODES/$NUM_NODES/" nfd-worker-job.yaml | kubectl apply -f -
-```
+You can use alternative deployment methods by modifying the auto-generated
+kustomization file. For example, deploy worker and master in the same pod by
+pointing to `deployment/overlays/default-combined`.
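For illustration (not part of the diff): with `K8S_NAMESPACE=my-ns` and the default image settings, the `kustomization.yaml` written by `make yamls` (via the new `scripts/kustomize.sh` shown later in this commit) would look roughly like the sketch below; the namespace and image values are examples only.

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: my-ns                                            # from K8S_NAMESPACE
images:
  - name: '*'
    newName: gcr.io/k8s-staging-nfd/node-feature-discovery  # from IMAGE_REPO
    newTag: master                                          # from IMAGE_TAG_NAME
resources:
  - deployment/overlays/default
```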

 ### Building locally
@@ -254,7 +241,7 @@ Usage of nfd-worker:
 host mounted inside the NFD container. Thus, you need to provide Docker with the
 correct `--volume` options in order for them to work correctly when run
 stand-alone directly with `docker run`. See the
-[template spec](https://github.com/kubernetes-sigs/node-feature-discovery/blob/{{site.release}}/nfd-worker-daemonset.yaml.template)
+[default deployment](https://github.com/kubernetes-sigs/node-feature-discovery/blob/{{site.release}}/deployment/components/common/worker-mounts.yaml)
 for up-to-date information about the required volume mounts.

 ## Documentation


@@ -89,27 +89,29 @@ In order to deploy the [minimal](#minimal) image you need to add
 to the metadata of NodeFeatureDiscovery object above.

-### Deployment templates
+### Deployment with kustomize

-The template specs provided in the repo can be used directly:
+The kustomize overlays provided in the repo can be used directly:

 ```bash
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-master.yaml.template
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-worker-daemonset.yaml.template
+kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default?ref={{ site.release }}
 ```

 This will create the required RBAC rules and deploy nfd-master (as a deployment) and
 nfd-worker (as a daemonset) in the `node-feature-discovery` namespace.

-Alternatively you can download the templates and customize the deployment
-manually. For example, to deploy the [minimal](#minimal) image.
+Alternatively you can clone the repository and customize the deployment by
+creating your own overlays, for example to deploy the [minimal](#minimal)
+image. See [kustomize][kustomize] for more information about managing
+deployment configurations.
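As a hedged sketch (not part of the commit): such a user-maintained overlay could be a directory holding a `kustomization.yaml` along these lines. The relative base path and the `-minimal` tag suffix are assumptions, based on the overlay layout and the `-minimal` image tags produced by the Makefile.

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Reuse the in-repo default overlay as the base (assumed relative location)
resources:
  - ../default
# Switch to the minimal image variant; the tag value is illustrative
images:
  - name: gcr.io/k8s-staging-nfd/node-feature-discovery
    newTag: master-minimal
```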

 #### Master-worker pod

 You can also run nfd-master and nfd-worker inside the same pod.

 ```bash
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-daemonset-combined.yaml.template
+kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default-combined?ref={{ site.release }}
 ```

 This creates a DaemonSet that runs both nfd-worker and nfd-master in the same Pod.
@@ -119,11 +121,11 @@ are able to label themselves which may be desirable e.g. in single-node setups.
 #### Worker one-shot

 Feature discovery can alternatively be configured as a one-shot job.
-The Job template may be used to achieve this:
+The `default-job` overlay may be used to achieve this:

 ```bash
 NUM_NODES=$(kubectl get no -o jsonpath='{.items[*].metadata.name}' | wc -w)
-curl -fs https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-worker-job.yaml.template | \
+kubectl kustomize https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default-job?ref={{ site.release }} | \
     sed s"/NUM_NODES/$NUM_NODES/" | \
     kubectl apply -f -
 ```
@@ -288,12 +290,16 @@ re-labeling on regular intervals capturing changes in the system configuration
 and makes sure that new nodes are labeled as they are added to the cluster.
 Worker connects to the nfd-master service to advertise hardware features.

-When run as a daemonset, nodes are re-labeled at an interval specified using
-the `-sleep-interval` option. In the
-[template](https://github.com/kubernetes-sigs/node-feature-discovery/blob/{{site.release}}/nfd-worker-daemonset.yaml.template#L26)
-the default interval is set to 60s which is also the default when no
-`-sleep-interval` is specified. Also, the configuration file is re-read on
-each iteration providing a simple mechanism of run-time reconfiguration.
+When run as a daemonset, nodes are re-labeled at a default interval of 60s.
+This can be changed by using the
+[`core.sleepInterval`](../advanced/worker-configuration-reference.html#coresleepinterval)
+config option (or the
+[`-sleep-interval`](../advanced/worker-commandline-reference.html#-sleep-interval)
+command line flag).
+The worker configuration file is watched and re-read on every change, which
+provides a simple mechanism of dynamic run-time reconfiguration. See
+[worker configuration](#worker-configuration) for more details.
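For example (the value is illustrative), the interval could be raised in the worker configuration file using the `core.sleepInterval` key that also appears in the sample config shipped with the deployment:

```yaml
core:
  sleepInterval: 120s
```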

 ### Communication security with TLS
@@ -317,24 +323,24 @@ of its certificate.
 #### Automated TLS certificate management using cert-manager

 [cert-manager](https://cert-manager.io/) can be used to automate certificate
-management between nfd-master and the nfd-worker pods. The instructions below describe
-steps how to set up cert-manager's
-[CA Issuer](https://cert-manager.io/docs/configuration/ca/) to
-sign `Certificate` requests for NFD components in `node-feature-discovery` namespace.
+management between nfd-master and the nfd-worker pods.
+
+The NFD source code repository contains an example kustomize overlay that can
+be used to deploy NFD with cert-manager supplied certificates enabled. The
+instructions below describe how to generate a self-signed CA certificate
+and set up cert-manager's
+[CA Issuer](https://cert-manager.io/docs/configuration/ca/) to sign
+`Certificate` requests for NFD components in the `node-feature-discovery`
+namespace.

 ```bash
-$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml
-$ make yamls
-$ openssl genrsa -out ca.key 2048
-$ openssl req -x509 -new -nodes -key ca.key -subj "/CN=nfd-ca" -days 10000 -out ca.crt
-$ sed s"/tls.key:.*/tls.key: $(cat ca.key|base64 -w 0)/" -i nfd-cert-manager.yaml
-$ sed s"/tls.crt:.*/tls.crt: $(cat ca.crt|base64 -w 0)/" -i nfd-cert-manager.yaml
-$ kubectl apply -f nfd-cert-manager.yaml
+kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.yaml
+openssl genrsa -out deployment/overlays/samples/cert-manager/tls.key 2048
+openssl req -x509 -new -nodes -key deployment/overlays/samples/cert-manager/tls.key -subj "/CN=nfd-ca" \
+    -days 10000 -out deployment/overlays/samples/cert-manager/tls.crt
+kubectl apply -k deployment/overlays/samples/cert-manager
 ```
-
-Finally, deploy `nfd-master.yaml` and `nfd-worker-daemonset.yaml` with the Secrets
-(`nfd-master-cert` and `nfd-worker-cert`) mounts enabled.
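A possible sanity check after applying the sample overlay, assuming it creates Certificates equivalent to the removed nfd-cert-manager.yaml shown further down in this commit, is to verify that the certificates become ready and the expected secrets exist:

```bash
# Both commands assume cert-manager is installed and the overlay has been applied
kubectl -n node-feature-discovery get certificates
kubectl -n node-feature-discovery get secret nfd-master-cert nfd-worker-cert
```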

 ## Worker configuration

 NFD-Worker supports dynamic configuration through a configuration file. The
@@ -421,11 +427,11 @@ lifecycle manager, respectively.
 The simplest way is to invoke `kubectl delete` on the deployment files you used.
 Beware that this will also delete the namespace that NFD is running in. For
-example:
+example, in case the default deployment from the repo was used:

 ```bash
-kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-worker-daemonset.yaml.template
-kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-master.yaml.template
+kubectl delete -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default?ref={{ site.release }}
 ```

 Alternatively you can delete the created objects one-by-one, depending on the type
@@ -447,14 +453,15 @@ NFD-Master has a special `-prune` command line flag for removing all
 nfd-related node labels, annotations and extended resources from the cluster.

 ```bash
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-prune.yaml.template
+kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/prune?ref={{ site.release }}
 kubectl -n node-feature-discovery wait job.batch/nfd-prune --for=condition=complete && \
-  kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-prune.yaml.template
+  kubectl delete -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/prune?ref={{ site.release }}
 ```

 **NOTE:** You must run prune before removing the RBAC rules (serviceaccount,
 clusterrole and clusterrolebinding).

 <!-- Links -->
+[kustomize]: https://github.com/kubernetes-sigs/kustomize
 [nfd-operator]: https://github.com/kubernetes-sigs/node-feature-discovery-operator
 [OLM]: https://github.com/operator-framework/operator-lifecycle-manager


@@ -595,10 +595,9 @@ Pods outside NFD, e.g. in Kubernetes device plugins. It is possible to mount
 the `source.d` and/or `features.d` directories common with the NFD Pod and
 deploy the custom hooks/features there. NFD will periodically scan the
 directories and run any hooks and read any feature files it finds. The
-[example nfd-worker deployment template](https://github.com/kubernetes-sigs/node-feature-discovery/blob/{{site.release}}/nfd-worker-daemonset.yaml.template#L69)
-contains `hostPath` mounts for `sources.d` and `features.d` directories. By
-using the same mounts in the secondary Pod (e.g. device plugin) you have
-created a shared area for delivering hooks and feature files to NFD.
+default deployments contain `hostPath` mounts for the `source.d` and `features.d`
+directories. By using the same mounts in the secondary Pod (e.g. device plugin)
+you have created a shared area for delivering hooks and feature files to NFD.
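A minimal sketch (not part of the commit) of such a secondary Pod sharing the `features.d` hostPath with NFD; the Pod name and image are placeholders, while the mount paths match the NFD worker mounts shown in the removed templates below:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-device-plugin                                      # hypothetical name
spec:
  containers:
    - name: my-device-plugin
      image: registry.example.com/my-device-plugin:latest     # placeholder image
      volumeMounts:
        - name: features-d
          mountPath: /etc/kubernetes/node-feature-discovery/features.d/
  volumes:
    - name: features-d
      hostPath:
        path: /etc/kubernetes/node-feature-discovery/features.d/
```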

 #### A hook example


@@ -20,11 +20,14 @@ Continue to:
 ## Quick-start -- the short-short version

 ```bash
-$ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-master.yaml.template
+$ kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default?ref={{ site.release }}
 namespace/node-feature-discovery created
-...
-
-$ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-worker-daemonset.yaml.template
+serviceaccount/nfd-master created
+clusterrole.rbac.authorization.k8s.io/nfd-master created
+clusterrolebinding.rbac.authorization.k8s.io/nfd-master created
+configmap/nfd-worker-conf created
+service/nfd-master created
+deployment.apps/nfd-master created
 daemonset.apps/nfd-worker created

 $ kubectl -n node-feature-discovery get all


@@ -10,16 +10,11 @@ Minimal steps to deploy latest released version of NFD in your cluster.
 ## Installation

-Deploy nfd-master -- creates a new namespace, service and required RBAC rules
+Deploy with kustomize -- creates a new namespace, service and required RBAC
+rules and deploys nfd-master and nfd-worker daemons.

 ```bash
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-master.yaml.template
-```
-
-Deploy nfd-worker as a daemonset
-
-```bash
-kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/{{ site.release }}/nfd-worker-daemonset.yaml.template
+kubectl apply -k https://github.com/kubernetes-sigs/node-feature-discovery/deployment/overlays/default?ref={{ site.release }}
 ```

 ## Verify


@@ -1,55 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: nfd-ca-key-pair
namespace: node-feature-discovery
data:
tls.key:
tls.crt:
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: nfd-ca-issuer
namespace: node-feature-discovery
spec:
ca:
secretName: nfd-ca-key-pair
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: nfd-master-cert
namespace: node-feature-discovery
spec:
secretName: nfd-master-cert
subject:
organizations:
- node-feature-discovery
commonName: nfd-master
dnsNames:
- nfd-master.node-feature-discovery.svc
- nfd-master.node-feature-discovery.svc.cluster.local
- nfd-master
issuerRef:
name: nfd-ca-issuer
kind: Issuer
group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: nfd-workers-cert
namespace: node-feature-discovery
spec:
secretName: nfd-worker-cert
subject:
organizations:
- node-feature-discovery
commonName: nfd-worker
dnsNames:
- nfd-worker.node-feature-discovery.svc.cluster.local
issuerRef:
name: nfd-ca-issuer
kind: Issuer
group: cert-manager.io


@@ -1,290 +0,0 @@
# This template contains an example of running nfd-master and nfd-worker in the
# same pod. All changes in this template should be applied to Helm chart too.
#
apiVersion: v1
kind: Namespace
metadata:
name: node-feature-discovery # NFD namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nfd-master
rules:
- apiGroups:
- ""
resources:
- nodes
# when using command line flag --resource-labels to create extended resources
# you will need to uncomment "- nodes/status"
# - nodes/status
verbs:
- get
- patch
- update
# List only needed for --prune
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nfd-master
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nfd-master
subjects:
- kind: ServiceAccount
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: nfd
name: nfd
namespace: node-feature-discovery
spec:
selector:
matchLabels:
app: nfd
template:
metadata:
labels:
app: nfd
spec:
serviceAccount: nfd-master
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
name: nfd-master
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-master"
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
name: nfd-worker
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-worker"
volumeMounts:
- name: host-boot
mountPath: "/host-boot"
readOnly: true
- name: host-os-release
mountPath: "/host-etc/os-release"
readOnly: true
- name: host-sys
mountPath: "/host-sys"
readOnly: true
- name: host-usr-lib
mountPath: "/host-usr/lib"
readOnly: true
- name: host-usr-src
mountPath: "/host-usr/src"
readOnly: true
- name: source-d
mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
readOnly: true
- name: features-d
mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
readOnly: true
- name: nfd-worker-conf
mountPath: "/etc/kubernetes/node-feature-discovery"
readOnly: true
## Example for more custom configs in an additional configmap (1/3)
## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
# - name: custom-source-extra-rules
# mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
# readOnly: true
volumes:
- name: host-boot
hostPath:
path: "/boot"
- name: host-os-release
hostPath:
path: "/etc/os-release"
- name: host-sys
hostPath:
path: "/sys"
- name: host-usr-lib
hostPath:
path: "/usr/lib"
- name: host-usr-src
hostPath:
path: "/usr/src"
- name: source-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/source.d/"
- name: features-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/features.d/"
- name: nfd-worker-conf
configMap:
name: nfd-worker-conf
## Example for more custom configs in an additional configmap (2/3)
# - name: custom-source-extra-rules
# configMap:
# name: custom-source-extra-rules
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nfd-worker-conf
namespace: node-feature-discovery
data:
nfd-worker.conf: | ### <NFD-WORKER-CONF-START-DO-NOT-REMOVE>
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# sources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
#sources:
# cpu:
# cpuid:
## NOTE: whitelist has priority over blacklist
# attributeBlacklist:
# - "BMI1"
# - "BMI2"
# - "CLMUL"
# - "CMOV"
# - "CX16"
# - "ERMS"
# - "F16C"
# - "HTT"
# - "LZCNT"
# - "MMX"
# - "MMXEXT"
# - "NX"
# - "POPCNT"
# - "RDRAND"
# - "RDSEED"
# - "RDTSCP"
# - "SGX"
# - "SSE"
# - "SSE2"
# - "SSE3"
# - "SSE4"
# - "SSE42"
# - "SSSE3"
# attributeWhitelist:
# kernel:
# kconfigFile: "/path/to/kconfig"
# configOpts:
# - "NO_HZ"
# - "X86"
# - "DMI"
# pci:
# deviceClassWhitelist:
# - "0200"
# - "03"
# - "12"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# - "subsystem_vendor"
# - "subsystem_device"
# usb:
# deviceClassWhitelist:
# - "0e"
# - "ef"
# - "fe"
# - "ff"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# custom:
# - name: "my.kernel.feature"
# matchOn:
# - loadedKMod: ["example_kmod1", "example_kmod2"]
# - name: "my.pci.feature"
# matchOn:
# - pciId:
# class: ["0200"]
# vendor: ["15b3"]
# device: ["1014", "1017"]
# - pciId :
# vendor: ["8086"]
# device: ["1000", "1100"]
# - name: "my.usb.feature"
# matchOn:
# - usbId:
# class: ["ff"]
# vendor: ["03e7"]
# device: ["2485"]
# - usbId:
# class: ["fe"]
# vendor: ["1a6e"]
# device: ["089a"]
# - name: "my.combined.feature"
# matchOn:
# - pciId:
# vendor: ["15b3"]
# device: ["1014", "1017"]
# loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
# - name: "feature.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["worker-0", "my-.*-node"]
### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
---
## Example for more custom configs in an additional configmap (3/3)
#apiVersion: v1
#kind: ConfigMap
#metadata:
# name: custom-source-extra-rules
# namespace: node-feature-discovery
#data:
## Filename doesn't matter, and there can be multiple. They just need to be unique.
# custom.conf: |
# - name: "more.kernel.features"
# matchOn:
# - loadedKMod: ["example_kmod3"]
# - name: "more.features.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["special-.*-node-.*"]


@@ -1,136 +0,0 @@
# All changes in this template should be applied to Helm chart too.
#
apiVersion: v1
kind: Namespace
metadata:
name: node-feature-discovery # NFD namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nfd-master
rules:
- apiGroups:
- ""
resources:
- nodes
# when using command line flag --resource-labels to create extended resources
# you will need to uncomment "- nodes/status"
# - nodes/status
verbs:
- get
- patch
- update
# List only needed for --prune
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nfd-master
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nfd-master
subjects:
- kind: ServiceAccount
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nfd-master
name: nfd-master
namespace: node-feature-discovery
spec:
replicas: 1
selector:
matchLabels:
app: nfd-master
template:
metadata:
labels:
app: nfd-master
spec:
serviceAccount: nfd-master
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values: [""]
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/control-plane"
operator: In
values: [""]
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
name: nfd-master
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-master"
## Enable TLS authentication
## The example below assumes having a Secret named nfd-master-cert with
## the TLS authentication credentials and a root certificate named ca.crt created.
## cert-manager can be used to automate the Secret creation and updates.
## Additional hardening can be enabled by specifying --verify-node-name in
## args, in which case node name will be checked against the worker's
## TLS certificate.
# args:
# - "--ca-file=/etc/kubernetes/node-feature-discovery/certs/ca.crt"
# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
# volumeMounts:
# - name: nfd-master-cert
# mountPath: "/etc/kubernetes/node-feature-discovery/certs"
# readOnly: true
# volumes:
# - name: nfd-master-cert
# secret:
# secretName: nfd-master-cert
---
apiVersion: v1
kind: Service
metadata:
name: nfd-master
namespace: node-feature-discovery
spec:
selector:
app: nfd-master
ports:
- protocol: TCP
port: 8080
type: ClusterIP


@@ -1,99 +0,0 @@
# All changes in this template should be applied to Helm chart too.
#
apiVersion: v1
kind: Namespace
metadata:
name: node-feature-discovery # NFD namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nfd-master
rules:
- apiGroups:
- ""
resources:
- nodes
# when using command line flag --resource-labels to create extended resources
# you will need to uncomment "- nodes/status"
# - nodes/status
verbs:
- get
- patch
- update
# List only needed for --prune
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nfd-master
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nfd-master
subjects:
- kind: ServiceAccount
name: nfd-master
namespace: node-feature-discovery
---
apiVersion: batch/v1
kind: Job
metadata:
name: nfd-prune
namespace: node-feature-discovery
labels:
app: nfd-prune
spec:
completions: 1
template:
metadata:
labels:
app: nfd-prune
spec:
serviceAccount: nfd-master
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values: [""]
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/control-plane"
operator: In
values: [""]
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
containers:
- name: nfd-master
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-master"
args:
- "--prune"
restartPolicy: Never


@@ -1,247 +0,0 @@
# All changes in this template should be applied to Helm chart too.
#
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: nfd-worker
name: nfd-worker
namespace: node-feature-discovery
spec:
selector:
matchLabels:
app: nfd-worker
template:
metadata:
labels:
app: nfd-worker
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
name: nfd-worker
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-worker"
args:
- "--server=nfd-master:8080"
## Enable TLS authentication (1/3)
## The example below assumes having a Secret named nfd-worker-cert with
## the TLS authentication credentials and a root certificate named ca.crt created.
## cert-manager can be used to automate the Secret creation and updates.
# - "--ca-file=/etc/kubernetes/node-feature-discovery/certs/ca.crt"
# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
volumeMounts:
- name: host-boot
mountPath: "/host-boot"
readOnly: true
- name: host-os-release
mountPath: "/host-etc/os-release"
readOnly: true
- name: host-sys
mountPath: "/host-sys"
readOnly: true
- name: host-usr-lib
mountPath: "/host-usr/lib"
readOnly: true
- name: host-usr-src
mountPath: "/host-usr/src"
readOnly: true
- name: source-d
mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
readOnly: true
- name: features-d
mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
readOnly: true
- name: nfd-worker-conf
mountPath: "/etc/kubernetes/node-feature-discovery"
readOnly: true
## Example for more custom configs in an additional configmap (1/3)
## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
# - name: custom-source-extra-rules
# mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
# readOnly: true
## Enable TLS authentication (2/3)
# - name: nfd-worker-cert
# mountPath: "/etc/kubernetes/node-feature-discovery/certs"
# readOnly: true
volumes:
- name: host-boot
hostPath:
path: "/boot"
- name: host-os-release
hostPath:
path: "/etc/os-release"
- name: host-sys
hostPath:
path: "/sys"
- name: host-usr-lib
hostPath:
path: "/usr/lib"
- name: host-usr-src
hostPath:
path: "/usr/src"
- name: source-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/source.d/"
- name: features-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/features.d/"
- name: nfd-worker-conf
configMap:
name: nfd-worker-conf
## Example for more custom configs in an additional configmap (2/3)
# - name: custom-source-extra-rules
# configMap:
# name: custom-source-extra-rules
## Enable TLS authentication (3/3)
# - name: nfd-worker-cert
# secret:
# secretName: nfd-worker-cert
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nfd-worker-conf
namespace: node-feature-discovery
data:
nfd-worker.conf: | ### <NFD-WORKER-CONF-START-DO-NOT-REMOVE>
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# sources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
#sources:
# cpu:
# cpuid:
## NOTE: whitelist has priority over blacklist
# attributeBlacklist:
# - "BMI1"
# - "BMI2"
# - "CLMUL"
# - "CMOV"
# - "CX16"
# - "ERMS"
# - "F16C"
# - "HTT"
# - "LZCNT"
# - "MMX"
# - "MMXEXT"
# - "NX"
# - "POPCNT"
# - "RDRAND"
# - "RDSEED"
# - "RDTSCP"
# - "SGX"
# - "SSE"
# - "SSE2"
# - "SSE3"
# - "SSE4"
# - "SSE42"
# - "SSSE3"
# attributeWhitelist:
# kernel:
# kconfigFile: "/path/to/kconfig"
# configOpts:
# - "NO_HZ"
# - "X86"
# - "DMI"
# pci:
# deviceClassWhitelist:
# - "0200"
# - "03"
# - "12"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# - "subsystem_vendor"
# - "subsystem_device"
# usb:
# deviceClassWhitelist:
# - "0e"
# - "ef"
# - "fe"
# - "ff"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# custom:
# - name: "my.kernel.feature"
# matchOn:
# - loadedKMod: ["example_kmod1", "example_kmod2"]
# - name: "my.pci.feature"
# matchOn:
# - pciId:
# class: ["0200"]
# vendor: ["15b3"]
# device: ["1014", "1017"]
# - pciId :
# vendor: ["8086"]
# device: ["1000", "1100"]
# - name: "my.usb.feature"
# matchOn:
# - usbId:
# class: ["ff"]
# vendor: ["03e7"]
# device: ["2485"]
# - usbId:
# class: ["fe"]
# vendor: ["1a6e"]
# device: ["089a"]
# - name: "my.combined.feature"
# matchOn:
# - pciId:
# vendor: ["15b3"]
# device: ["1014", "1017"]
# loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
# - name: "feature.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["worker-0", "my-.*-node"]
### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
---
## Example for more custom configs in an additional configmap (3/3)
#apiVersion: v1
#kind: ConfigMap
#metadata:
# name: custom-source-extra-rules
# namespace: node-feature-discovery
#data:
## Filename doesn't matter, and there can be multiple. They just need to be unique.
# custom.conf: |
# - name: "more.kernel.features"
# matchOn:
# - loadedKMod: ["example_kmod3"]
# - name: "more.features.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["special-.*-node-.*"]


@@ -1,264 +0,0 @@
# All changes in this template should be applied to Helm chart too.
#
apiVersion: batch/v1
kind: Job
metadata:
labels:
app: node-feature-discovery
name: nfd-worker
namespace: node-feature-discovery
spec:
completions: NUM_NODES
parallelism: NUM_NODES
template:
metadata:
labels:
app: node-feature-discovery
spec:
dnsPolicy: ClusterFirstWithHostNet
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- node-feature-discovery
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
imagePullPolicy: Always
name: nfd-worker
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
command:
- "nfd-worker"
args:
- "--oneshot"
- "--server=nfd-master:8080"
## Enable TLS authentication (1/3)
## The example below assumes having the root certificate named ca.crt stored in
## a ConfigMap named nfd-ca-cert, and, the TLS authentication credentials stored
## in a TLS Secret named nfd-worker-cert
# - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
volumeMounts:
- name: host-boot
mountPath: "/host-boot"
readOnly: true
- name: host-os-release
mountPath: "/host-etc/os-release"
readOnly: true
- name: host-sys
mountPath: "/host-sys"
readOnly: true
- name: host-usr-lib
mountPath: "/host-usr/lib"
readOnly: true
- name: host-usr-src
mountPath: "/host-usr/src"
readOnly: true
- name: source-d
mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
readOnly: true
- name: features-d
mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
readOnly: true
- name: nfd-worker-conf
mountPath: "/etc/kubernetes/node-feature-discovery"
readOnly: true
## Example for more custom configs in an additional configmap (1/3)
## Mounting into subdirectories of custom.d makes it easy to use multiple configmaps
# - name: custom-source-extra-rules
# mountPath: "/etc/kubernetes/node-feature-discovery/custom.d/extra-rules-1"
# readOnly: true
## Enable TLS authentication (2/3)
# - name: nfd-ca-cert
# mountPath: "/etc/kubernetes/node-feature-discovery/trust"
# readOnly: true
# - name: nfd-worker-cert
# mountPath: "/etc/kubernetes/node-feature-discovery/certs"
# readOnly: true
restartPolicy: Never
volumes:
- name: host-boot
hostPath:
path: "/boot"
- name: host-os-release
hostPath:
path: "/etc/os-release"
- name: host-sys
hostPath:
path: "/sys"
- name: host-usr-lib
hostPath:
path: "/usr/lib"
- name: host-usr-src
hostPath:
path: "/usr/src"
- name: source-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/source.d/"
- name: features-d
hostPath:
path: "/etc/kubernetes/node-feature-discovery/features.d/"
- name: nfd-worker-conf
configMap:
name: nfd-worker-conf
## Example for more custom configs in an additional configmap (2/3)
# - name: custom-source-extra-rules
# configMap:
# name: custom-source-extra-rules
## Enable TLS authentication (3/3)
# - name: nfd-ca-cert
# configMap:
# name: nfd-ca-cert
# - name: nfd-worker-cert
# secret:
# secretName: nfd-worker-cert
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nfd-worker-conf
namespace: node-feature-discovery
data:
nfd-worker.conf: | ### <NFD-WORKER-CONF-START-DO-NOT-REMOVE>
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# sources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
#sources:
# cpu:
# cpuid:
## NOTE: whitelist has priority over blacklist
# attributeBlacklist:
# - "BMI1"
# - "BMI2"
# - "CLMUL"
# - "CMOV"
# - "CX16"
# - "ERMS"
# - "F16C"
# - "HTT"
# - "LZCNT"
# - "MMX"
# - "MMXEXT"
# - "NX"
# - "POPCNT"
# - "RDRAND"
# - "RDSEED"
# - "RDTSCP"
# - "SGX"
# - "SSE"
# - "SSE2"
# - "SSE3"
# - "SSE4"
# - "SSE42"
# - "SSSE3"
# attributeWhitelist:
# kernel:
# kconfigFile: "/path/to/kconfig"
# configOpts:
# - "NO_HZ"
# - "X86"
# - "DMI"
# pci:
# deviceClassWhitelist:
# - "0200"
# - "03"
# - "12"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# - "subsystem_vendor"
# - "subsystem_device"
# usb:
# deviceClassWhitelist:
# - "0e"
# - "ef"
# - "fe"
# - "ff"
# deviceLabelFields:
# - "class"
# - "vendor"
# - "device"
# custom:
# - name: "my.kernel.feature"
# matchOn:
# - loadedKMod: ["example_kmod1", "example_kmod2"]
# - name: "my.pci.feature"
# matchOn:
# - pciId:
# class: ["0200"]
# vendor: ["15b3"]
# device: ["1014", "1017"]
# - pciId :
# vendor: ["8086"]
# device: ["1000", "1100"]
# - name: "my.usb.feature"
# matchOn:
# - usbId:
# class: ["ff"]
# vendor: ["03e7"]
# device: ["2485"]
# - usbId:
# class: ["fe"]
# vendor: ["1a6e"]
# device: ["089a"]
# - name: "my.combined.feature"
# matchOn:
# - pciId:
# vendor: ["15b3"]
# device: ["1014", "1017"]
# loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
# - name: "feature.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["worker-0", "my-.*-node"]
### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
---
## Example for more custom configs in an additional configmap (3/3)
#apiVersion: v1
#kind: ConfigMap
#metadata:
# name: custom-source-extra-rules
# namespace: node-feature-discovery
#data:
## Filename doesn't matter, and there can be multiple. They just need to be unique.
# custom.conf: |
# - name: "more.kernel.features"
# matchOn:
# - loadedKMod: ["example_kmod3"]
# - name: "more.features.by.nodename"
# value: customValue
# matchOn:
# - nodename: ["special-.*-node-.*"]

scripts/kustomize.sh (new executable file)

@@ -0,0 +1,19 @@
#!/bin/bash -e
echo "namespace: $1"
echo "image: $2:$3"
cat > kustomization.yaml << EOF
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: $1
images:
- name: '*'
newName: $2
newTag: $3
resources:
- deployment/overlays/default
EOF
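
A usage sketch (the argument values are examples; the Makefile's `yamls` target passes `$(K8S_NAMESPACE) $(IMAGE_REPO) $(IMAGE_TAG_NAME)`):

```bash
./scripts/kustomize.sh node-feature-discovery gcr.io/k8s-staging-nfd/node-feature-discovery master
# prints:
#   namespace: node-feature-discovery
#   image: gcr.io/k8s-staging-nfd/node-feature-discovery:master
# and writes kustomization.yaml to the current working directory
```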


@@ -96,10 +96,10 @@ if [ -z "$assets_only" ]; then
     sed s"!node-feature-discovery/v.*/!node-feature-discovery/$release/!" -i README.md

     # Patch deployment templates
-    echo Patching '*.yaml.template' to use $container_image
+    echo Patching kustomize templates to use $container_image
     sed -E -e s",^([[:space:]]+)image:.+$,\1image: $container_image," \
            -e s",^([[:space:]]+)imagePullPolicy:.+$,\1imagePullPolicy: IfNotPresent," \
-           -i *yaml.template
+           -i deployment/base/*/*yaml

     # Patch Helm chart
     echo "Patching Helm chart"