From e80900b8ee6e5b532a7aa1f6163f77fbfbe03279 Mon Sep 17 00:00:00 2001 From: Adrian Chiris Date: Wed, 16 Dec 2020 17:27:31 +0000 Subject: [PATCH] Add helm chart for NFD This commit adds Helm chart for node-feature-discovery Signed-off-by: Adrian Chiris Signed-off-by: Ivan Kolodiazhnyi --- Makefile | 3 + deployment/node-feature-discovery/.helmignore | 23 ++ deployment/node-feature-discovery/Chart.yaml | 15 ++ .../templates/_helpers.tpl | 63 ++++++ .../templates/clusterrole.yaml | 21 ++ .../templates/clusterrolebinding.yaml | 16 ++ .../templates/master.yaml | 83 +++++++ .../templates/nfd-worker-conf.yaml | 9 + .../templates/service.yaml | 16 ++ .../templates/serviceaccount.yaml | 12 ++ .../templates/worker.yaml | 119 ++++++++++ deployment/node-feature-discovery/values.yaml | 203 ++++++++++++++++++ docs/get-started/deployment-and-usage.md | 95 ++++++++ nfd-daemonset-combined.yaml.template | 2 +- nfd-master.yaml.template | 2 + nfd-prune.yaml.template | 2 + nfd-worker-daemonset.yaml.template | 7 + nfd-worker-job.yaml.template | 2 + scripts/prepare-release.sh | 6 + 19 files changed, 698 insertions(+), 1 deletion(-) create mode 100644 deployment/node-feature-discovery/.helmignore create mode 100644 deployment/node-feature-discovery/Chart.yaml create mode 100644 deployment/node-feature-discovery/templates/_helpers.tpl create mode 100644 deployment/node-feature-discovery/templates/clusterrole.yaml create mode 100644 deployment/node-feature-discovery/templates/clusterrolebinding.yaml create mode 100644 deployment/node-feature-discovery/templates/master.yaml create mode 100644 deployment/node-feature-discovery/templates/nfd-worker-conf.yaml create mode 100644 deployment/node-feature-discovery/templates/service.yaml create mode 100644 deployment/node-feature-discovery/templates/serviceaccount.yaml create mode 100644 deployment/node-feature-discovery/templates/worker.yaml create mode 100644 deployment/node-feature-discovery/values.yaml diff --git a/Makefile b/Makefile index 6b1cff7a1..66135034c 100644 --- a/Makefile +++ b/Makefile @@ -52,6 +52,9 @@ E2E_TEST_CONFIG ?= LDFLAGS = -ldflags "-s -w -X sigs.k8s.io/node-feature-discovery/pkg/version.version=$(VERSION) -X sigs.k8s.io/node-feature-discovery/source.pathPrefix=$(HOSTMOUNT_PREFIX)" yaml_templates := $(wildcard *.yaml.template) +# Let's treat values.yaml as template to sync configmap +# and allow users to install without modifications +yaml_templates := $(yaml_templates) deployment/node-feature-discovery/values.yaml yaml_instances := $(patsubst %.yaml.template,%.yaml,$(yaml_templates)) all: image diff --git a/deployment/node-feature-discovery/.helmignore b/deployment/node-feature-discovery/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/node-feature-discovery/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/node-feature-discovery/Chart.yaml b/deployment/node-feature-discovery/Chart.yaml new file mode 100644 index 000000000..42323324e --- /dev/null +++ b/deployment/node-feature-discovery/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +appVersion: master +description: | + Detects hardware features available on each node in a Kubernetes cluster, and advertises + those features using node labels. +name: node-feature-discovery +sources: + - https://github.com/kubernetes-sigs/node-feature-discovery +home: https://github.com/kubernetes-sigs/node-feature-discovery +keywords: + - feature-discovery + - feature-detection + - node-labels +type: application +version: 0.1.0 diff --git a/deployment/node-feature-discovery/templates/_helpers.tpl b/deployment/node-feature-discovery/templates/_helpers.tpl new file mode 100644 index 000000000..73784a547 --- /dev/null +++ b/deployment/node-feature-discovery/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "node-feature-discovery.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "node-feature-discovery.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "node-feature-discovery.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "node-feature-discovery.labels" -}} +helm.sh/chart: {{ include "node-feature-discovery.chart" . }} +{{ include "node-feature-discovery.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "node-feature-discovery.selectorLabels" -}} +app.kubernetes.io/name: {{ include "node-feature-discovery.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "node-feature-discovery.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "node-feature-discovery.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/deployment/node-feature-discovery/templates/clusterrole.yaml b/deployment/node-feature-discovery/templates/clusterrole.yaml new file mode 100644 index 000000000..a4da2303d --- /dev/null +++ b/deployment/node-feature-discovery/templates/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "node-feature-discovery.fullname" . }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - nodes + # when using command line flag --resource-labels to create extended resources + # you will need to uncomment "- nodes/status" + # - nodes/status + verbs: + - get + - patch + - update + - list +{{- end }} diff --git a/deployment/node-feature-discovery/templates/clusterrolebinding.yaml b/deployment/node-feature-discovery/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..4766d9a1b --- /dev/null +++ b/deployment/node-feature-discovery/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "node-feature-discovery.fullname" . }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "node-feature-discovery.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "node-feature-discovery.serviceAccountName" . }} + namespace: {{ $.Release.Namespace }} +{{- end }} diff --git a/deployment/node-feature-discovery/templates/master.yaml b/deployment/node-feature-discovery/templates/master.yaml new file mode 100644 index 000000000..41e6d6a60 --- /dev/null +++ b/deployment/node-feature-discovery/templates/master.yaml @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-master + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + role: master +spec: + replicas: {{ .Values.master.replicaCount }} + selector: + matchLabels: + {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }} + role: master + template: + metadata: + labels: + {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }} + role: master + annotations: + {{- toYaml .Values.master.annotations | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "node-feature-discovery.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.master.podSecurityContext | nindent 8 }} + containers: + - name: master + securityContext: + {{- toYaml .Values.master.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 8080 + name: grpc + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - "nfd-master" + resources: + {{- toYaml .Values.master.resources | nindent 12 }} +## Enable TLS authentication +## The example below assumes having the root certificate named ca.crt stored in +## a ConfigMap named nfd-ca-cert, and, the TLS authentication credentials stored +## in a TLS Secret named nfd-master-cert. 
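+##
+## As an illustrative sketch (not part of this chart; the file names below are
+## assumptions), those objects could be created with commands along these lines:
+##   kubectl create configmap nfd-ca-cert -n <nfd-namespace> --from-file=ca.crt
+##   kubectl create secret tls nfd-master-cert -n <nfd-namespace> --cert=master.crt --key=master.key
+##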
+## Additional hardening can be enabled by specifying --verify-node-name in +## args, in which case every nfd-worker requires a individual node-specific +## TLS certificate. +# args: +# - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt" +# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" +# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" +# volumeMounts: +# - name: nfd-ca-cert +# mountPath: "/etc/kubernetes/node-feature-discovery/trust" +# readOnly: true +# - name: nfd-master-cert +# mountPath: "/etc/kubernetes/node-feature-discovery/certs" +# readOnly: true +# volumes: +# - name: nfd-ca-cert +# configMap: +# name: nfd-ca-cert +# - name: nfd-master-cert +# secret: +# secretName: nfd-master-cert + {{- with .Values.master.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/deployment/node-feature-discovery/templates/nfd-worker-conf.yaml b/deployment/node-feature-discovery/templates/nfd-worker-conf.yaml new file mode 100644 index 000000000..b3b735cd1 --- /dev/null +++ b/deployment/node-feature-discovery/templates/nfd-worker-conf.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.worker.configmapName }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} +data: + nfd-worker.conf: | + {{ .Values.worker.config | indent 4 }} diff --git a/deployment/node-feature-discovery/templates/service.yaml b/deployment/node-feature-discovery/templates/service.yaml new file mode 100644 index 000000000..97d0a5878 --- /dev/null +++ b/deployment/node-feature-discovery/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-master + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + role: master +spec: + type: {{ .Values.master.service.type }} + ports: + - port: {{ .Values.master.service.port }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + {{- include "node-feature-discovery.selectorLabels" . | nindent 4 }} diff --git a/deployment/node-feature-discovery/templates/serviceaccount.yaml b/deployment/node-feature-discovery/templates/serviceaccount.yaml new file mode 100644 index 000000000..e4b09bad9 --- /dev/null +++ b/deployment/node-feature-discovery/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "node-feature-discovery.serviceAccountName" . }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/deployment/node-feature-discovery/templates/worker.yaml b/deployment/node-feature-discovery/templates/worker.yaml new file mode 100644 index 000000000..7aefef812 --- /dev/null +++ b/deployment/node-feature-discovery/templates/worker.yaml @@ -0,0 +1,119 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-worker + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + role: worker +spec: + selector: + matchLabels: + {{- include "node-feature-discovery.selectorLabels" . 
| nindent 6 }} + role: worker + template: + metadata: + labels: + {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }} + role: worker + annotations: + {{- toYaml .Values.worker.annotations | nindent 8 }} + spec: + dnsPolicy: ClusterFirstWithHostNet + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + containers: + - name: worker + securityContext: + {{- toYaml .Values.worker.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + {{- toYaml .Values.worker.resources | nindent 12 }} + command: + - "nfd-worker" + args: + - "--sleep-interval=60s" + - "--server={{ include "node-feature-discovery.fullname" . }}-master:{{ .Values.master.service.port }}" +## Enable TLS authentication (1/3) +## The example below assumes having the root certificate named ca.crt stored in +## a ConfigMap named nfd-ca-cert, and, the TLS authentication credentials stored +## in a TLS Secret named nfd-worker-cert +# - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt" +# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" +# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" + volumeMounts: + - name: host-boot + mountPath: "/host-boot" + readOnly: true + - name: host-os-release + mountPath: "/host-etc/os-release" + readOnly: true + - name: host-sys + mountPath: "/host-sys" + readOnly: true + - name: source-d + mountPath: "/etc/kubernetes/node-feature-discovery/source.d/" + readOnly: true + - name: features-d + mountPath: "/etc/kubernetes/node-feature-discovery/features.d/" + readOnly: true + - name: nfd-worker-conf + mountPath: "/etc/kubernetes/node-feature-discovery" + readOnly: true +## Enable TLS authentication (2/3) +# - name: nfd-ca-cert +# mountPath: "/etc/kubernetes/node-feature-discovery/trust" +# readOnly: true +# - name: nfd-worker-cert +# mountPath: "/etc/kubernetes/node-feature-discovery/certs" +# readOnly: true + volumes: + - name: host-boot + hostPath: + path: "/boot" + - name: host-os-release + hostPath: + path: "/etc/os-release" + - name: host-sys + hostPath: + path: "/sys" + - name: source-d + hostPath: + path: "/etc/kubernetes/node-feature-discovery/source.d/" + - name: features-d + hostPath: + path: "/etc/kubernetes/node-feature-discovery/features.d/" + - name: nfd-worker-conf + configMap: + name: {{ .Values.worker.configmapName }} + items: + - key: nfd-worker.conf + path: nfd-worker.conf +## Enable TLS authentication (3/3) +# - name: nfd-ca-cert +# configMap: +# name: nfd-ca-cert +# - name: nfd-worker-cert +# secret: +# secretName: nfd-worker-cert + {{- with .Values.worker.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.worker.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.worker.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/deployment/node-feature-discovery/values.yaml b/deployment/node-feature-discovery/values.yaml new file mode 100644 index 000000000..29500851e --- /dev/null +++ b/deployment/node-feature-discovery/values.yaml @@ -0,0 +1,203 @@ +image: + repository: gcr.io/k8s-staging-nfd/node-feature-discovery + # This should be set to 'IfNotPresent' for released version + pullPolicy: Always + # tag, if defined will use the given image tag, else Chart.AppVersion will be used + # tag +imagePullSecrets: [] + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +nameOverride: "" +fullnameOverride: "" + +master: + replicaCount: 1 + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] + readOnlyRootFilesystem: true + runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 8080 + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "" + effect: "NoSchedule" + + annotations: {} + + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: In + values: [""] + +worker: + configmapName: nfd-worker-conf + config: |### + #core: + # labelWhiteList: + # noPublish: false + # sleepInterval: 60s + # sources: [all] + #sources: + # cpu: + # cpuid: + ## NOTE: whitelist has priority over blacklist + # attributeBlacklist: + # - "BMI1" + # - "BMI2" + # - "CLMUL" + # - "CMOV" + # - "CX16" + # - "ERMS" + # - "F16C" + # - "HTT" + # - "LZCNT" + # - "MMX" + # - "MMXEXT" + # - "NX" + # - "POPCNT" + # - "RDRAND" + # - "RDSEED" + # - "RDTSCP" + # - "SGX" + # - "SSE" + # - "SSE2" + # - "SSE3" + # - "SSE4.1" + # - "SSE4.2" + # - "SSSE3" + # attributeWhitelist: + # kernel: + # kconfigFile: "/path/to/kconfig" + # configOpts: + # - "NO_HZ" + # - "X86" + # - "DMI" + # pci: + # deviceClassWhitelist: + # - "0200" + # - "03" + # - "12" + # deviceLabelFields: + # - "class" + # - "vendor" + # - "device" + # - "subsystem_vendor" + # - "subsystem_device" + # usb: + # deviceClassWhitelist: + # - "0e" + # - "ef" + # - "fe" + # - "ff" + # deviceLabelFields: + # - "class" + # - "vendor" + # - "device" + # custom: + # - name: "my.kernel.feature" + # matchOn: + # - loadedKMod: ["example_kmod1", "example_kmod2"] + # - name: "my.pci.feature" + # matchOn: + # - pciId: + # class: ["0200"] + # vendor: ["15b3"] + # device: ["1014", "1017"] + # - pciId : + # vendor: ["8086"] + # device: ["1000", "1100"] + # - name: "my.usb.feature" + # matchOn: + # - usbId: + # class: ["ff"] + # vendor: ["03e7"] + # device: ["2485"] + # - usbId: + # class: ["fe"] + # vendor: ["1a6e"] + # device: ["089a"] + # - name: "my.combined.feature" + # matchOn: + # - pciId: + # vendor: 
["15b3"] + # device: ["1014", "1017"] + # loadedKMod : ["vendor_kmod1", "vendor_kmod2"] +### + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] + readOnlyRootFilesystem: true + runAsNonRoot: true + # runAsUser: 1000 + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: {} + + annotations: {} + +## RBAC parameteres +## https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +## +rbac: + create: true + serviceAccountName: + ## Annotations for the Service Account + ## + serviceAccountAnnotations: {} diff --git a/docs/get-started/deployment-and-usage.md b/docs/get-started/deployment-and-usage.md index d4ec9dcc7..e09a9dc45 100644 --- a/docs/get-started/deployment-and-usage.md +++ b/docs/get-started/deployment-and-usage.md @@ -99,6 +99,101 @@ this approach does not guarantee running once on every node. For example, tainted, non-ready nodes or some other reasons in Job scheduling may cause some node(s) will run extra job instance(s) to satisfy the request. +### Deployment with Helm + +Node Feature Discovery Helm chart allow to easily deploy and manage NFD. + +#### Prerequisites + +[Helm package manager](https://helm.sh/) should be installed. + +#### Deployment with Helm + +To install the chart with the release name node-feature-discovery: + +```bash +git clone https://github.com/kubernetes-sigs/node-feature-discovery/ +cd node-feature-discovery/deployment +export NFD_NS=node-feature-discovery +helm install node-feature-discovery ./node-feature-discovery/ --namespace $NFD_NS --create-namespace +``` + +The command deploys Node Feature Discovery on the Kubernetes cluster in the default configuration. +The Configuration section describes how it can be configured during installation. + +#### Configuration + +You can override values from `values.yaml` and provide a file with custom values: + +```bash +export NFD_NS=node-feature-discovery +helm install node-feature-discovery ./node-feature-discovery/ -f --namespace $NFD_NS --create-namespace +``` + +To specify each parameter separately you can provide them to helm install command: + +```bash +export NFD_NS=node-feature-discovery +helm install node-feature-discovery ./node-feature-discovery/ --set nameOverride=NFDinstance --set master.replicaCount=2 --namespace $NFD_NS --create-namespace +``` + +#### Uninstalling the Chart + +To uninstall the `node-feature-discovery` deployment: + +```bash +export NFD_NS=node-feature-discovery +helm uninstall node-feature-discovery --namespace $NFD_NS +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +#### Chart Parameters + +In order to tailor the deployment of the Node Feature Discovery to your cluster needs +We have introduced the following Chart parameters. 
+
+##### General parameters
+
+| Name | Type | Default | Description |
+| ---- | ---- | ------- | ----------- |
+| `image.repository` | string | `gcr.io/k8s-staging-nfd/node-feature-discovery` | NFD image repository |
+| `image.pullPolicy` | string | `Always` | Image pull policy |
+| `imagePullSecrets` | list | [] | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. [More info](https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod) |
+| `serviceAccount.create` | bool | true | Specifies whether a service account should be created |
+| `serviceAccount.annotations` | dict | {} | Annotations to add to the service account |
+| `serviceAccount.name` | string | | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| `rbac` | dict | | RBAC [parameters](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) |
+| `nameOverride` | string | | Override the name of the chart |
+| `fullnameOverride` | string | | Override a default fully qualified app name |
+
+##### Master pod parameters
+
+| Name | Type | Default | Description |
+| ---- | ---- | ------- | ----------- |
+| `master.*` | dict | | NFD master deployment configuration |
+| `master.replicaCount` | integer | 1 | Number of desired nfd-master pods |
+| `master.podSecurityContext` | dict | {} | SecurityContext holds pod-level security attributes and common container settings |
+| `master.service.type` | string | ClusterIP | NFD master service type |
+| `master.service.port` | integer | 8080 | NFD master service port |
+| `master.resources` | dict | {} | NFD master pod [resources management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) |
+| `master.nodeSelector` | dict | {} | NFD master pod [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) |
+| `master.tolerations` | dict | _Scheduling to master node is disabled_ | NFD master pod [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
+| `master.annotations` | dict | {} | NFD master pod [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) |
+| `master.affinity` | dict | | NFD master pod required [node affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/) |
+
+##### Worker pod parameters
+
+| Name | Type | Default | Description |
+| ---- | ---- | ------- | ----------- |
+| `worker.*` | dict | | NFD worker daemonset configuration |
+| `worker.configmapName` | string | `nfd-worker-conf` | NFD worker pod ConfigMap name |
+| `worker.config` | string | example configuration (commented out) | NFD worker configuration (contents of nfd-worker.conf) |
+| `worker.podSecurityContext` | dict | {} | SecurityContext holds pod-level security attributes and common container settings |
+| `worker.securityContext` | dict | {} | Container [security settings](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) |
+| `worker.resources` | dict | {} | NFD worker pod [resources management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) |
+| `worker.nodeSelector` | dict | {} | NFD worker pod [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) | 
+| `worker.tolerations` | dict | {} | NFD worker pod [node tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) | +| `worker.annotations` | dict | {} | NFD worker pod [metadata](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) | + + ### Build Your Own If you want to use the latest development version (master branch) you need to diff --git a/nfd-daemonset-combined.yaml.template b/nfd-daemonset-combined.yaml.template index 5dcc3c2a0..2bb4f148f 100644 --- a/nfd-daemonset-combined.yaml.template +++ b/nfd-daemonset-combined.yaml.template @@ -1,5 +1,5 @@ # This template contains an example of running nfd-master and nfd-worker in the -# same pod. +# same pod. All changes in this template should be applied to Helm chart too. # apiVersion: v1 kind: Namespace diff --git a/nfd-master.yaml.template b/nfd-master.yaml.template index 8af53a40a..c628324fd 100644 --- a/nfd-master.yaml.template +++ b/nfd-master.yaml.template @@ -1,3 +1,5 @@ +# All changes in this template should be applied to Helm chart too. +# apiVersion: v1 kind: Namespace metadata: diff --git a/nfd-prune.yaml.template b/nfd-prune.yaml.template index 90bc5c654..a154683ee 100644 --- a/nfd-prune.yaml.template +++ b/nfd-prune.yaml.template @@ -1,3 +1,5 @@ +# All changes in this template should be applied to Helm chart too. +# apiVersion: v1 kind: Namespace metadata: diff --git a/nfd-worker-daemonset.yaml.template b/nfd-worker-daemonset.yaml.template index 6f700e107..ec04a4e52 100644 --- a/nfd-worker-daemonset.yaml.template +++ b/nfd-worker-daemonset.yaml.template @@ -1,3 +1,5 @@ +# All changes in this template should be applied to Helm chart too. +# apiVersion: apps/v1 kind: DaemonSet metadata: @@ -30,6 +32,11 @@ spec: drop: ["ALL"] readOnlyRootFilesystem: true runAsNonRoot: true + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Equal" + value: "" + effect: "NoSchedule" command: - "nfd-worker" args: diff --git a/nfd-worker-job.yaml.template b/nfd-worker-job.yaml.template index bb19c023e..6eaf9ec49 100644 --- a/nfd-worker-job.yaml.template +++ b/nfd-worker-job.yaml.template @@ -1,3 +1,5 @@ +# All changes in this template should be applied to Helm chart too. +# apiVersion: batch/v1 kind: Job metadata: diff --git a/scripts/prepare-release.sh b/scripts/prepare-release.sh index 5e3d5097a..cc9b52a63 100755 --- a/scripts/prepare-release.sh +++ b/scripts/prepare-release.sh @@ -70,6 +70,12 @@ sed -E -e s",^([[:space:]]+)image:.+$,\1image: $container_image," \ -e s",^([[:space:]]+)imagePullPolicy:.+$,\1imagePullPolicy: IfNotPresent," \ -i *yaml.template +# Patch Helm chart +sed -e s"/appVersion:.*/appVersion: $release/" -i deployment/node-feature-discovery/Chart.yaml +sed -e s"/pullPolicy:.*/pullPolicy: IfNotPresent/" \ + -e s"!gcr.io/k8s-staging-nfd/node-feature-discovery!k8s.gcr.io/nfd/node-feature-discovery!" \ + -i deployment/node-feature-discovery/values.yaml + # Patch e2e test echo Patching test/e2e/node_feature_discovery.go flag defaults to k8s.gcr.io/nfd/node-feature-discovery and $release sed -e s'!"nfd\.repo",.*,!"nfd.repo", "k8s.gcr.io/nfd/node-feature-discovery",!' \