mirror of https://github.com/kyverno/kyverno.git
synced 2024-12-14 11:57:48 +00:00

feat: improve cleanup policies controller and chart (#5628)

* feat: improve cleanup policies controller
* helm improvements
* todo

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

This commit is contained in:
parent 99d988e98c
commit d16bdba1d4

10 changed files with 368 additions and 63 deletions
@@ -214,26 +214,41 @@ The command removes all the Kubernetes components associated with the chart and
 | grafana.enabled | bool | `false` | Enable grafana dashboard creation. |
 | grafana.namespace | string | `nil` | Namespace to create the grafana dashboard configmap. If not set, it will be created in the same namespace where the chart is deployed. |
 | grafana.annotations | object | `{}` | Grafana dashboard configmap annotations. |
+| cleanupController.enabled | bool | `true` | Enable cleanup controller. |
 | cleanupController.rbac.create | bool | `true` | Create RBAC resources |
 | cleanupController.rbac.serviceAccount.name | string | `nil` | Service account name |
 | cleanupController.rbac.clusterRole.extraResources | list | `[]` | Extra resource permissions to add in the cluster role |
-| cleanupController.enabled | bool | `true` | Enable cleanup controller. |
 | cleanupController.image.registry | string | `nil` | Image registry |
 | cleanupController.image.repository | string | `"ghcr.io/kyverno/cleanup-controller"` | Image repository |
 | cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
 | cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
 | cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
+| cleanupController.replicas | int | `nil` | Desired number of pods |
+| cleanupController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
+| cleanupController.priorityClassName | string | `""` | Optional priority class |
+| cleanupController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
+| cleanupController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
 | cleanupController.args | list | `["--cleanupService=https://{{ template \"kyverno.cleanup-controller.deploymentName\" . }}.{{ template \"kyverno.namespace\" . }}.svc"]` | Arguments passed to the container on the command line |
-| cleanupController.service.port | int | `443` | Service port. |
-| cleanupController.service.type | string | `"ClusterIP"` | Service type. |
-| cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |
-| cleanupController.service.annotations | object | `{}` | Service annotations. |
+| cleanupController.resources.limits | object | `{"memory":"128Mi"}` | Pod resource limits |
+| cleanupController.resources.requests | object | `{"cpu":"100m","memory":"64Mi"}` | Pod resource requests |
 | cleanupController.startupProbe | object | See [values.yaml](values.yaml) | Startup probe. The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
 | cleanupController.livenessProbe | object | See [values.yaml](values.yaml) | Liveness probe. The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
 | cleanupController.readinessProbe | object | See [values.yaml](values.yaml) | Readiness Probe. The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
 | cleanupController.nodeSelector | object | `{}` | Node labels for pod assignment |
 | cleanupController.tolerations | list | `[]` | List of node taints to tolerate |
+| cleanupController.antiAffinity.enabled | bool | `true` | Pod antiAffinities toggle. Enabled by default but can be disabled if you want to schedule pods to the same node. |
+| cleanupController.podAntiAffinity | object | See [values.yaml](values.yaml) | Pod anti affinity constraints. |
+| cleanupController.podAffinity | object | `{}` | Pod affinity constraints. |
+| cleanupController.nodeAffinity | object | `{}` | Node affinity constraints. |
 | cleanupController.topologySpreadConstraints | list | `[]` | Topology spread constraints. |
+| cleanupController.podSecurityContext | object | `{}` | Security context for the pod |
+| cleanupController.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the containers |
+| cleanupController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. |
+| cleanupController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. |
+| cleanupController.service.port | int | `443` | Service port. |
+| cleanupController.service.type | string | `"ClusterIP"` | Service type. |
+| cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |
+| cleanupController.service.annotations | object | `{}` | Service annotations. |
 
 ## TLS Configuration
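Taken together, the new chart options above can be exercised with a small values override. A minimal sketch (the file name and chosen values are illustrative; the keys come from the table):

# my-values.yaml
cleanupController:
  enabled: true
  replicas: 3                # the new PodDisruptionBudget template only renders when replicas > 1
  podDisruptionBudget:
    minAvailable: 1          # mutually exclusive with maxUnavailable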
@@ -92,7 +92,7 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end -}}
 
 {{/* Create the default PodDisruptionBudget to use */}}
-{{- define "podDisruptionBudget.spec" -}}
+{{- define "kyverno.podDisruptionBudget.spec" -}}
 {{- if and .Values.podDisruptionBudget.minAvailable .Values.podDisruptionBudget.maxUnavailable }}
 {{- fail "Cannot set both .Values.podDisruptionBudget.minAvailable and .Values.podDisruptionBudget.maxUnavailable" -}}
 {{- end }}
@@ -10,7 +10,7 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/name: {{ template "kyverno.name" . }}
 app.kubernetes.io/part-of: {{ template "kyverno.name" . }}
-app.kubernetes.io/version: "{{ .Chart.Version }}"
+app.kubernetes.io/version: "{{ .Chart.Version | replace "+" "_" }}"
 helm.sh/chart: {{ template "kyverno.chart" . }}
 {{- end -}}
@@ -36,3 +36,25 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{ required "A service account name is required when `rbac.create` is set to `false`" .Values.cleanupController.rbac.serviceAccount.name }}
 {{- end -}}
 {{- end -}}
+
+{{- define "kyverno.cleanup-controller.securityContext" -}}
+{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
+{{ toYaml (omit .Values.cleanupController.securityContext "seccompProfile") }}
+{{- else }}
+{{ toYaml .Values.cleanupController.securityContext }}
+{{- end }}
+{{- end }}
+
+{{/* Create the default PodDisruptionBudget to use */}}
+{{- define "kyverno.cleanup-controller.podDisruptionBudget.spec" -}}
+{{- if and .Values.cleanupController.podDisruptionBudget.minAvailable .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
+{{- fail "Cannot set both .Values.cleanupController.podDisruptionBudget.minAvailable and .Values.cleanupController.podDisruptionBudget.maxUnavailable" -}}
+{{- end }}
+{{- if not .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
+minAvailable: {{ default 1 .Values.cleanupController.podDisruptionBudget.minAvailable }}
+{{- end }}
+{{- if .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
+maxUnavailable: {{ .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
+{{- end }}
+{{- end }}
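A quick sanity check of the `kyverno.cleanup-controller.podDisruptionBudget.spec` helper defined above: setting both `minAvailable` and `maxUnavailable` trips the `fail` guard and aborts rendering, while the chart defaults (`minAvailable: 1`, `maxUnavailable` unset) take only the first branch, so the helper emits a single line (sketch under default values):

minAvailable: 1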
@@ -7,7 +7,11 @@ metadata:
     {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
   namespace: {{ template "kyverno.namespace" . }}
 spec:
-  replicas: 1
+  replicas: {{ .Values.cleanupController.replicas }}
+  {{- with .Values.cleanupController.updateStrategy }}
+  strategy:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 6 }}
@@ -16,6 +20,14 @@ spec:
       labels:
         {{- include "kyverno.cleanup-controller.labels" . | nindent 8 }}
     spec:
+      {{- with .Values.cleanupController.image.pullSecrets }}
+      imagePullSecrets:
+        {{- tpl (toYaml .) $ | nindent 8 }}
+      {{- end }}
+      {{- with .Values.cleanupController.podSecurityContext }}
+      securityContext:
+        {{- tpl (toYaml .) $ | nindent 8 }}
+      {{- end }}
       {{- with .Values.cleanupController.nodeSelector }}
       nodeSelector:
         {{- tpl (toYaml .) $ | nindent 8 }}
@@ -28,6 +40,32 @@ spec:
       topologySpreadConstraints:
         {{- tpl (toYaml .) $ | nindent 8 }}
       {{- end }}
+      {{- with .Values.cleanupController.priorityClassName }}
+      priorityClassName: {{ . | quote }}
+      {{- end }}
+      {{- with .Values.cleanupController.hostNetwork }}
+      hostNetwork: {{ . }}
+      {{- end }}
+      {{- with .Values.cleanupController.dnsPolicy }}
+      dnsPolicy: {{ . }}
+      {{- end }}
+      {{- if or .Values.cleanupController.antiAffinity.enabled .Values.cleanupController.podAffinity .Values.cleanupController.nodeAffinity }}
+      affinity:
+        {{- if .Values.cleanupController.antiAffinity.enabled }}
+        {{- with .Values.cleanupController.podAntiAffinity }}
+        podAntiAffinity:
+          {{- tpl (toYaml .) $ | nindent 10 }}
+        {{- end }}
+        {{- end }}
+        {{- with .Values.cleanupController.podAffinity }}
+        podAffinity:
+          {{- tpl (toYaml .) $ | nindent 10 }}
+        {{- end }}
+        {{- with .Values.cleanupController.nodeAffinity }}
+        nodeAffinity:
+          {{- tpl (toYaml .) $ | nindent 10 }}
+        {{- end }}
+      {{- end }}
       serviceAccountName: {{ template "kyverno.cleanup-controller.serviceAccountName" . }}
       containers:
         - name: controller
@@ -44,10 +82,18 @@ spec:
             {{- tpl (toYaml .) $ | nindent 12 }}
           {{- end }}
           env:
+          - name: METRICS_CONFIG
+            value: {{ template "kyverno.metricsConfigMapName" . }}
           - name: KYVERNO_NAMESPACE
             valueFrom:
               fieldRef:
                 fieldPath: metadata.namespace
+          {{- with .Values.cleanupController.resources }}
+          resources: {{ tpl (toYaml .) $ | nindent 12 }}
+          {{- end }}
+          {{- if .Values.cleanupController.securityContext }}
+          securityContext: {{ include "kyverno.cleanup-controller.securityContext" . | nindent 12 }}
+          {{- end }}
          {{- with .Values.cleanupController.startupProbe }}
          startupProbe:
            {{- tpl (toYaml .) $ | nindent 12 }}
@@ -0,0 +1,20 @@
+{{- if .Values.cleanupController.enabled -}}
+{{- if (gt (int .Values.cleanupController.replicas) 1) -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+apiVersion: policy/v1
+{{- else -}}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
+  labels:
+    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
+  namespace: {{ template "kyverno.namespace" . }}
+spec:
+  {{- include "kyverno.cleanup-controller.podDisruptionBudget.spec" . | indent 2 }}
+  selector:
+    matchLabels:
+      {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 6 }}
+{{- end -}}
+{{- end -}}
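For orientation, a sketch of what this new template renders when `cleanupController.replicas` is greater than 1 on a cluster serving `policy/v1` (the `...` lines stand in for the chart helpers and are illustrative):

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: ...        # kyverno.cleanup-controller.deploymentName
  namespace: ...   # kyverno.namespace
spec:
  minAvailable: 1  # from the podDisruptionBudget.spec helper with default values
  selector:
    matchLabels:
      ...          # kyverno.cleanup-controller.matchLabels

Note the guard: with the default `replicas: ~`, `gt (int ...) 1` is false, so no PodDisruptionBudget is emitted for a single replica.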
@@ -7,11 +7,12 @@ apiVersion: policy/v1beta1
 kind: PodDisruptionBudget
 metadata:
   name: {{ template "kyverno.fullname" . }}
-  labels: {{ include "kyverno.labels" . | nindent 4 }}
+  labels:
+    {{- include "kyverno.labels" . | nindent 4 }}
     app: kyverno
   namespace: {{ template "kyverno.namespace" . }}
 spec:
-  {{- include "podDisruptionBudget.spec" . | indent 2 }}
+  {{- include "kyverno.podDisruptionBudget.spec" . | indent 2 }}
   selector:
     matchLabels: {{ include "kyverno.matchLabels" . | nindent 6 }}
       app: kyverno
@@ -477,6 +477,10 @@ grafana:
   annotations: {}
 
 cleanupController:
+
+  # -- Enable cleanup controller.
+  enabled: true
+
   rbac:
     # -- Create RBAC resources
     create: true
@@ -493,10 +497,6 @@ cleanupController:
   # resources:
   # - pods
 
-
-  # -- Enable cleanup controller.
-  enabled: true
-
   image:
     # -- Image registry
     registry:
@@ -514,10 +514,144 @@ cleanupController:
     pullSecrets: []
     # - secretName
 
+  # -- (int) Desired number of pods
+  replicas: ~
+
+  # -- Deployment update strategy.
+  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+  # @default -- See [values.yaml](values.yaml)
+  updateStrategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 40%
+    type: RollingUpdate
+
+  # -- Optional priority class
+  priorityClassName: ''
+
+  # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace.
+  # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS.
+  # Update the `dnsPolicy` accordingly as well to suit the host network mode.
+  hostNetwork: false
+
+  # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
+  # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
+  # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
+  dnsPolicy: ClusterFirst
+
   # -- Arguments passed to the container on the command line
   args:
     - --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc
 
+  resources:
+    # -- Pod resource limits
+    limits:
+      memory: 128Mi
+    # -- Pod resource requests
+    requests:
+      cpu: 100m
+      memory: 64Mi
+
+  # -- Startup probe.
+  # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
+  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  # @default -- See [values.yaml](values.yaml)
+  startupProbe:
+    httpGet:
+      path: /health/liveness
+      port: 9443
+      scheme: HTTPS
+    failureThreshold: 20
+    initialDelaySeconds: 2
+    periodSeconds: 6
+
+  # -- Liveness probe.
+  # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
+  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  # @default -- See [values.yaml](values.yaml)
+  livenessProbe:
+    httpGet:
+      path: /health/liveness
+      port: 9443
+      scheme: HTTPS
+    initialDelaySeconds: 15
+    periodSeconds: 30
+    timeoutSeconds: 5
+    failureThreshold: 2
+    successThreshold: 1
+
+  # -- Readiness Probe.
+  # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
+  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  # @default -- See [values.yaml](values.yaml)
+  readinessProbe:
+    httpGet:
+      path: /health/readiness
+      port: 9443
+      scheme: HTTPS
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
+
+  # -- Node labels for pod assignment
+  nodeSelector: {}
+
+  # -- List of node taints to tolerate
+  tolerations: []
+
+  antiAffinity:
+    # -- Pod antiAffinities toggle.
+    # Enabled by default but can be disabled if you want to schedule pods to the same node.
+    enabled: true
+
+  # -- Pod anti affinity constraints.
+  # @default -- See [values.yaml](values.yaml)
+  podAntiAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: 1
+        podAffinityTerm:
+          labelSelector:
+            matchExpressions:
+              - key: app.kubernetes.io/component
+                operator: In
+                values:
+                  - cleanup-controller
+          topologyKey: kubernetes.io/hostname
+
+  # -- Pod affinity constraints.
+  podAffinity: {}
+
+  # -- Node affinity constraints.
+  nodeAffinity: {}
+
+  # -- Topology spread constraints.
+  topologySpreadConstraints: []
+
+  # -- Security context for the pod
+  podSecurityContext: {}
+
+  # -- Security context for the containers
+  securityContext:
+    runAsNonRoot: true
+    privileged: false
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+    capabilities:
+      drop:
+        - ALL
+    seccompProfile:
+      type: RuntimeDefault
+
+  podDisruptionBudget:
+    # -- Configures the minimum available pods for disruptions.
+    # Cannot be used if `maxUnavailable` is set.
+    minAvailable: 1
+    # -- Configures the maximum unavailable pods for disruptions.
+    # Cannot be used if `minAvailable` is set.
+    maxUnavailable:
+
   service:
     # -- Service port.
     port: 443
@@ -528,30 +662,3 @@ cleanupController:
     nodePort:
     # -- Service annotations.
     annotations: {}
-
-  # -- Startup probe.
-  # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  startupProbe: {}
-
-  # -- Liveness probe.
-  # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  livenessProbe: {}
-
-  # -- Readiness Probe.
-  # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  readinessProbe: {}
-
-  # -- Node labels for pod assignment
-  nodeSelector: {}
-
-  # -- List of node taints to tolerate
-  tolerations: []
-
-  # -- Topology spread constraints.
-  topologySpreadConstraints: []
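Because the probe blocks are forwarded into the deployment verbatim, tuning them is a plain values override; Helm merges maps, so overriding one field keeps the chart defaults for the rest. A minimal sketch (the looser period is just an example; keys are from the defaults above):

cleanupController:
  livenessProbe:
    periodSeconds: 60   # probe less often than the chart default of 30s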
@@ -16,6 +16,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
 	"github.com/kyverno/kyverno/pkg/metrics"
+	"github.com/kyverno/kyverno/pkg/webhooks"
 	corev1 "k8s.io/api/core/v1"
 	kubeinformers "k8s.io/client-go/informers"
 )
@@ -24,10 +25,34 @@ const (
 	resyncPeriod = 15 * time.Minute
 )
 
+// TODO:
+// - implement probes
+// - better certs management
+// - supports certs in cronjob
+// - leader election support
+// - helm service monitor
+// - helm name and fullname
+// - helm review labels / selectors
+// - helm metrics service
+
+type probes struct{}
+
+func (probes) IsReady() bool {
+	return true
+}
+
+func (probes) IsLive() bool {
+	return true
+}
+
 func main() {
-	var cleanupService string
+	var (
+		cleanupService string
+		dumpPayload    bool
+	)
 	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
 	flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")
+	flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
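With `dumpPayload` registered on the flag set, debug dumping can be switched on from the chart through the existing `cleanupController.args` list; a sketch of such an override (the flag name comes from this diff, the value is an example):

cleanupController:
  args:
    - --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc
    - --dumpPayload=true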
@@ -91,6 +116,11 @@ func main() {
 		},
 		admissionHandlers.Validate,
 		cleanupHandlers.Cleanup,
+		metricsConfig,
+		webhooks.DebugModeOptions{
+			DumpPayload: dumpPayload,
+		},
+		probes{},
 	)
 	// start server
 	server.Run(ctx.Done())
@@ -8,8 +8,11 @@ import (
 
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
 	"github.com/kyverno/kyverno/pkg/logging"
+	"github.com/kyverno/kyverno/pkg/metrics"
+	"github.com/kyverno/kyverno/pkg/webhooks"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	admissionv1 "k8s.io/api/admission/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -37,41 +40,57 @@
 	CleanupHandler = func(context.Context, logr.Logger, string, time.Time) error
 )
 
+type Probes interface {
+	IsReady() bool
+	IsLive() bool
+}
+
 // NewServer creates new instance of server accordingly to given configuration
 func NewServer(
 	tlsProvider TlsProvider,
 	validationHandler ValidationHandler,
 	cleanupHandler CleanupHandler,
+	metricsConfig metrics.MetricsConfigManager,
+	debugModeOpts webhooks.DebugModeOptions,
+	probes Probes,
 ) Server {
 	policyLogger := logging.WithName("cleanup-policy")
 	cleanupLogger := logging.WithName("cleanup")
+	cleanupHandlerFunc := func(w http.ResponseWriter, r *http.Request) {
+		policy := r.URL.Query().Get("policy")
+		logger := cleanupLogger.WithValues("policy", policy)
+		err := cleanupHandler(r.Context(), logger, policy, time.Now())
+		if err == nil {
+			w.WriteHeader(http.StatusOK)
+		} else {
+			if apierrors.IsNotFound(err) {
+				w.WriteHeader(http.StatusNotFound)
+			} else {
+				w.WriteHeader(http.StatusInternalServerError)
+			}
+		}
+	}
 	mux := httprouter.New()
 	mux.HandlerFunc(
 		"POST",
 		validatingWebhookServicePath,
 		handlers.FromAdmissionFunc("VALIDATE", validationHandler).
+			WithDump(debugModeOpts.DumpPayload).
 			WithSubResourceFilter().
+			WithMetrics(policyLogger, metricsConfig.Config(), metrics.WebhookValidating).
 			WithAdmission(policyLogger.WithName("validate")).
 			ToHandlerFunc(),
 	)
 	mux.HandlerFunc(
 		"GET",
 		cleanup.CleanupServicePath,
-		func(w http.ResponseWriter, r *http.Request) {
-			policy := r.URL.Query().Get("policy")
-			logger := cleanupLogger.WithValues("policy", policy)
-			err := cleanupHandler(r.Context(), logger, policy, time.Now())
-			if err == nil {
-				w.WriteHeader(http.StatusOK)
-			} else {
-				if apierrors.IsNotFound(err) {
-					w.WriteHeader(http.StatusNotFound)
-				} else {
-					w.WriteHeader(http.StatusInternalServerError)
-				}
-			}
-		},
+		handlers.HttpHandler(cleanupHandlerFunc).
+			WithMetrics(policyLogger).
+			WithTrace("CLEANUP").
+			ToHandlerFunc(),
 	)
+	mux.HandlerFunc("GET", config.LivenessServicePath, handlers.Probe(probes.IsLive))
+	mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(probes.IsReady))
 	return &server{
 		server: &http.Server{
 			Addr: ":9443",
@@ -2,6 +2,7 @@ package handlers
 
 import (
 	"context"
+	"net/http"
 	"strings"
 	"time"
 
@@ -11,6 +12,7 @@ import (
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric/global"
 	"go.opentelemetry.io/otel/metric/instrument"
+	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
 	admissionv1 "k8s.io/api/admission/v1"
 )
@@ -20,14 +22,14 @@ func (inner AdmissionHandler) WithMetrics(logger logr.Logger, metricsConfig conf
 
 func (inner AdmissionHandler) withMetrics(logger logr.Logger, metricsConfig config.MetricsConfiguration, attrs ...attribute.KeyValue) AdmissionHandler {
 	meter := global.MeterProvider().Meter(metrics.MeterName)
-	admissionRequestsMetric, err := meter.SyncInt64().Counter(
+	requestsMetric, err := meter.SyncInt64().Counter(
 		"kyverno_admission_requests_total",
 		instrument.WithDescription("can be used to track the number of admission requests encountered by Kyverno in the cluster"),
 	)
 	if err != nil {
 		logger.Error(err, "Failed to create instrument, kyverno_admission_requests_total")
 	}
-	admissionReviewDurationMetric, err := meter.SyncFloat64().Histogram(
+	durationMetric, err := meter.SyncFloat64().Histogram(
 		"kyverno_admission_review_duration_seconds",
 		instrument.WithDescription("can be used to track the latencies (in seconds) associated with the entire individual admission review. For example, if an incoming request trigger, say, five policies, this metric will track the e2e latency associated with the execution of all those policies"),
 	)
@@ -50,17 +52,60 @@
 			attribute.Bool("request_allowed", allowed),
 		}
 		attributes = append(attributes, attrs...)
-		if admissionReviewDurationMetric != nil {
+		if durationMetric != nil {
 			defer func() {
 				latency := int64(time.Since(startTime))
-				admissionReviewLatencyDurationInSeconds := float64(latency) / float64(1000*1000*1000)
-				admissionReviewDurationMetric.Record(ctx, admissionReviewLatencyDurationInSeconds, attributes...)
+				durationInSeconds := float64(latency) / float64(1000*1000*1000)
+				durationMetric.Record(ctx, durationInSeconds, attributes...)
 			}()
 		}
-		if admissionRequestsMetric != nil {
-			admissionRequestsMetric.Add(ctx, 1, attributes...)
+		if requestsMetric != nil {
+			requestsMetric.Add(ctx, 1, attributes...)
 		}
 	}
 	return response
 	}
 }
 
+func (inner HttpHandler) WithMetrics(logger logr.Logger, attrs ...attribute.KeyValue) HttpHandler {
+	return inner.withMetrics(logger, attrs...).WithTrace("METRICS")
+}
+
+func (inner HttpHandler) withMetrics(logger logr.Logger, attrs ...attribute.KeyValue) HttpHandler {
+	meter := global.MeterProvider().Meter(metrics.MeterName)
+	requestsMetric, err := meter.SyncInt64().Counter(
+		"kyverno_http_requests_total",
+		instrument.WithDescription("can be used to track the number of http requests"),
+	)
+	if err != nil {
+		logger.Error(err, "Failed to create instrument, kyverno_http_requests_total")
+	}
+	durationMetric, err := meter.SyncFloat64().Histogram(
+		"kyverno_http_requests_duration_seconds",
+		instrument.WithDescription("can be used to track the latencies (in seconds) associated with the entire individual http request."),
+	)
+	if err != nil {
+		logger.Error(err, "Failed to create instrument, kyverno_http_requests_duration_seconds")
+	}
+	return func(writer http.ResponseWriter, request *http.Request) {
+		startTime := time.Now()
+		attributes := []attribute.KeyValue{
+			semconv.HTTPRequestContentLengthKey.Int64(request.ContentLength),
+			semconv.HTTPHostKey.String(request.Host),
+			semconv.HTTPMethodKey.String(request.Method),
+			semconv.HTTPURLKey.String(request.RequestURI),
+		}
+		attributes = append(attributes, attrs...)
+		if requestsMetric != nil {
+			requestsMetric.Add(request.Context(), 1, attributes...)
+		}
+		if durationMetric != nil {
+			defer func() {
+				latency := int64(time.Since(startTime))
+				durationInSeconds := float64(latency) / float64(1000*1000*1000)
+				durationMetric.Record(request.Context(), durationInSeconds, attributes...)
+			}()
+		}
+		inner(writer, request)
+	}
+}
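The server wiring above calls `handlers.Probe(probes.IsLive)` and `handlers.Probe(probes.IsReady)`, whose implementation is not part of this diff. Judging from the call sites it adapts a `func() bool` into an `http.HandlerFunc`; a minimal sketch consistent with that usage (not the actual Kyverno code) could be:

// Sketch only: adapts a boolean health check into an HTTP handler,
// matching how handlers.Probe is invoked in the server wiring above.
func Probe(check func() bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if check() {
			w.WriteHeader(http.StatusOK) // probe passes
			return
		}
		w.WriteHeader(http.StatusInternalServerError) // probe fails
	}
}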