
feat: remove report controllers from kyverno admission controller (#6045)

* feat: remove reports controller from kyverno admission controller

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* helm

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* rbac

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* codegen

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* helm

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* helm

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* helm

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* codegen

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché 2023-01-19 14:28:28 +01:00 committed by GitHub
parent ad4c4da690
commit 3fa0bb1f27
15 changed files with 1012 additions and 168 deletions

View file

@@ -767,12 +767,14 @@ kind-load-all: kind-load-kyvernopre kind-load-kyverno kind-load-cleanup-controll
kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cluster and deploy kyverno helm chart
@echo Install kyverno chart... >&2
@$(HELM) upgrade --install kyverno --namespace kyverno --create-namespace --wait ./charts/kyverno \
--set cleanupController.image.repository=$(LOCAL_CLEANUP_IMAGE) \
--set cleanupController.image.tag=$(IMAGE_TAG_DEV) \
--set image.repository=$(LOCAL_KYVERNO_IMAGE) \
--set image.tag=$(IMAGE_TAG_DEV) \
--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
--set initImage.tag=$(IMAGE_TAG_DEV) \
--set cleanupController.image.repository=$(LOCAL_CLEANUP_IMAGE) \
--set cleanupController.image.tag=$(IMAGE_TAG_DEV) \
--set reportsController.image.repository=$(LOCAL_REPORTS_IMAGE) \
--set reportsController.image.tag=$(IMAGE_TAG_DEV) \
--values ./scripts/config/$(USE_CONFIG)/kyverno.yaml
.PHONY: kind-deploy-kyverno-policies

View file

@@ -275,6 +275,56 @@ The command removes all the Kubernetes components associated with the chart and
| cleanupController.metering.port | int | `8000` | Prometheus endpoint port |
| cleanupController.metering.collector | string | `""` | Otel collector endpoint |
| cleanupController.metering.creds | string | `""` | Otel collector credentials |
| reportsController.enabled | bool | `true` | Enable reports controller. |
| reportsController.rbac.create | bool | `true` | Create RBAC resources |
| reportsController.rbac.serviceAccount.name | string | `nil` | Service account name |
| reportsController.rbac.clusterRole.extraResources | list | `[]` | Extra resource permissions to add in the cluster role |
| reportsController.image.registry | string | `nil` | Image registry |
| reportsController.image.repository | string | `"ghcr.io/kyverno/reports-controller"` | Image repository |
| reportsController.image.tag | string | `nil` | Image tag. Defaults to appVersion in Chart.yaml if omitted |
| reportsController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| reportsController.image.pullSecrets | list | `[]` | Image pull secrets |
| reportsController.replicas | int | `nil` | Desired number of pods |
| reportsController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
| reportsController.priorityClassName | string | `""` | Optional priority class |
| reportsController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
| reportsController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
| reportsController.extraArgs | list | `[]` | Extra arguments passed to the container on the command line |
| reportsController.resources.limits | object | `{"memory":"128Mi"}` | Pod resource limits |
| reportsController.resources.requests | object | `{"cpu":"100m","memory":"64Mi"}` | Pod resource requests |
| reportsController.nodeSelector | object | `{}` | Node labels for pod assignment |
| reportsController.tolerations | list | `[]` | List of node taints to tolerate |
| reportsController.antiAffinity.enabled | bool | `true` | Pod antiAffinities toggle. Enabled by default but can be disabled if you want to schedule pods to the same node. |
| reportsController.podAntiAffinity | object | See [values.yaml](values.yaml) | Pod anti affinity constraints. |
| reportsController.podAffinity | object | `{}` | Pod affinity constraints. |
| reportsController.nodeAffinity | object | `{}` | Node affinity constraints. |
| reportsController.topologySpreadConstraints | list | `[]` | Topology spread constraints. |
| reportsController.podSecurityContext | object | `{}` | Security context for the pod |
| reportsController.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the containers |
| reportsController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. |
| reportsController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. |
| reportsController.metricsService.create | bool | `true` | Create service. |
| reportsController.metricsService.port | int | `8000` | Service port. Metrics server will be exposed at this port. |
| reportsController.metricsService.type | string | `"ClusterIP"` | Service type. |
| reportsController.metricsService.nodePort | string | `nil` | Service node port. Only used if `metricsService.type` is `NodePort`. |
| reportsController.metricsService.annotations | object | `{}` | Service annotations. |
| reportsController.serviceMonitor.enabled | bool | `false` | Create a `ServiceMonitor` to collect Prometheus metrics. |
| reportsController.serviceMonitor.additionalLabels | string | `nil` | Additional labels |
| reportsController.serviceMonitor.namespace | string | `nil` | Override namespace (default is the same as kyverno) |
| reportsController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
| reportsController.serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval |
| reportsController.serviceMonitor.secure | bool | `false` | Is TLS required for endpoint |
| reportsController.serviceMonitor.tlsConfig | object | `{}` | TLS Configuration for endpoint |
| reportsController.tracing.enabled | bool | `false` | Enable tracing |
| reportsController.tracing.address | string | `nil` | Traces receiver address |
| reportsController.tracing.port | string | `nil` | Traces receiver port |
| reportsController.tracing.creds | string | `""` | Traces receiver credentials |
| reportsController.logging.format | string | `"text"` | Logging format |
| reportsController.metering.disabled | bool | `false` | Disable metrics export |
| reportsController.metering.config | string | `"prometheus"` | Otel configuration, can be `prometheus` or `grpc` |
| reportsController.metering.port | int | `8000` | Prometheus endpoint port |
| reportsController.metering.collector | string | `""` | Otel collector endpoint |
| reportsController.metering.creds | string | `""` | Otel collector credentials |
## TLS Configuration

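For reference (not part of this commit), a minimal values override sketch built from the `reportsController` keys documented above; the key names match the table, while the specific values shown are only illustrative:

```yaml
# values-reports.yaml -- hypothetical override file
reportsController:
  enabled: true            # new controller is on by default
  replicas: 2              # two pods, so the PodDisruptionBudget can apply
  serviceMonitor:
    enabled: true          # scrape metrics via the Prometheus operator
    interval: 30s
  resources:
    limits:
      memory: 256Mi        # raise the default 128Mi limit
    requests:
      cpu: 100m
      memory: 64Mi
```

It could be applied with something like `helm upgrade --install kyverno ./charts/kyverno -n kyverno -f values-reports.yaml`.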
View file

@@ -0,0 +1,67 @@
{{/* vim: set filetype=mustache: */}}
{{- define "kyverno.reports-controller.name" -}}
{{ template "kyverno.name" . }}-reports-controller
{{- end -}}
{{- define "kyverno.reports-controller.labels" -}}
app.kubernetes.io/part-of: {{ template "kyverno.name" . }}
{{- with (include "kyverno.helmLabels" .) }}
{{ . }}
{{- end }}
{{- with (include "kyverno.versionLabels" .) }}
{{ . }}
{{- end }}
{{- with (include "kyverno.reports-controller.matchLabels" .) }}
{{ . }}
{{- end }}
{{- end -}}
{{- define "kyverno.reports-controller.matchLabels" -}}
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: {{ template "kyverno.reports-controller.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "kyverno.reports-controller.image" -}}
{{- if .image.registry -}}
{{ .image.registry }}/{{ required "An image repository is required" .image.repository }}:{{ default .defaultTag .image.tag }}
{{- else -}}
{{ required "An image repository is required" .image.repository }}:{{ default .defaultTag .image.tag }}
{{- end -}}
{{- end -}}
{{- define "kyverno.reports-controller.roleName" -}}
{{ .Release.Name }}:reports-controller
{{- end -}}
{{/* Create the name of the service account to use */}}
{{- define "kyverno.reports-controller.serviceAccountName" -}}
{{- if .Values.reportsController.rbac.create -}}
{{ default (include "kyverno.reports-controller.name" .) .Values.reportsController.rbac.serviceAccount.name }}
{{- else -}}
{{ required "A service account name is required when `rbac.create` is set to `false`" .Values.reportsController.rbac.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "kyverno.reports-controller.securityContext" -}}
{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
{{ toYaml (omit .Values.reportsController.securityContext "seccompProfile") }}
{{- else }}
{{ toYaml .Values.reportsController.securityContext }}
{{- end }}
{{- end }}
{{/* Create the default PodDisruptionBudget to use */}}
{{- define "kyverno.reports-controller.podDisruptionBudget.spec" -}}
{{- if and .Values.reportsController.podDisruptionBudget.minAvailable .Values.reportsController.podDisruptionBudget.maxUnavailable }}
{{- fail "Cannot set both .Values.reportsController.podDisruptionBudget.minAvailable and .Values.reportsController.podDisruptionBudget.maxUnavailable" -}}
{{- end }}
{{- if not .Values.reportsController.podDisruptionBudget.maxUnavailable }}
minAvailable: {{ default 1 .Values.reportsController.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.reportsController.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.reportsController.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- end }}
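As an illustration (not chart output), the `podDisruptionBudget.spec` helper above fails rendering when both `minAvailable` and `maxUnavailable` are set, and otherwise emits exactly one of the two, defaulting `minAvailable` to 1. With the chart defaults it contributes:

```yaml
# spec fragment produced by "kyverno.reports-controller.podDisruptionBudget.spec"
# assuming minAvailable: 1 and maxUnavailable unset (the defaults)
minAvailable: 1
```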

View file

@@ -0,0 +1,90 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.reports-controller.roleName" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
aggregationRule:
clusterRoleSelectors:
- matchLabels:
{{- include "kyverno.reports-controller.matchLabels" . | nindent 8 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.reports-controller.roleName" . }}:core
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- watch
- apiGroups:
- kyverno.io
resources:
- admissionreports
- clusteradmissionreports
- backgroundscanreports
- clusterbackgroundscanreports
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- deletecollection
- apiGroups:
- wgpolicyk8s.io
resources:
- policyreports
- policyreports/status
- clusterpolicyreports
- clusterpolicyreports/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- deletecollection
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
{{- with .Values.reportsController.rbac.clusterRole.extraResources }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.reports-controller.roleName" $ }}:additional
labels:
{{- include "kyverno.reports-controller.labels" $ | nindent 4 }}
rules:
{{- range . }}
- apiGroups:
{{- toYaml .apiGroups | nindent 6 }}
resources:
{{- toYaml .resources | nindent 6 }}
verbs:
- get
- list
- watch
{{- end }}
{{- end }}
{{- end }}
{{- end }}
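To illustrate the `extraResources` branch above (a sketch, not generated output), assuming the commented example from values.yaml and a release named `kyverno`, the additional ClusterRole would come out roughly as:

```yaml
# assumed values:
#   reportsController.rbac.clusterRole.extraResources:
#     - apiGroups: ['']
#       resources: ['pods']
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kyverno:reports-controller:additional
  # labels omitted for brevity
rules:
  - apiGroups:
      - ''
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
```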

View file

@@ -0,0 +1,18 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.reports-controller.roleName" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.reports-controller.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.reports-controller.serviceAccountName" . }}
namespace: {{ template "kyverno.namespace" . }}
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,125 @@
{{- if .Values.reportsController.enabled -}}
{{- if not .Values.templating.debug -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "kyverno.reports-controller.name" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
namespace: {{ template "kyverno.namespace" . }}
spec:
{{- with .Values.reportsController.replicas }}
replicas: {{ . }}
{{- end }}
{{- with .Values.reportsController.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "kyverno.reports-controller.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 8 }}
spec:
{{- with .Values.reportsController.image.pullSecrets }}
imagePullSecrets:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.reportsController.podSecurityContext }}
securityContext:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.reportsController.nodeSelector }}
nodeSelector:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.reportsController.tolerations }}
tolerations:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.reportsController.topologySpreadConstraints }}
topologySpreadConstraints:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.reportsController.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.reportsController.hostNetwork }}
hostNetwork: {{ . }}
{{- end }}
{{- with .Values.reportsController.dnsPolicy }}
dnsPolicy: {{ . }}
{{- end }}
{{- if or .Values.reportsController.antiAffinity.enabled .Values.reportsController.podAffinity .Values.reportsController.nodeAffinity }}
affinity:
{{- if .Values.reportsController.antiAffinity.enabled }}
{{- with .Values.reportsController.podAntiAffinity }}
podAntiAffinity:
{{- tpl (toYaml .) $ | nindent 10 }}
{{- end }}
{{- end }}
{{- with .Values.reportsController.podAffinity }}
podAffinity:
{{- tpl (toYaml .) $ | nindent 10 }}
{{- end }}
{{- with .Values.reportsController.nodeAffinity }}
nodeAffinity:
{{- tpl (toYaml .) $ | nindent 10 }}
{{- end }}
{{- end }}
serviceAccountName: {{ template "kyverno.reports-controller.serviceAccountName" . }}
containers:
- name: controller
image: {{ include "kyverno.reports-controller.image" (dict "image" .Values.reportsController.image "defaultTag" .Chart.AppVersion) | quote }}
ports:
- containerPort: 9443
name: https
protocol: TCP
- containerPort: 8000
name: metrics
protocol: TCP
args:
- --loggingFormat={{ .Values.reportsController.logging.format }}
{{- if .Values.reportsController.tracing.enabled }}
- --enableTracing
- --tracingAddress={{ .Values.reportsController.tracing.address }}
- --tracingPort={{ .Values.reportsController.tracing.port }}
{{- with .Values.reportsController.tracing.creds }}
- --tracingCreds={{ . }}
{{- end }}
{{- end }}
- --disableMetrics={{ .Values.reportsController.metering.disabled }}
{{- if not .Values.reportsController.metering.disabled }}
- --otelConfig={{ .Values.reportsController.metering.config }}
- --metricsPort={{ .Values.reportsController.metering.port }}
{{- with .Values.reportsController.metering.collector }}
- --otelCollector={{ . }}
{{- end }}
{{- with .Values.reportsController.metering.creds }}
- --transportCreds={{ . }}
{{- end }}
{{- end }}
{{- range .Values.reportsController.extraArgs }}
- {{ . }}
{{- end }}
env:
- name: METRICS_CONFIG
value: {{ template "kyverno.metricsConfigMapName" . }}
- name: KYVERNO_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KYVERNO_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.reportsController.resources }}
resources: {{ tpl (toYaml .) $ | nindent 12 }}
{{- end }}
{{- if .Values.reportsController.securityContext }}
securityContext: {{ include "kyverno.reports-controller.securityContext" . | nindent 12 }}
{{- end }}
{{- end -}}
{{- end -}}
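As a sketch (not rendered output from this template), enabling tracing in values adds the corresponding flags to the container args assembled above; the address and port are placeholders:

```yaml
# assumed values:
#   reportsController.tracing:
#     enabled: true
#     address: tempo.monitoring   # placeholder
#     port: 4317                  # placeholder
# resulting extra container args (alongside --loggingFormat and the metrics flags):
args:
  - --enableTracing
  - --tracingAddress=tempo.monitoring
  - --tracingPort=4317
```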

View file

@@ -0,0 +1,20 @@
{{- if .Values.reportsController.enabled -}}
{{- if (gt (int .Values.reportsController.replicas) 1) -}}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
apiVersion: policy/v1
{{- else -}}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ template "kyverno.reports-controller.name" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
namespace: {{ template "kyverno.namespace" . }}
spec:
{{- include "kyverno.reports-controller.podDisruptionBudget.spec" . | indent 2 }}
selector:
matchLabels:
{{- include "kyverno.reports-controller.matchLabels" . | nindent 6 }}
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,30 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "kyverno.reports-controller.roleName" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
namespace: {{ template "kyverno.namespace" . }}
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,19 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.reports-controller.roleName" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
namespace: {{ template "kyverno.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kyverno.reports-controller.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.reports-controller.serviceAccountName" . }}
namespace: {{ template "kyverno.namespace" . }}
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,27 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.metricsService.create -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kyverno.reports-controller.name" . }}-metrics
namespace: {{ template "kyverno.namespace" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
{{- with .Values.reportsController.metricsService.annotations }}
annotations:
{{- tpl (toYaml .) $ | nindent 4 }}
{{- end }}
spec:
ports:
- port: {{ .Values.reportsController.metricsService.port }}
targetPort: 8000
protocol: TCP
name: metrics-port
{{- if and (eq .Values.reportsController.metricsService.type "NodePort") (not (empty .Values.reportsController.metricsService.nodePort)) }}
nodePort: {{ .Values.reportsController.metricsService.nodePort }}
{{- end }}
selector:
{{- include "kyverno.reports-controller.matchLabels" . | nindent 4 }}
type: {{ .Values.reportsController.metricsService.type }}
{{- end -}}
{{- end -}}
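A small sketch of the NodePort branch above; the values and the resulting port entry are illustrative only:

```yaml
# assumed values:
#   reportsController.metricsService:
#     create: true
#     type: NodePort
#     port: 8000
#     nodePort: 30800   # placeholder
ports:
  - port: 8000
    targetPort: 8000
    protocol: TCP
    name: metrics-port
    nodePort: 30800
```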

View file

@@ -0,0 +1,11 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.rbac.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kyverno.reports-controller.serviceAccountName" . }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
namespace: {{ template "kyverno.namespace" . }}
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,34 @@
{{- if .Values.reportsController.enabled -}}
{{- if .Values.reportsController.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kyverno.reports-controller.name" . }}
{{- if .Values.reportsController.serviceMonitor.namespace }}
namespace: {{ .Values.reportsController.serviceMonitor.namespace }}
{{- else }}
namespace: {{ template "kyverno.namespace" . }}
{{- end }}
labels:
{{- include "kyverno.reports-controller.labels" . | nindent 4 }}
{{- with .Values.reportsController.serviceMonitor.additionalLabels }}
{{- toYaml .Values.reportsController.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "kyverno.reports-controller.matchLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ template "kyverno.namespace" . }}
endpoints:
- port: metrics-port
interval: {{ .Values.reportsController.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.reportsController.serviceMonitor.scrapeTimeout }}
{{- if .Values.reportsController.serviceMonitor.secure }}
scheme: https
tlsConfig:
{{- toYaml .Values.reportsController.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- end -}}
{{- end -}}

View file

@@ -745,3 +745,237 @@ cleanupController:
collector: ''
# -- Otel collector credentials
creds: ''
reportsController:
# -- Enable reports controller.
enabled: true
rbac:
# -- Create RBAC resources
create: true
serviceAccount:
# -- Service account name
name:
clusterRole:
# -- Extra resource permissions to add in the cluster role
extraResources: []
# - apiGroups:
# - ''
# resources:
# - pods
image:
# -- Image registry
registry:
# If you want to manage the registry you should remove it from the repository
# registry: ghcr.io
# repository: kyverno/kyverno
# -- Image repository
repository: ghcr.io/kyverno/reports-controller # kyverno: replaced in e2e tests
# -- Image tag
# Defaults to appVersion in Chart.yaml if omitted
tag: # replaced in e2e tests
# -- Image pull policy
pullPolicy: IfNotPresent
# -- Image pull secrets
pullSecrets: []
# - secretName
# -- (int) Desired number of pods
replicas: ~
# -- Deployment update strategy.
# Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# @default -- See [values.yaml](values.yaml)
updateStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 40%
type: RollingUpdate
# -- Optional priority class
priorityClassName: ''
# -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace.
# Useful for situations like when you end up dealing with a custom CNI over Amazon EKS.
# Update the `dnsPolicy` accordingly as well to suit the host network mode.
hostNetwork: false
# -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
# In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
# For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
dnsPolicy: ClusterFirst
# -- Extra arguments passed to the container on the command line
extraArgs: []
resources:
# -- Pod resource limits
limits:
memory: 128Mi
# -- Pod resource requests
requests:
cpu: 100m
memory: 64Mi
# TODO
# # -- Startup probe.
# # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
# # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# # @default -- See [values.yaml](values.yaml)
# startupProbe:
# httpGet:
# path: /health/liveness
# port: 9443
# scheme: HTTPS
# failureThreshold: 20
# initialDelaySeconds: 2
# periodSeconds: 6
# # -- Liveness probe.
# # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
# # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# # @default -- See [values.yaml](values.yaml)
# livenessProbe:
# httpGet:
# path: /health/liveness
# port: 9443
# scheme: HTTPS
# initialDelaySeconds: 15
# periodSeconds: 30
# timeoutSeconds: 5
# failureThreshold: 2
# successThreshold: 1
# # -- Readiness Probe.
# # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
# # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# # @default -- See [values.yaml](values.yaml)
# readinessProbe:
# httpGet:
# path: /health/readiness
# port: 9443
# scheme: HTTPS
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# -- Node labels for pod assignment
nodeSelector: {}
# -- List of node taints to tolerate
tolerations: []
antiAffinity:
# -- Pod antiAffinities toggle.
# Enabled by default but can be disabled if you want to schedule pods to the same node.
enabled: true
# -- Pod anti affinity constraints.
# @default -- See [values.yaml](values.yaml)
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- reports-controller
topologyKey: kubernetes.io/hostname
# -- Pod affinity constraints.
podAffinity: {}
# -- Node affinity constraints.
nodeAffinity: {}
# -- Topology spread constraints.
topologySpreadConstraints: []
# -- Security context for the pod
podSecurityContext: {}
# -- Security context for the containers
securityContext:
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
podDisruptionBudget:
# -- Configures the minimum available pods for disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
metricsService:
# -- Create service.
create: true
# -- Service port.
# Metrics server will be exposed at this port.
port: 8000
# -- Service type.
type: ClusterIP
# -- Service node port.
# Only used if `metricsService.type` is `NodePort`.
nodePort:
# -- Service annotations.
annotations: {}
serviceMonitor:
# -- Create a `ServiceMonitor` to collect Prometheus metrics.
enabled: false
# -- Additional labels
additionalLabels:
# key: value
# -- Override namespace (default is the same as kyverno)
namespace:
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
# -- Is TLS required for endpoint
secure: false
# -- TLS Configuration for endpoint
tlsConfig: {}
tracing:
# -- Enable tracing
enabled: false
# -- Traces receiver address
address:
# -- Traces receiver port
port:
# -- Traces receiver credentials
creds: ''
logging:
# -- Logging format
format: text
metering:
# -- Disable metrics export
disabled: false
# -- Otel configuration, can be `prometheus` or `grpc`
config: prometheus
# -- Prometheus endpoint port
port: 8000
# -- Otel collector endpoint
collector: ''
# -- Otel collector credentials
creds: ''

View file

@@ -20,7 +20,6 @@ import (
dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
metadataclient "github.com/kyverno/kyverno/pkg/clients/metadata"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/controllers/certmanager"
configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
@@ -28,10 +27,6 @@ import (
policymetricscontroller "github.com/kyverno/kyverno/pkg/controllers/metrics/policy"
openapicontroller "github.com/kyverno/kyverno/pkg/controllers/openapi"
policycachecontroller "github.com/kyverno/kyverno/pkg/controllers/policycache"
admissionreportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/admission"
aggregatereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/aggregate"
backgroundscancontroller "github.com/kyverno/kyverno/pkg/controllers/report/background"
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine"
@@ -58,7 +53,6 @@ import (
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
metadatainformers "k8s.io/client-go/metadata/metadatainformer"
kyamlopenapi "sigs.k8s.io/kustomize/kyaml/openapi"
)
@@ -158,118 +152,14 @@ func createNonLeaderControllers(
}
}
func createReportControllers(
backgroundScan bool,
admissionReports bool,
reportsChunkSize int,
backgroundScanWorkers int,
client dclient.Interface,
kyvernoClient versioned.Interface,
rclient registryclient.Client,
metadataFactory metadatainformers.SharedInformerFactory,
kubeInformer kubeinformers.SharedInformerFactory,
kyvernoInformer kyvernoinformer.SharedInformerFactory,
configMapResolver resolvers.ConfigmapResolver,
backgroundScanInterval time.Duration,
configuration config.Configuration,
eventGenerator event.Interface,
enablePolicyException bool,
exceptionNamespace string,
) ([]internal.Controller, func(context.Context) error) {
var ctrls []internal.Controller
var warmups []func(context.Context) error
kyvernoV1 := kyvernoInformer.Kyverno().V1()
kyvernoV2Alpha1 := kyvernoInformer.Kyverno().V2alpha1()
if backgroundScan || admissionReports {
resourceReportController := resourcereportcontroller.NewController(
client,
kyvernoV1.Policies(),
kyvernoV1.ClusterPolicies(),
)
warmups = append(warmups, func(ctx context.Context) error {
return resourceReportController.Warmup(ctx)
})
ctrls = append(ctrls, internal.NewController(
resourcereportcontroller.ControllerName,
resourceReportController,
resourcereportcontroller.Workers,
))
ctrls = append(ctrls, internal.NewController(
aggregatereportcontroller.ControllerName,
aggregatereportcontroller.NewController(
kyvernoClient,
metadataFactory,
kyvernoV1.Policies(),
kyvernoV1.ClusterPolicies(),
resourceReportController,
reportsChunkSize,
),
aggregatereportcontroller.Workers,
))
if admissionReports {
ctrls = append(ctrls, internal.NewController(
admissionreportcontroller.ControllerName,
admissionreportcontroller.NewController(
kyvernoClient,
metadataFactory,
resourceReportController,
),
admissionreportcontroller.Workers,
))
}
if backgroundScan {
var exceptionsLister engine.PolicyExceptionLister
if enablePolicyException {
lister := kyvernoV2Alpha1.PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
}
ctrls = append(ctrls, internal.NewController(
backgroundscancontroller.ControllerName,
backgroundscancontroller.NewController(
client,
kyvernoClient,
rclient,
metadataFactory,
kyvernoV1.Policies(),
kyvernoV1.ClusterPolicies(),
kubeInformer.Core().V1().Namespaces(),
exceptionsLister,
resourceReportController,
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
),
backgroundScanWorkers,
))
}
}
return ctrls, func(ctx context.Context) error {
for _, warmup := range warmups {
if err := warmup(ctx); err != nil {
return err
}
}
return nil
}
}
func createrLeaderControllers(
backgroundScan bool,
admissionReports bool,
reportsChunkSize int,
backgroundScanWorkers int,
serverIP string,
webhookTimeout int,
autoUpdateWebhooks bool,
kubeInformer kubeinformers.SharedInformerFactory,
kubeKyvernoInformer kubeinformers.SharedInformerFactory,
kyvernoInformer kyvernoinformer.SharedInformerFactory,
metadataInformer metadatainformers.SharedInformerFactory,
kubeClient kubernetes.Interface,
kyvernoClient versioned.Interface,
dynamicClient dclient.Interface,
@@ -280,9 +170,6 @@ func createrLeaderControllers(
certRenewer tls.CertRenewer,
runtime runtimeutils.Runtime,
configMapResolver resolvers.ConfigmapResolver,
backgroundScanInterval time.Duration,
enablePolicyException bool,
exceptionNamespace string,
) ([]internal.Controller, func(context.Context) error, error) {
policyCtrl, err := policy.NewPolicyController(
kyvernoClient,
@@ -348,34 +235,13 @@ createrLeaderControllers(
genericwebhookcontroller.Fail,
genericwebhookcontroller.None,
)
reportControllers, warmup := createReportControllers(
backgroundScan,
admissionReports,
reportsChunkSize,
backgroundScanWorkers,
dynamicClient,
kyvernoClient,
rclient,
metadataInformer,
kubeInformer,
kyvernoInformer,
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
enablePolicyException,
exceptionNamespace,
)
return append(
[]internal.Controller{
internal.NewController("policy-controller", policyCtrl, 2),
internal.NewController(certmanager.ControllerName, certManager, certmanager.Workers),
internal.NewController(webhookcontroller.ControllerName, webhookController, webhookcontroller.Workers),
internal.NewController(exceptionWebhookControllerName, exceptionWebhookController, 1),
},
reportControllers...,
),
warmup,
return []internal.Controller{
internal.NewController("policy-controller", policyCtrl, 2),
internal.NewController(certmanager.ControllerName, certManager, certmanager.Workers),
internal.NewController(webhookcontroller.ControllerName, webhookController, webhookcontroller.Workers),
internal.NewController(exceptionWebhookControllerName, exceptionWebhookController, 1),
},
nil,
nil
}
@@ -392,13 +258,9 @@ func main() {
imageSignatureRepository string
allowInsecureRegistry bool
webhookRegistrationTimeout time.Duration
backgroundScan bool
admissionReports bool
reportsChunkSize int
backgroundScanWorkers int
dumpPayload bool
leaderElectionRetryPeriod time.Duration
backgroundScanInterval time.Duration
enablePolicyException bool
exceptionNamespace string
)
@@ -414,13 +276,9 @@ func main() {
flagset.BoolVar(&autoUpdateWebhooks, "autoUpdateWebhooks", true, "Set this flag to 'false' to disable auto-configuration of the webhook.")
flagset.DurationVar(&webhookRegistrationTimeout, "webhookRegistrationTimeout", 120*time.Second, "Timeout for webhook registration, e.g., 30s, 1m, 5m.")
flagset.Func(toggle.ProtectManagedResourcesFlagName, toggle.ProtectManagedResourcesDescription, toggle.ProtectManagedResources.Parse)
flagset.BoolVar(&backgroundScan, "backgroundScan", true, "Enable or disable background scan.")
flagset.Func(toggle.ForceFailurePolicyIgnoreFlagName, toggle.ForceFailurePolicyIgnoreDescription, toggle.ForceFailurePolicyIgnore.Parse)
flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
flagset.IntVar(&reportsChunkSize, "reportsChunkSize", 1000, "Max number of results in generated reports, reports will be split accordingly if there are more results to be stored.")
flagset.IntVar(&backgroundScanWorkers, "backgroundScanWorkers", backgroundscancontroller.Workers, "Configure the number of background scan workers.")
flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
flagset.DurationVar(&backgroundScanInterval, "backgroundScanInterval", time.Hour, "Configure background scan interval.")
flagset.StringVar(&exceptionNamespace, "exceptionNamespace", "", "Configure the namespace to accept PolicyExceptions.")
flagset.BoolVar(&enablePolicyException, "enablePolicyException", false, "Enable PolicyException feature.")
// config
@@ -447,7 +305,6 @@ func main() {
kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KyvernoClient), kyvernoclient.WithTracing())
metadataClient := internal.CreateMetadataClient(logger, metadataclient.WithMetrics(metricsConfig, metrics.KyvernoClient), metadataclient.WithTracing())
dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
dClient, err := dclient.NewClient(signalCtx, dynamicClient, kubeClient, 15*time.Minute)
if err != nil {
@@ -581,20 +438,15 @@ func main() {
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
metadataInformer := metadatainformers.NewSharedInformerFactory(metadataClient, 15*time.Minute)
// create leader controllers
leaderControllers, warmup, err := createrLeaderControllers(
backgroundScan,
admissionReports,
reportsChunkSize,
backgroundScanWorkers,
serverIP,
webhookTimeout,
autoUpdateWebhooks,
kubeInformer,
kubeKyvernoInformer,
kyvernoInformer,
metadataInformer,
kubeClient,
kyvernoClient,
dClient,
@@ -605,9 +457,6 @@ func main() {
certRenewer,
runtime,
configMapResolver,
backgroundScanInterval,
enablePolicyException,
exceptionNamespace,
)
if err != nil {
logger.Error(err, "failed to create leader controllers")
@@ -618,14 +467,11 @@ func main() {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
internal.StartInformers(signalCtx, metadataInformer)
if !internal.CheckCacheSync(metadataInformer.WaitForCacheSync(signalCtx.Done())) {
// TODO: shall we just exit ?
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
}
if err := warmup(ctx); err != nil {
logger.Error(err, "failed to run warmup")
os.Exit(1)
if warmup != nil {
if err := warmup(ctx); err != nil {
logger.Error(err, "failed to run warmup")
os.Exit(1)
}
}
// start leader controllers
var wg sync.WaitGroup

View file

@@ -24,6 +24,18 @@ metadata:
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
namespace: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno
labels:
@@ -31698,6 +31710,83 @@ rules:
- watch
- deletecollection
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
aggregationRule:
clusterRoleSelectors:
- matchLabels:
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:reports-controller:core
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- watch
- apiGroups:
- kyverno.io
resources:
- admissionreports
- clusteradmissionreports
- backgroundscanreports
- clusterbackgroundscanreports
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- deletecollection
- apiGroups:
- wgpolicyk8s.io
resources:
- policyreports
- policyreports/status
- clusterpolicyreports
- clusterpolicyreports/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- deletecollection
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -31736,6 +31825,25 @@ subjects:
name: kyverno
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:reports-controller
subjects:
- kind: ServiceAccount
name: kyverno-reports-controller
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -31779,6 +31887,37 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kyverno:reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
namespace: kyverno
rules:
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kyverno:leaderelection
namespace: kyverno
@@ -31832,6 +31971,26 @@ subjects:
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
namespace: kyverno
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kyverno:reports-controller
subjects:
- kind: ServiceAccount
name: kyverno-reports-controller
namespace: kyverno
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:leaderelection
namespace: kyverno
@@ -31898,6 +32057,29 @@ spec:
---
apiVersion: v1
kind: Service
metadata:
name: kyverno-reports-controller-metrics
namespace: kyverno
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
spec:
ports:
- port: 8000
targetPort: 8000
protocol: TCP
name: metrics-port
selector:
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: kyverno-svc
labels:
@@ -32235,3 +32417,92 @@ spec:
path: api-token
expirationSeconds: 600
audience: kyverno-extension
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kyverno-reports-controller
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
namespace: kyverno
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 40%
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
template:
metadata:
labels:
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: latest
app.kubernetes.io/component: reports-controller
app.kubernetes.io/name: kyverno-reports-controller
app.kubernetes.io/instance: kyverno
spec:
dnsPolicy: ClusterFirst
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- reports-controller
topologyKey: kubernetes.io/hostname
weight: 1
serviceAccountName: kyverno-reports-controller
containers:
- name: controller
image: "ghcr.io/kyverno/reports-controller:latest"
ports:
- containerPort: 9443
name: https
protocol: TCP
- containerPort: 8000
name: metrics
protocol: TCP
args:
- --loggingFormat=text
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
env:
- name: METRICS_CONFIG
value: kyverno-metrics
- name: KYVERNO_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KYVERNO_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault