# kyverno/charts/kyverno/values.yaml
# -- Internal settings used with `helm template` to generate install manifest
# @ignored
templating:
enabled: false
debug: false
version:
# -- Override the name of the chart
nameOverride:
# -- Override the expanded name of the chart
fullnameOverride:
# -- Namespace the chart deploys to
namespace:
# -- Additional labels
customLabels: {}
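# For example (illustrative labels, not chart defaults):
# environment: production
# team: platform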
rbac:
# -- Create ClusterRoles, ClusterRoleBindings, and ServiceAccount
create: true
serviceAccount:
# -- Create a ServiceAccount
create: true
# -- The ServiceAccount name
name:
# -- Annotations for the ServiceAccount
annotations: {}
# example.com/annotation: value
image:
# -- Image registry
registry:
# If you set the registry here, remove it from the repository value, for example:
# registry: ghcr.io
# repository: kyverno/kyverno
# -- Image repository
repository: ghcr.io/kyverno/kyverno # kyverno: replaced in e2e tests
# -- Image tag
# Defaults to appVersion in Chart.yaml if omitted
tag: # replaced in e2e tests
# -- Image pull policy
pullPolicy: IfNotPresent
# -- Image pull secrets
pullSecrets: []
# - secretName
initImage:
# -- Image registry
registry:
# If you set the registry here, remove it from the repository value, for example:
# registry: ghcr.io
# repository: kyverno/kyvernopre
# -- Image repository
repository: ghcr.io/kyverno/kyvernopre # init: replaced in e2e tests
# -- Image tag
# If initImage.tag is missing, defaults to image.tag
tag: # replaced in e2e tests
# -- Image pull policy
# If initImage.pullPolicy is missing, defaults to image.pullPolicy
pullPolicy:
initContainer:
# -- Extra arguments to give to the kyvernopre binary.
extraArgs:
- --loggingFormat=text
- --exceptionNamespace={{ include "kyverno.namespace" . }}
testImage:
# -- Image registry
registry:
# -- Image repository
repository: busybox
# -- Image tag
# Defaults to `latest` if omitted
tag:
# -- Image pull policy
# Defaults to image.pullPolicy if omitted
pullPolicy:
# -- (int) Desired number of pods
replicaCount: ~
# -- Additional labels to add to each pod
podLabels: {}
# example.com/label: foo
# -- Additional annotations to add to each pod
podAnnotations: {}
# example.com/annotation: foo
# -- Security context for the pod
podSecurityContext: {}
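# For example (illustrative pod-level settings, not chart defaults):
# fsGroup: 2000
# runAsUser: 10001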
# -- Security context for the containers
securityContext:
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
# -- Security context for the test containers
testSecurityContext:
runAsUser: 65534
runAsGroup: 65534
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
# -- Optional priority class to be used for kyverno pods
priorityClassName: ''
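# For example, to schedule kyverno ahead of regular workloads (hypothetical class name, must already exist in the cluster):
# priorityClassName: high-priority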
antiAffinity:
# -- Pod antiAffinities toggle.
# Enabled by default but can be disabled if you want to schedule pods to the same node.
enable: true
# -- Pod anti affinity constraints.
# @default -- See [values.yaml](values.yaml)
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- '{{ template "kyverno.name" . }}'
topologyKey: kubernetes.io/hostname
# -- Pod affinity constraints.
podAffinity: {}
# -- Node affinity constraints.
nodeAffinity: {}
podDisruptionBudget:
# -- Configures the minimum available pods for kyverno disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for kyverno disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
# -- Node labels for pod assignment
nodeSelector: {}
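# For example, to restrict scheduling to Linux nodes via the well-known node label (illustrative):
# kubernetes.io/os: linux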
# -- List of node taints to tolerate
tolerations: []
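# For example, to tolerate a dedicated-node taint (illustrative key and effect):
# - key: CriticalAddonsOnly
#   operator: Exists
#   effect: NoSchedule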
# -- Change `hostNetwork` to `true` when you want the Kyverno pod to share its host's network namespace.
# Useful for situations like when you end up dealing with a custom CNI over Amazon EKS.
# Update the `dnsPolicy` accordingly as well to suit the host network mode.
hostNetwork: false
# -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
# When `hostNetwork: true`, `ClusterFirstWithHostNet` is usually the appropriate `dnsPolicy`.
# For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
dnsPolicy: ClusterFirst
# -- Env variables for initContainers.
envVarsInit: {}
# -- Env variables for containers.
envVars: {}
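# For example (illustrative variable, not a chart default):
# HTTPS_PROXY: https://proxy.example.com:3128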
# -- Extra arguments to give to the binary.
extraArgs:
- --loggingFormat=text
# -- Array of extra init containers
extraInitContainers: []
# Example:
# - name: init-container
# image: busybox
# command: ['sh', '-c', 'echo Hello']
# -- Array of extra containers to run alongside kyverno
extraContainers: []
# Example:
# - name: myapp-container
# image: busybox
# command: ['sh', '-c', 'echo Hello && sleep 3600']
# -- Image pull secrets for image verify and imageData policies.
# This will define the `--imagePullSecrets` Kyverno argument.
imagePullSecrets: {}
# Define two image pull secrets
# imagePullSecrets:
# regcred:
# registry: foo.example.com
# username: foobar
# password: secret
# regcred2:
# registry: bar.example.com
# username: barbaz
# password: secret2
# -- Existing image pull secrets for image verify and imageData policies.
# This will define the `--imagePullSecrets` Kyverno argument.
existingImagePullSecrets: []
# Define an existing image pull secret
# existingImagePullSecrets:
# - test-registry
# - other-test-registry
resources:
# -- Pod resource limits
limits:
memory: 384Mi
# -- Pod resource requests
requests:
cpu: 100m
memory: 128Mi
initResources:
# -- Pod resource limits
limits:
cpu: 100m
memory: 256Mi
# -- Pod resource requests
requests:
cpu: 10m
memory: 64Mi
testResources:
# -- Pod resource limits
limits:
cpu: 100m
memory: 256Mi
# -- Pod resource requests
requests:
cpu: 10m
memory: 64Mi
# -- Startup probe.
# The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
startupProbe:
httpGet:
path: /health/liveness
port: 9443
scheme: HTTPS
failureThreshold: 20
initialDelaySeconds: 2
periodSeconds: 6
# -- Liveness probe.
# The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
livenessProbe:
httpGet:
path: /health/liveness
port: 9443
scheme: HTTPS
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 2
successThreshold: 1
# -- Readiness Probe.
# The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
readinessProbe:
httpGet:
path: /health/readiness
port: 9443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
# -- Additional resources to be added to controller RBAC permissions.
generatecontrollerExtraResources: []
# - ResourceA
# - ResourceB
# -- Exclude Kyverno namespace
# Determines if default Kyverno namespace exclusion is enabled for webhooks and resourceFilters
excludeKyvernoNamespace: true
# -- resourceFilter namespace exclude
# Namespaces to exclude from the default resourceFilters
resourceFiltersExcludeNamespaces: []
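# For example, to drop the default kube-system entry so Kyverno processes that namespace (illustrative):
# - kube-system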
config:
# -- The registry hostname used for the image mutation.
defaultRegistry: docker.io
# -- Enable registry mutation for container images. Enabled by default.
enableDefaultRegistryMutation: true
# -- Resource types to be skipped by the Kyverno policy engine.
# Make sure to surround each entry in quotes so that it doesn't get parsed as a nested YAML list.
# These are joined together without spaces, run through `tpl`, and the result is set in the config map.
# @default -- See [values.yaml](values.yaml)
resourceFilters:
- '[Event,*,*]'
- '[*,kube-system,*]'
- '[*,kube-public,*]'
- '[*,kube-node-lease,*]'
- '[Node,*,*]'
- '[APIService,*,*]'
- '[TokenReview,*,*]'
- '[SubjectAccessReview,*,*]'
- '[SelfSubjectAccessReview,*,*]'
- '[Binding,*,*]'
- '[ReplicaSet,*,*]'
- '[AdmissionReport,*,*]'
- '[ClusterAdmissionReport,*,*]'
- '[BackgroundScanReport,*,*]'
- '[ClusterBackgroundScanReport,*,*]'
# exclude resources from the chart
- '[ClusterRole,*,{{ template "kyverno.fullname" . }}:*]'
- '[ClusterRoleBinding,*,{{ template "kyverno.fullname" . }}:*]'
- '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.serviceAccountName" . }}]'
- '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.configMapName" . }}]'
- '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.metricsConfigMapName" . }}]'
- '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}]'
- '[Job,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]'
- '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}]'
- '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}]'
- '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}:*]'
- '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}:*]'
- '[Secret,{{ include "kyverno.namespace" . }},{{ template "kyverno.serviceName" . }}.{{ template "kyverno.namespace" . }}.svc.*]'
- '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.serviceName" . }}]'
- '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.serviceName" . }}-metrics]'
- '[ServiceMonitor,{{ if .Values.serviceMonitor.namespace }}{{ .Values.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.serviceName" . }}-service-monitor]'
- '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-test]'
# -- Name of an existing config map (ignores default/provided resourceFilters)
existingConfig: ''
# -- Additional annotations to add to the configmap
annotations: {}
# example.com/annotation: foo
# -- Exclude group role
excludeGroupRole:
# - ''
# -- Exclude username
excludeUsername:
# - ''
# -- Defines the `namespaceSelector` in the webhook configurations.
# Note that it takes a list of `namespaceSelector` and/or `objectSelector` in JSON format, and only the first element
# will be forwarded to the webhook configurations.
# The Kyverno namespace is excluded if `excludeKyvernoNamespace` is `true` (default)
webhooks:
# Exclude namespaces
# - namespaceSelector:
# matchExpressions:
# - key: kubernetes.io/metadata.name
# operator: NotIn
# values:
# - kube-system
# - kyverno
# Exclude objects
# - objectSelector:
# matchExpressions:
# - key: webhooks.kyverno.io/exclude
# operator: DoesNotExist
# -- Generate success events.
generateSuccessEvents: false
# -- Metrics config.
metricsConfig:
# -- Additional annotations to add to the metricsconfigmap
annotations: {}
# example.com/annotation: foo
namespaces: {
"include": [],
"exclude": []
}
# 'namespaces.include': list of namespaces to capture metrics for. Default: metrics are captured for all namespaces except those listed in 'namespaces.exclude'.
# 'namespaces.exclude': list of namespaces to NOT capture metrics for. Default: []
# metricsRefreshInterval: 24h
# rate at which metrics are reset, to limit the memory footprint of Kyverno's metrics if you expect it to be high. Default: 0 (metrics are never reset)
# Or provide an existing metrics config map by uncommenting the line below
# existingMetricsConfig: sample-metrics-configmap. Refer to ./templates/metricsconfigmap.yaml for the structure of the metrics config map.
# -- Deployment update strategy.
# Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# @default -- See [values.yaml](values.yaml)
updateStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 40%
type: RollingUpdate
service:
# -- Service port.
port: 443
# -- Service type.
type: ClusterIP
# -- Service node port.
# Only used if `service.type` is `NodePort`.
nodePort:
# -- Service annotations.
annotations: {}
# -- Topology spread constraints.
topologySpreadConstraints: []
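# For example, to spread kyverno pods across zones (illustrative values):
# - maxSkew: 1
#   topologyKey: topology.kubernetes.io/zone
#   whenUnsatisfiable: ScheduleAnyway
#   labelSelector:
#     matchLabels:
#       app.kubernetes.io/name: kyverno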
metricsService:
# -- Create service.
create: true
# -- Service port.
# Kyverno's metrics server will be exposed at this port.
port: 8000
# -- Service type.
type: ClusterIP
# -- Service node port.
# Only used if `metricsService.type` is `NodePort`.
nodePort:
# -- Service annotations.
annotations: {}
serviceMonitor:
# -- Create a `ServiceMonitor` to collect Prometheus metrics.
enabled: false
# -- Additional labels
additionalLabels:
# key: value
# -- Override namespace (default is the same as kyverno)
namespace:
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved within the given interval
scrapeTimeout: 25s
# -- Is TLS required for endpoint
secure: false
# -- TLS Configuration for endpoint
tlsConfig: {}
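# For example, to scrape over TLS without verifying the server certificate (illustrative; fields follow the prometheus-operator TLSConfig):
# insecureSkipVerify: true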
# -- Kyverno requires a certificate key pair and corresponding certificate authority
# to properly register its webhooks. This can be done in one of 3 ways:
# 1) Use kube-controller-manager to generate a CA-signed certificate (preferred)
# 2) Provide your own CA and cert.
# In this case, you will need to create a certificate with a specific name and data structure.
# As long as you follow the naming scheme, it will be automatically picked up.
# kyverno-svc.(namespace).svc.kyverno-tls-ca (with data entries named tls.key and tls.crt)
# kyverno-svc.kyverno.svc.kyverno-tls-pair (with data entries named tls.key and tls.crt)
# 3) Let Helm generate a self-signed cert by setting createSelfSignedCert to true
# If letting Kyverno create its own CA or providing your own cert, make sure createSelfSignedCert is false
createSelfSignedCert: false
# -- Whether to have Helm install the Kyverno CRDs.
# If the CRDs are not installed by Helm, they must be added before policies can be created.
installCRDs: true
crds:
# -- Additional CRDs annotations.
annotations: {}
# argocd.argoproj.io/sync-options: Replace=true
# strategy.spinnaker.io/replace: 'true'
networkPolicy:
# -- When true, use a NetworkPolicy to allow ingress to the webhook
# This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup.
enabled: false
# -- A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies.
ingressFrom: []
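# For example, to additionally allow ingress from pods in a namespace named "monitoring" (illustrative selector):
# - namespaceSelector:
#     matchLabels:
#       kubernetes.io/metadata.name: monitoring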
webhooksCleanup:
# -- Create a Helm pre-delete hook to clean up webhooks.
enabled: false
# -- `kubectl` image to run commands for deleting webhooks.
image: bitnami/kubectl:latest
# -- A writable volume to use for the TUF root initialization.
tufRootMountPath: /.sigstore
grafana:
# -- Enable grafana dashboard creation.
enabled: false
# -- Namespace to create the grafana dashboard configmap.
# If not set, it will be created in the same namespace where the chart is deployed.
namespace:
# -- Grafana dashboard configmap annotations.
annotations: {}
cleanupController:
# -- Enable cleanup controller.
enabled: true
rbac:
# -- Create RBAC resources
create: true
serviceAccount:
# -- Service account name
name:
clusterRole:
# -- Extra resource permissions to add in the cluster role
extraResources: []
# - apiGroups:
# - ''
# resources:
# - pods
# -- Create self-signed certificates at deployment time.
# The certificates won't be automatically renewed if this is set to `true`.
createSelfSignedCert: false
image:
# -- Image registry
registry:
# If you set the registry here, remove it from the repository value, for example:
# registry: ghcr.io
# repository: kyverno/kyverno
# -- Image repository
repository: ghcr.io/kyverno/cleanup-controller # kyverno: replaced in e2e tests
# -- Image tag
# Defaults to appVersion in Chart.yaml if omitted
tag: # replaced in e2e tests
# -- Image pull policy
pullPolicy: IfNotPresent
# -- Image pull secrets
pullSecrets: []
# - secretName
# -- (int) Desired number of pods
replicas: ~
# -- Deployment update strategy.
# Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# @default -- See [values.yaml](values.yaml)
updateStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 40%
type: RollingUpdate
# -- Optional priority class
priorityClassName: ''
# -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace.
# Useful for situations like when you end up dealing with a custom CNI over Amazon EKS.
# Update the `dnsPolicy` accordingly as well to suit the host network mode.
hostNetwork: false
# -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
# When `hostNetwork: true`, `ClusterFirstWithHostNet` is usually the appropriate `dnsPolicy`.
# For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
dnsPolicy: ClusterFirst
# -- Extra arguments passed to the container on the command line
extraArgs: []
resources:
# -- Pod resource limits
limits:
memory: 128Mi
# -- Pod resource requests
requests:
cpu: 100m
memory: 64Mi
# -- Startup probe.
# The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
startupProbe:
httpGet:
path: /health/liveness
port: 9443
scheme: HTTPS
failureThreshold: 20
initialDelaySeconds: 2
periodSeconds: 6
# -- Liveness probe.
# The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
livenessProbe:
httpGet:
path: /health/liveness
port: 9443
scheme: HTTPS
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 2
successThreshold: 1
# -- Readiness Probe.
# The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# @default -- See [values.yaml](values.yaml)
readinessProbe:
httpGet:
path: /health/readiness
port: 9443
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
# -- Node labels for pod assignment
nodeSelector: {}
# -- List of node taints to tolerate
tolerations: []
antiAffinity:
# -- Pod antiAffinities toggle.
# Enabled by default but can be disabled if you want to schedule pods to the same node.
enabled: true
# -- Pod anti affinity constraints.
# @default -- See [values.yaml](values.yaml)
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- cleanup-controller
topologyKey: kubernetes.io/hostname
# -- Pod affinity constraints.
podAffinity: {}
# -- Node affinity constraints.
nodeAffinity: {}
# -- Topology spread constraints.
topologySpreadConstraints: []
# -- Security context for the pod
podSecurityContext: {}
# -- Security context for the containers
securityContext:
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
podDisruptionBudget:
# -- Configures the minimum available pods for disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
service:
# -- Service port.
port: 443
# -- Service type.
type: ClusterIP
# -- Service node port.
# Only used if `service.type` is `NodePort`.
nodePort:
# -- Service annotations.
annotations: {}
metricsService:
# -- Create service.
create: true
# -- Service port.
# Metrics server will be exposed at this port.
port: 8000
# -- Service type.
type: ClusterIP
# -- Service node port.
# Only used if `metricsService.type` is `NodePort`.
nodePort:
# -- Service annotations.
annotations: {}
serviceMonitor:
# -- Create a `ServiceMonitor` to collect Prometheus metrics.
enabled: false
# -- Additional labels
additionalLabels:
# key: value
# -- Override namespace (default is the same as kyverno)
namespace:
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved within the given interval
scrapeTimeout: 25s
# -- Is TLS required for endpoint
secure: false
# -- TLS Configuration for endpoint
tlsConfig: {}
tracing:
# -- Enable tracing
enabled: false
# -- Traces receiver address
address:
# -- Traces receiver port
port:
# -- Traces receiver credentials
creds: ''
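# For example (hypothetical collector address; 4317 is the standard OTLP gRPC port):
# enabled: true
# address: otel-collector.observability.svc
# port: 4317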
logging:
# -- Logging format
format: text
metering:
# -- Disable metrics export
disabled: false
# -- Otel configuration, can be `prometheus` or `grpc`
config: prometheus
# -- Prometheus endpoint port
port: 8000
# -- Otel collector endpoint
collector: ''
# -- Otel collector credentials
creds: ''