Mirror of https://github.com/kyverno/kyverno.git
feat: improve cleanup policies controller and chart (#5628)
* feat: improve cleanup policies controller
* helm improvements
* todo

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Parent: 99d988e98c
Commit: d16bdba1d4
10 changed files with 368 additions and 63 deletions
@@ -214,26 +214,41 @@ The command removes all the Kubernetes components associated with the chart and
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| grafana.enabled | bool | `false` | Enable grafana dashboard creation. |
| grafana.namespace | string | `nil` | Namespace to create the grafana dashboard configmap. If not set, it will be created in the same namespace where the chart is deployed. |
| grafana.annotations | object | `{}` | Grafana dashboard configmap annotations. |
| cleanupController.enabled | bool | `true` | Enable cleanup controller. |
| cleanupController.rbac.create | bool | `true` | Create RBAC resources |
| cleanupController.rbac.serviceAccount.name | string | `nil` | Service account name |
| cleanupController.rbac.clusterRole.extraResources | list | `[]` | Extra resource permissions to add in the cluster role |
| cleanupController.image.registry | string | `nil` | Image registry |
| cleanupController.image.repository | string | `"ghcr.io/kyverno/cleanup-controller"` | Image repository |
| cleanupController.image.tag | string | `nil` | Image tag. Defaults to appVersion in Chart.yaml if omitted. |
| cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
| cleanupController.replicas | int | `nil` | Desired number of pods |
| cleanupController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
| cleanupController.priorityClassName | string | `""` | Optional priority class |
| cleanupController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
| cleanupController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
| cleanupController.args | list | `["--cleanupService=https://{{ template \"kyverno.cleanup-controller.deploymentName\" . }}.{{ template \"kyverno.namespace\" . }}.svc"]` | Arguments passed to the container on the command line (see the rendering sketch after this table) |
| cleanupController.resources.limits | object | `{"memory":"128Mi"}` | Pod resource limits |
| cleanupController.resources.requests | object | `{"cpu":"100m","memory":"64Mi"}` | Pod resource requests |
| cleanupController.startupProbe | object | See [values.yaml](values.yaml) | Startup probe. The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
| cleanupController.livenessProbe | object | See [values.yaml](values.yaml) | Liveness probe. The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
| cleanupController.readinessProbe | object | See [values.yaml](values.yaml) | Readiness Probe. The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
| cleanupController.nodeSelector | object | `{}` | Node labels for pod assignment |
| cleanupController.tolerations | list | `[]` | List of node taints to tolerate |
| cleanupController.antiAffinity.enabled | bool | `true` | Pod antiAffinities toggle. Enabled by default but can be disabled if you want to schedule pods to the same node. |
| cleanupController.podAntiAffinity | object | See [values.yaml](values.yaml) | Pod anti affinity constraints. |
| cleanupController.podAffinity | object | `{}` | Pod affinity constraints. |
| cleanupController.nodeAffinity | object | `{}` | Node affinity constraints. |
| cleanupController.topologySpreadConstraints | list | `[]` | Topology spread constraints. |
| cleanupController.podSecurityContext | object | `{}` | Security context for the pod |
| cleanupController.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the containers |
| cleanupController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. |
| cleanupController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. |
| cleanupController.service.port | int | `443` | Service port. |
| cleanupController.service.type | string | `"ClusterIP"` | Service type. |
| cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |
| cleanupController.service.annotations | object | `{}` | Service annotations. |
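The `cleanupController.args` entry above embeds Helm template expressions that are expanded before the container starts. Helm's engine is built on Go's text/template, so a minimal, self-contained sketch of the mechanism looks like this; the data fields here are illustrative stand-ins for the chart's `kyverno.cleanup-controller.deploymentName` and `kyverno.namespace` helpers, not real chart values:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-ins for the values the chart helpers would produce.
	data := map[string]string{
		"DeploymentName": "kyverno-cleanup-controller",
		"Namespace":      "kyverno",
	}
	arg := "--cleanupService=https://{{ .DeploymentName }}.{{ .Namespace }}.svc"
	tmpl := template.Must(template.New("arg").Parse(arg))
	// Prints: --cleanupService=https://kyverno-cleanup-controller.kyverno.svc
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```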
## TLS Configuration

@@ -92,7 +92,7 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end -}}

 {{/* Create the default PodDisruptionBudget to use */}}
-{{- define "podDisruptionBudget.spec" -}}
+{{- define "kyverno.podDisruptionBudget.spec" -}}
 {{- if and .Values.podDisruptionBudget.minAvailable .Values.podDisruptionBudget.maxUnavailable }}
 {{- fail "Cannot set both .Values.podDisruptionBudget.minAvailable and .Values.podDisruptionBudget.maxUnavailable" -}}
 {{- end }}
@@ -10,7 +10,7 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/name: {{ template "kyverno.name" . }}
 app.kubernetes.io/part-of: {{ template "kyverno.name" . }}
-app.kubernetes.io/version: "{{ .Chart.Version }}"
+app.kubernetes.io/version: "{{ .Chart.Version | replace "+" "_" }}"
 helm.sh/chart: {{ template "kyverno.chart" . }}
 {{- end -}}
@@ -36,3 +36,25 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{ required "A service account name is required when `rbac.create` is set to `false`" .Values.cleanupController.rbac.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{- define "kyverno.cleanup-controller.securityContext" -}}
{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
{{ toYaml (omit .Values.cleanupController.securityContext "seccompProfile") }}
{{- else }}
{{ toYaml .Values.cleanupController.securityContext }}
{{- end }}
{{- end }}
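This helper drops the `seccompProfile` field on clusters older than 1.19, where it is not supported. Sprig's `semverCompare` is backed by the Masterminds/semver library; a rough Go sketch of the same gate, assuming that library (the function name `supportsSeccompProfile` is ours):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// supportsSeccompProfile mirrors the template's semverCompare "<1.19" check:
// the seccompProfile field is only kept on Kubernetes 1.19 and later.
func supportsSeccompProfile(kubeVersion string) (bool, error) {
	v, err := semver.NewVersion(kubeVersion) // tolerates a leading "v"
	if err != nil {
		return false, err
	}
	c, err := semver.NewConstraint(">=1.19")
	if err != nil {
		return false, err
	}
	return c.Check(v), nil
}

func main() {
	for _, ver := range []string{"v1.18.9", "v1.25.0"} {
		ok, err := supportsSeccompProfile(ver)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s keeps seccompProfile: %v\n", ver, ok)
	}
}
```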
{{/* Create the default PodDisruptionBudget to use */}}
{{- define "kyverno.cleanup-controller.podDisruptionBudget.spec" -}}
{{- if and .Values.cleanupController.podDisruptionBudget.minAvailable .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
{{- fail "Cannot set both .Values.cleanupController.podDisruptionBudget.minAvailable and .Values.cleanupController.podDisruptionBudget.maxUnavailable" -}}
{{- end }}
{{- if not .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
minAvailable: {{ default 1 .Values.cleanupController.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.cleanupController.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- end }}
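The helper's logic in words: setting both knobs fails the render; otherwise `maxUnavailable` wins when present, and `minAvailable` falls back to 1. A small Go sketch of the same decision, where the type and function names are ours rather than kyverno's:

```go
package main

import (
	"errors"
	"fmt"
)

// pdbSpec mirrors the two mutually exclusive PodDisruptionBudget knobs.
type pdbSpec struct {
	MinAvailable   string
	MaxUnavailable string
}

// render reproduces the template's branching: fail on both, prefer
// maxUnavailable, and default minAvailable to 1 otherwise.
func render(minAvailable, maxUnavailable string) (pdbSpec, error) {
	if minAvailable != "" && maxUnavailable != "" {
		return pdbSpec{}, errors.New("cannot set both minAvailable and maxUnavailable")
	}
	if maxUnavailable != "" {
		return pdbSpec{MaxUnavailable: maxUnavailable}, nil
	}
	if minAvailable == "" {
		minAvailable = "1" // the template's `default 1`
	}
	return pdbSpec{MinAvailable: minAvailable}, nil
}

func main() {
	fmt.Println(render("", ""))     // {1 } <nil>
	fmt.Println(render("", "40%"))  // { 40%} <nil>
	fmt.Println(render("2", "40%")) // error: cannot set both
}
```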
@@ -7,7 +7,11 @@ metadata:
     {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
   namespace: {{ template "kyverno.namespace" . }}
 spec:
-  replicas: 1
+  replicas: {{ .Values.cleanupController.replicas }}
+  {{- with .Values.cleanupController.updateStrategy }}
+  strategy:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 6 }}
@@ -16,6 +20,14 @@ spec:
      labels:
        {{- include "kyverno.cleanup-controller.labels" . | nindent 8 }}
    spec:
      {{- with .Values.cleanupController.image.pullSecrets }}
      imagePullSecrets:
        {{- tpl (toYaml .) $ | nindent 8 }}
      {{- end }}
      {{- with .Values.cleanupController.podSecurityContext }}
      securityContext:
        {{- tpl (toYaml .) $ | nindent 8 }}
      {{- end }}
      {{- with .Values.cleanupController.nodeSelector }}
      nodeSelector:
        {{- tpl (toYaml .) $ | nindent 8 }}
@@ -28,6 +40,32 @@ spec:
      topologySpreadConstraints:
        {{- tpl (toYaml .) $ | nindent 8 }}
      {{- end }}
      {{- with .Values.cleanupController.priorityClassName }}
      priorityClassName: {{ . | quote }}
      {{- end }}
      {{- with .Values.cleanupController.hostNetwork }}
      hostNetwork: {{ . }}
      {{- end }}
      {{- with .Values.cleanupController.dnsPolicy }}
      dnsPolicy: {{ . }}
      {{- end }}
      {{- if or .Values.cleanupController.antiAffinity.enabled .Values.cleanupController.podAffinity .Values.cleanupController.nodeAffinity }}
      affinity:
        {{- if .Values.cleanupController.antiAffinity.enabled }}
        {{- with .Values.cleanupController.podAntiAffinity }}
        podAntiAffinity:
          {{- tpl (toYaml .) $ | nindent 10 }}
        {{- end }}
        {{- end }}
        {{- with .Values.cleanupController.podAffinity }}
        podAffinity:
          {{- tpl (toYaml .) $ | nindent 10 }}
        {{- end }}
        {{- with .Values.cleanupController.nodeAffinity }}
        nodeAffinity:
          {{- tpl (toYaml .) $ | nindent 10 }}
        {{- end }}
      {{- end }}
      serviceAccountName: {{ template "kyverno.cleanup-controller.serviceAccountName" . }}
      containers:
        - name: controller
@@ -44,10 +82,18 @@ spec:
            {{- tpl (toYaml .) $ | nindent 12 }}
          {{- end }}
          env:
            - name: METRICS_CONFIG
              value: {{ template "kyverno.metricsConfigMapName" . }}
            - name: KYVERNO_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          {{- with .Values.cleanupController.resources }}
          resources: {{ tpl (toYaml .) $ | nindent 12 }}
          {{- end }}
          {{- if .Values.cleanupController.securityContext }}
          securityContext: {{ include "kyverno.cleanup-controller.securityContext" . | nindent 12 }}
          {{- end }}
          {{- with .Values.cleanupController.startupProbe }}
          startupProbe:
            {{- tpl (toYaml .) $ | nindent 12 }}
@@ -0,0 +1,20 @@
{{- if .Values.cleanupController.enabled -}}
{{- if (gt (int .Values.cleanupController.replicas) 1) -}}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
apiVersion: policy/v1
{{- else -}}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
spec:
  {{- include "kyverno.cleanup-controller.podDisruptionBudget.spec" . | indent 2 }}
  selector:
    matchLabels:
      {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 6 }}
{{- end -}}
{{- end -}}
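This new template only emits a PodDisruptionBudget when more than one replica is requested, and it prefers `policy/v1` when the cluster advertises it, falling back to the deprecated `policy/v1beta1`. A hedged Go sketch of that selection, where the capability map is a stand-in for Helm's `.Capabilities.APIVersions.Has`:

```go
package main

import "fmt"

// pdbAPIVersion mimics the template's capability check: prefer policy/v1,
// fall back to policy/v1beta1 on older clusters.
func pdbAPIVersion(has map[string]bool) string {
	if has["policy/v1/PodDisruptionBudget"] {
		return "policy/v1"
	}
	return "policy/v1beta1"
}

func main() {
	caps := map[string]bool{"policy/v1/PodDisruptionBudget": true}
	replicas := 3
	if replicas > 1 { // the template's `gt (int .replicas) 1` guard
		fmt.Println("apiVersion:", pdbAPIVersion(caps))
	}
}
```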
@@ -7,11 +7,12 @@ apiVersion: policy/v1beta1
 kind: PodDisruptionBudget
 metadata:
   name: {{ template "kyverno.fullname" . }}
-  labels: {{ include "kyverno.labels" . | nindent 4 }}
+  labels:
+    {{- include "kyverno.labels" . | nindent 4 }}
     app: kyverno
   namespace: {{ template "kyverno.namespace" . }}
 spec:
-  {{- include "podDisruptionBudget.spec" . | indent 2 }}
+  {{- include "kyverno.podDisruptionBudget.spec" . | indent 2 }}
   selector:
     matchLabels: {{ include "kyverno.matchLabels" . | nindent 6 }}
       app: kyverno
@@ -477,6 +477,10 @@ grafana:
   annotations: {}

 cleanupController:
+  # -- Enable cleanup controller.
+  enabled: true
+
   rbac:
     # -- Create RBAC resources
     create: true
@@ -493,10 +497,6 @@ cleanupController:
   # resources:
   # - pods

-  # -- Enable cleanup controller.
-  enabled: true
-
   image:
     # -- Image registry
     registry:
@@ -514,10 +514,144 @@ cleanupController:
    pullSecrets: []
    # - secretName

  # -- (int) Desired number of pods
  replicas: ~

  # -- Deployment update strategy.
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  # @default -- See [values.yaml](values.yaml)
  updateStrategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 40%
    type: RollingUpdate

  # -- Optional priority class
  priorityClassName: ''

  # -- Change `hostNetwork` to `true` when you want the pod to share its host's network namespace.
  # Useful for situations like when you end up dealing with a custom CNI over Amazon EKS.
  # Update the `dnsPolicy` accordingly as well to suit the host network mode.
  hostNetwork: false

  # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
  # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
  # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
  dnsPolicy: ClusterFirst

  # -- Arguments passed to the container on the command line
  args:
    - --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc

  resources:
    # -- Pod resource limits
    limits:
      memory: 128Mi
    # -- Pod resource requests
    requests:
      cpu: 100m
      memory: 64Mi

  # -- Startup probe.
  # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
  # @default -- See [values.yaml](values.yaml)
  startupProbe:
    httpGet:
      path: /health/liveness
      port: 9443
      scheme: HTTPS
    failureThreshold: 20
    initialDelaySeconds: 2
    periodSeconds: 6

  # -- Liveness probe.
  # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
  # @default -- See [values.yaml](values.yaml)
  livenessProbe:
    httpGet:
      path: /health/liveness
      port: 9443
      scheme: HTTPS
    initialDelaySeconds: 15
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 2
    successThreshold: 1

  # -- Readiness Probe.
  # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
  # @default -- See [values.yaml](values.yaml)
  readinessProbe:
    httpGet:
      path: /health/readiness
      port: 9443
      scheme: HTTPS
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1

  # -- Node labels for pod assignment
  nodeSelector: {}

  # -- List of node taints to tolerate
  tolerations: []

  antiAffinity:
    # -- Pod antiAffinities toggle.
    # Enabled by default but can be disabled if you want to schedule pods to the same node.
    enabled: true

  # -- Pod anti affinity constraints.
  # @default -- See [values.yaml](values.yaml)
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          labelSelector:
            matchExpressions:
              - key: app.kubernetes.io/component
                operator: In
                values:
                  - cleanup-controller
          topologyKey: kubernetes.io/hostname

  # -- Pod affinity constraints.
  podAffinity: {}

  # -- Node affinity constraints.
  nodeAffinity: {}

  # -- Topology spread constraints.
  topologySpreadConstraints: []

  # -- Security context for the pod
  podSecurityContext: {}

  # -- Security context for the containers
  securityContext:
    runAsNonRoot: true
    privileged: false
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    seccompProfile:
      type: RuntimeDefault

  podDisruptionBudget:
    # -- Configures the minimum available pods for disruptions.
    # Cannot be used if `maxUnavailable` is set.
    minAvailable: 1
    # -- Configures the maximum unavailable pods for disruptions.
    # Cannot be used if `minAvailable` is set.
    maxUnavailable:

  service:
    # -- Service port.
    port: 443
@@ -528,30 +662,3 @@ cleanupController:
     nodePort:
   # -- Service annotations.
   annotations: {}
-
-  # -- Startup probe.
-  # The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  startupProbe: {}
-
-  # -- Liveness probe.
-  # The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  livenessProbe: {}
-
-  # -- Readiness Probe.
-  # The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
-  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-  # @default -- See [values.yaml](values.yaml)
-  readinessProbe: {}
-
-  # -- Node labels for pod assignment
-  nodeSelector: {}
-
-  # -- List of node taints to tolerate
-  tolerations: []
-
-  # -- Topology spread constraints.
-  topologySpreadConstraints: []
@@ -16,6 +16,7 @@ import (
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
	"github.com/kyverno/kyverno/pkg/metrics"
	"github.com/kyverno/kyverno/pkg/webhooks"
	corev1 "k8s.io/api/core/v1"
	kubeinformers "k8s.io/client-go/informers"
)
@@ -24,10 +25,34 @@ const (
 	resyncPeriod = 15 * time.Minute
 )

+// TODO:
+// - implement probes
+// - better certs management
+// - supports certs in cronjob
+// - leader election support
+// - helm service monitor
+// - helm name and fullname
+// - helm review labels / selectors
+// - helm metrics service
+
+type probes struct{}
+
+func (probes) IsReady() bool {
+	return true
+}
+
+func (probes) IsLive() bool {
+	return true
+}
+
 func main() {
-	var cleanupService string
+	var (
+		cleanupService string
+		dumpPayload    bool
+	)
 	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
 	flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")
+	flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
@@ -91,6 +116,11 @@ func main() {
		},
		admissionHandlers.Validate,
		cleanupHandlers.Cleanup,
		metricsConfig,
		webhooks.DebugModeOptions{
			DumpPayload: dumpPayload,
		},
		probes{},
	)
	// start server
	server.Run(ctx.Done())
@@ -8,8 +8,11 @@ import (
	"github.com/go-logr/logr"
	"github.com/julienschmidt/httprouter"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
	"github.com/kyverno/kyverno/pkg/logging"
	"github.com/kyverno/kyverno/pkg/metrics"
	"github.com/kyverno/kyverno/pkg/webhooks"
	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
	admissionv1 "k8s.io/api/admission/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -37,41 +40,57 @@ type (
	CleanupHandler = func(context.Context, logr.Logger, string, time.Time) error
)

type Probes interface {
	IsReady() bool
	IsLive() bool
}

// NewServer creates new instance of server accordingly to given configuration
func NewServer(
	tlsProvider TlsProvider,
	validationHandler ValidationHandler,
	cleanupHandler CleanupHandler,
	metricsConfig metrics.MetricsConfigManager,
	debugModeOpts webhooks.DebugModeOptions,
	probes Probes,
) Server {
	policyLogger := logging.WithName("cleanup-policy")
	cleanupLogger := logging.WithName("cleanup")
	cleanupHandlerFunc := func(w http.ResponseWriter, r *http.Request) {
		policy := r.URL.Query().Get("policy")
		logger := cleanupLogger.WithValues("policy", policy)
		err := cleanupHandler(r.Context(), logger, policy, time.Now())
		if err == nil {
			w.WriteHeader(http.StatusOK)
		} else {
			if apierrors.IsNotFound(err) {
				w.WriteHeader(http.StatusNotFound)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}
	}
	mux := httprouter.New()
	mux.HandlerFunc(
		"POST",
		validatingWebhookServicePath,
		handlers.FromAdmissionFunc("VALIDATE", validationHandler).
			WithDump(debugModeOpts.DumpPayload).
			WithSubResourceFilter().
			WithMetrics(policyLogger, metricsConfig.Config(), metrics.WebhookValidating).
			WithAdmission(policyLogger.WithName("validate")).
			ToHandlerFunc(),
	)
	mux.HandlerFunc(
		"GET",
		cleanup.CleanupServicePath,
		handlers.HttpHandler(cleanupHandlerFunc).
			WithMetrics(policyLogger).
			WithTrace("CLEANUP").
			ToHandlerFunc(),
	)
	mux.HandlerFunc("GET", config.LivenessServicePath, handlers.Probe(probes.IsLive))
	mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(probes.IsReady))
	return &server{
		server: &http.Server{
			Addr: ":9443",
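The body of `handlers.Probe` is not shown in this diff; it presumably adapts a `func() bool` into an `http.HandlerFunc` for the liveness and readiness routes. A minimal sketch of what such an adapter could look like, with names and status codes as our assumptions rather than kyverno's actual implementation:

```go
package main

import "net/http"

// probe is a hypothetical adapter in the spirit of handlers.Probe: it turns
// a boolean health check into an HTTP handler.
func probe(check func() bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if check() {
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusServiceUnavailable)
	}
}

func main() {
	mux := http.NewServeMux()
	// Static always-true probes, matching the probes{} type from main.go.
	mux.HandleFunc("/health/liveness", probe(func() bool { return true }))
	mux.HandleFunc("/health/readiness", probe(func() bool { return true }))
	_ = http.ListenAndServe(":9443", mux) // the deployment's probe port
}
```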
@@ -2,6 +2,7 @@ package handlers
import (
	"context"
	"net/http"
	"strings"
	"time"
@@ -11,6 +12,7 @@ import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	admissionv1 "k8s.io/api/admission/v1"
)
@@ -20,14 +22,14 @@ func (inner AdmissionHandler) WithMetrics(logger logr.Logger, metricsConfig conf
 func (inner AdmissionHandler) withMetrics(logger logr.Logger, metricsConfig config.MetricsConfiguration, attrs ...attribute.KeyValue) AdmissionHandler {
 	meter := global.MeterProvider().Meter(metrics.MeterName)
-	admissionRequestsMetric, err := meter.SyncInt64().Counter(
+	requestsMetric, err := meter.SyncInt64().Counter(
 		"kyverno_admission_requests_total",
 		instrument.WithDescription("can be used to track the number of admission requests encountered by Kyverno in the cluster"),
 	)
 	if err != nil {
 		logger.Error(err, "Failed to create instrument, kyverno_admission_requests_total")
 	}
-	admissionReviewDurationMetric, err := meter.SyncFloat64().Histogram(
+	durationMetric, err := meter.SyncFloat64().Histogram(
 		"kyverno_admission_review_duration_seconds",
 		instrument.WithDescription("can be used to track the latencies (in seconds) associated with the entire individual admission review. For example, if an incoming request trigger, say, five policies, this metric will track the e2e latency associated with the execution of all those policies"),
 	)
@@ -50,17 +52,60 @@ func (inner AdmissionHandler) withMetrics(logger logr.Logger, metricsConfig conf
 			attribute.Bool("request_allowed", allowed),
 		}
 		attributes = append(attributes, attrs...)
-		if admissionReviewDurationMetric != nil {
+		if durationMetric != nil {
 			defer func() {
 				latency := int64(time.Since(startTime))
-				admissionReviewLatencyDurationInSeconds := float64(latency) / float64(1000*1000*1000)
-				admissionReviewDurationMetric.Record(ctx, admissionReviewLatencyDurationInSeconds, attributes...)
+				durationInSeconds := float64(latency) / float64(1000*1000*1000)
+				durationMetric.Record(ctx, durationInSeconds, attributes...)
 			}()
 		}
-		if admissionRequestsMetric != nil {
-			admissionRequestsMetric.Add(ctx, 1, attributes...)
+		if requestsMetric != nil {
+			requestsMetric.Add(ctx, 1, attributes...)
 		}
 		return response
 	}
 }
+
+func (inner HttpHandler) WithMetrics(logger logr.Logger, attrs ...attribute.KeyValue) HttpHandler {
+	return inner.withMetrics(logger, attrs...).WithTrace("METRICS")
+}
+
+func (inner HttpHandler) withMetrics(logger logr.Logger, attrs ...attribute.KeyValue) HttpHandler {
+	meter := global.MeterProvider().Meter(metrics.MeterName)
+	requestsMetric, err := meter.SyncInt64().Counter(
+		"kyverno_http_requests_total",
+		instrument.WithDescription("can be used to track the number of http requests"),
+	)
+	if err != nil {
+		logger.Error(err, "Failed to create instrument, kyverno_http_requests_total")
+	}
+	durationMetric, err := meter.SyncFloat64().Histogram(
+		"kyverno_http_requests_duration_seconds",
+		instrument.WithDescription("can be used to track the latencies (in seconds) associated with the entire individual http request."),
+	)
+	if err != nil {
+		logger.Error(err, "Failed to create instrument, kyverno_http_requests_duration_seconds")
+	}
+	return func(writer http.ResponseWriter, request *http.Request) {
+		startTime := time.Now()
+		attributes := []attribute.KeyValue{
+			semconv.HTTPRequestContentLengthKey.Int64(request.ContentLength),
+			semconv.HTTPHostKey.String(request.Host),
+			semconv.HTTPMethodKey.String(request.Method),
+			semconv.HTTPURLKey.String(request.RequestURI),
+		}
+		attributes = append(attributes, attrs...)
+		if requestsMetric != nil {
+			requestsMetric.Add(request.Context(), 1, attributes...)
+		}
+		if durationMetric != nil {
+			defer func() {
+				latency := int64(time.Since(startTime))
+				durationInSeconds := float64(latency) / float64(1000*1000*1000)
+				durationMetric.Record(request.Context(), durationInSeconds, attributes...)
+			}()
+		}
+		inner(writer, request)
+	}
+}
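The decorator pattern above generalizes: any `http.HandlerFunc` can be wrapped to bump a counter on entry and record latency on exit via `defer`. A dependency-free sketch of the same shape using only the standard library, with plain variables standing in for the OpenTelemetry instruments:

```go
package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
	"time"
)

// totalRequests is a stand-in for the kyverno_http_requests_total counter.
var totalRequests atomic.Int64

// withMetrics decorates a handler the same way the diff's withMetrics does:
// count the request up front, record its duration on the way out.
func withMetrics(inner http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		totalRequests.Add(1)
		defer func() {
			// Stand-in for the kyverno_http_requests_duration_seconds histogram.
			fmt.Printf("%s %s took %.3fs\n", r.Method, r.URL.Path, time.Since(start).Seconds())
		}()
		inner(w, r)
	}
}

func main() {
	http.HandleFunc("/cleanup", withMetrics(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	_ = http.ListenAndServe(":8080", nil)
}
```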