
Merge branch 'master' into 744_deny_requests

# Conflicts:
#	pkg/webhooks/mutation.go
#	pkg/webhooks/server.go
#	pkg/webhooks/validation.go
Author: Shuting Zhao, 2020-05-18 12:32:42 -07:00
Commit: ad4f06f22d
36 changed files with 1579 additions and 251 deletions

Makefile

@@ -10,7 +10,7 @@ TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')
REGISTRY=index.docker.io
REPO=$(REGISTRY)/nirmata/kyverno
IMAGE_TAG=$(GIT_VERSION)
IMAGE_TAG?=$(GIT_VERSION)
GOOS ?= $(shell go env GOOS)
PACKAGE ?=github.com/nirmata/kyverno
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
@@ -53,6 +53,10 @@ docker-push-initContainer:
.PHONY: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
KYVERNO_PATH := cmd/kyverno
KYVERNO_IMAGE := kyverno
local:
go build -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/
kyverno:
GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go

charts/kyverno/Chart.yaml (new file, +19)

@@ -0,0 +1,19 @@
apiVersion: v2
name: kyverno
version: 0.0.1
appVersion: v1.1.5
description: Kubernetes Native Policy Management
keywords:
- kubernetes
- nirmata
- policy agent
- validating webhook
- admissions controller
home: https://kyverno.io/
sources:
- https://github.com/nirmata/kyverno
maintainers:
- name: Nirmata
url: https://kyverno.io/
engine: gotpl
kubeVersion: ">=1.10.0-0"

charts/kyverno/README.md (new file, +104)

@@ -0,0 +1,104 @@
# kyverno
[Kyverno](https://kyverno.io) is a Kubernetes Native Policy Management engine. It allows you to:
* Manage policies as Kubernetes resources.
* Validate, mutate, and generate configurations.
* Select resources based on labels and wildcards.
* View policy enforcement as events.
* Detect policy violations for existing resources.
## TL;DR;
```console
$ helm install --create-namespace -n kyverno kyverno ./charts/kyverno
```
## Introduction
This chart bootstraps a Kyverno deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Installing the Chart
Kyverno makes assumptions about the naming of namespaces and resources. Therefore, the chart must be installed with the default release name `kyverno` (the default if `--name` is omitted) and in the namespace `kyverno`:
```console
$ helm install --namespace kyverno kyverno ./charts/kyverno
```
Note that Helm by default expects the namespace to already exist before running `helm install`. If you want Helm to create the namespace, add `--create-namespace` to the command.
The command deploys kyverno on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `kyverno` deployment:
```console
$ helm delete -n kyverno kyverno
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the kyverno chart and their default values.
Parameter | Description | Default
--- | --- | ---
`affinity` | node/pod affinities | `nil`
`createSelfSignedCert` | generate a self-signed certificate and certificate authority. If `false`, Kyverno defaults to using a kube-controller-manager CA-signed certificate or an existing cert secret. | `false`
`config.existingConfig` | existing Kubernetes configmap to use for the resource filters configuration | `nil`
`config.resourceFilters` | list of resource type filters to be skipped by the kyverno policy engine. See [documentation](https://github.com/nirmata/kyverno/blob/master/documentation/installation.md#filter-kubernetes-resources-that-admission-webhook-should-not-process) for details | `["[Event,*,*]","[*,kube-system,*]","[*,kube-public,*]","[*,kube-node-lease,*]","[Node,*,*]","[APIService,*,*]","[TokenReview,*,*]","[SubjectAccessReview,*,*]","[*,kyverno,*]"]`
`extraArgs` | list of extra arguments to give the binary | `[]`
`fullnameOverride` | override the expanded name of the chart | `nil`
`generatecontrollerExtraResources` | extra resource types Kyverno is allowed to generate | `[]`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.pullSecrets` | Specify image pull secrets | `[]` (does not add image pull secrets to deployed pods)
`image.repository` | Image repository | `nirmata/kyverno`
`image.tag` | Image tag | `nil`
`initImage.pullPolicy` | Init image pull policy | `nil`
`initImage.repository` | Init image repository | `nirmata/kyvernopre`
`initImage.tag` | Init image tag | `nil`
`livenessProbe` | liveness probe configuration | `{}`
`nameOverride` | override the name of the chart | `nil`
`nodeSelector` | node labels for pod assignment | `{}`
`podAnnotations` | annotations to add to each pod | `{}`
`podLabels` | additional labels to add to each pod | `{}`
`podSecurityContext` | security context for the pod | `{}`
`priorityClassName` | priorityClassName | `nil`
`rbac.create` | create cluster roles, cluster role bindings, and service account | `true`
`rbac.serviceAccount.create` | create a service account | `true`
`rbac.serviceAccount.name` | the service account name | `nil`
`rbac.serviceAccount.annotations` | annotations for the service account | `{}`
`readinessProbe` | readiness probe configuration | `{}`
`replicaCount` | desired number of pods | `1`
`resources` | pod resource requests & limits | `{}`
`service.annotations` | annotations to add to the service | `{}`
`service.nodePort` | node port | `nil`
`service.port` | port for the service | `443`
`service.type` | type of service | `ClusterIP`
`tolerations` | list of node taints to tolerate | `[]`
`securityContext` | security context configuration | `{}`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install --namespace kyverno kyverno ./charts/kyverno \
--set=image.tag=v0.0.2,resources.limits.cpu=200m
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install --namespace kyverno kyverno ./charts/kyverno -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## TLS Configuration
If `createSelfSignedCert` is `true`, Helm will take care of the steps of creating an external self-signed certificate described in option 2 of the [installation documentation](https://github.com/nirmata/kyverno/blob/master/documentation/installation.md#option-2-use-your-own-ca-signed-certificate).
If `createSelfSignedCert` is `false`, Kyverno will generate a certificate pair using the kube-controller-manager, or you can provide your own TLS CA and signed-key pair and create the secrets yourself as described in the documentation.
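For the bring-your-own-certificate case, the chart's secret template implies secrets of the following shape; a minimal sketch, assuming the release is installed as `kyverno` in the `kyverno` namespace (certificate data is placeholder):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: kyverno-svc.kyverno.svc.kyverno-tls-ca    # <service>.<namespace>.svc.kyverno-tls-ca
  namespace: kyverno
data:
  rootCA.crt: <base64-encoded CA certificate>
---
apiVersion: v1
kind: Secret
metadata:
  name: kyverno-svc.kyverno.svc.kyverno-tls-pair  # <service>.<namespace>.svc.kyverno-tls-pair
  namespace: kyverno
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded certificate>
  tls.key: <base64-encoded private key>
```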

@@ -0,0 +1,446 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterpolicies.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Cluster
names:
kind: ClusterPolicy
plural: clusterpolicies
singular: clusterpolicy
shortNames:
- cpol
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
status: {}
spec:
required:
- rules
properties:
# default values to be handled by user
validationFailureAction:
type: string
enum:
- enforce # blocks the resource API request if a rule fails.
- audit # allows resource creation and reports the failed validation rules as violations (default)
background:
type: boolean
rules:
type: array
items:
type: object
required:
- name
- match
properties:
name:
type: string
match:
type: object
required:
- resources
properties:
roles:
type: array
items:
type: string
clusterRoles:
type: array
items:
type: string
subjects:
type: array
items:
type: object
required:
- kind
- name
properties:
kind:
type: string
apiGroup:
type: string
name:
type: string
Namespace:
type: string
resources:
type: object
minProperties: 1
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespaces:
type: array
items:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
exclude:
type: object
properties:
roles:
type: array
items:
type: string
clusterRoles:
type: array
items:
type: string
subjects:
type: array
items:
type: object
required:
- kind
- name
properties:
kind:
type: string
apiGroup:
type: string
name:
type: string
Namespace:
type: string
resources:
type: object
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespaces:
type: array
items:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
preconditions:
type: array
items:
type: object
required:
- key # can be of any type
- operator # typed
- value # can be of any type
mutate:
type: object
properties:
overlay:
AnyValue: {}
patches:
type: array
items:
type: object
required:
- path
- op
properties:
path:
type: string
op:
type: string
enum:
- add
- replace
- remove
value:
AnyValue: {}
validate:
type: object
properties:
message:
type: string
pattern:
AnyValue: {}
anyPattern:
AnyValue: {}
generate:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
clone:
type: object
required:
- namespace
- name
properties:
namespace:
type: string
name:
type: string
data:
AnyValue: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterpolicyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Cluster
names:
kind: ClusterPolicyViolation
plural: clusterpolicyviolations
singular: clusterpolicyviolation
shortNames:
- cpolv
subresources:
status: {}
additionalPrinterColumns:
- name: Policy
type: string
description: The policy that resulted in the violation
JSONPath: .spec.policy
- name: ResourceKind
type: string
description: The resource kind that caused the violation
JSONPath: .spec.resource.kind
- name: ResourceName
type: string
description: The resource name that caused the violation
JSONPath: .spec.resource.name
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
- rules
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- message
properties:
name:
type: string
type:
type: string
message:
type: string
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Namespaced
names:
kind: PolicyViolation
plural: policyviolations
singular: policyviolation
shortNames:
- polv
subresources:
status: {}
additionalPrinterColumns:
- name: Policy
type: string
description: The policy that resulted in the violation
JSONPath: .spec.policy
- name: ResourceKind
type: string
description: The resource kind that caused the violation
JSONPath: .spec.resource.kind
- name: ResourceName
type: string
description: The resource name that caused the violation
JSONPath: .spec.resource.name
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
- rules
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- message
properties:
name:
type: string
type:
type: string
message:
type: string
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: generaterequests.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Namespaced
names:
kind: GenerateRequest
plural: generaterequests
singular: generaterequest
shortNames:
- gr
subresources:
status: {}
additionalPrinterColumns:
- name: Policy
type: string
description: The policy that resulted in the violation
JSONPath: .spec.policy
- name: ResourceKind
type: string
description: The resource kind that caused the violation
JSONPath: .spec.resource.kind
- name: ResourceName
type: string
description: The resource name that caused the violation
JSONPath: .spec.resource.name
- name: ResourceNamespace
type: string
description: The resource namespace that caused the violation
JSONPath: .spec.resource.namespace
- name: status
type: string
description: Current state of generate request
JSONPath: .status.state
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
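To make the ClusterPolicy schema above concrete, here is a minimal sketch of a policy that exercises the required `rules`, `name`, and `match.resources` fields (the policy name and pattern are illustrative, not from this commit):
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-app-label        # illustrative name
spec:
  validationFailureAction: audit # "enforce" would block the API request instead
  rules:
    - name: check-app-label
      match:
        resources:
          kinds:
            - Pod
      validate:
        message: "label 'app' is required"
        pattern:
          metadata:
            labels:
              app: "?*"          # any non-empty value
```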

charts/kyverno/templates/_helpers.tpl

@@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart. */}}
{{- define "kyverno.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kyverno.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* Create chart name and version as used by the chart label. */}}
{{- define "kyverno.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* Helm required labels */}}
{{- define "kyverno.labels" -}}
app.kubernetes.io/name: {{ template "kyverno.name" . }}
helm.sh/chart: {{ template "kyverno.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/* matchLabels */}}
{{- define "kyverno.matchLabels" -}}
app.kubernetes.io/name: {{ template "kyverno.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/* Get the config map name. */}}
{{- define "kyverno.configMapName" -}}
{{- printf "%s" (default (include "kyverno.fullname" .) .Values.config.existingConfig) -}}
{{- end -}}
{{/* Create the name of the service to use */}}
{{- define "kyverno.serviceName" -}}
{{- printf "%s-svc" (include "kyverno.fullname" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* Create the name of the service account to use */}}
{{- define "kyverno.serviceAccountName" -}}
{{- if .Values.rbac.serviceAccount.create -}}
{{ default (include "kyverno.fullname" .) .Values.rbac.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.rbac.serviceAccount.name }}
{{- end -}}
{{- end -}}
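Assuming the release name `kyverno` that the README requires, the helpers above resolve as follows (a sketch, not actual template output):
```yaml
# kyverno.name          -> kyverno
# kyverno.fullname      -> kyverno     (the release name already contains the chart name)
# kyverno.serviceName   -> kyverno-svc
# kyverno.configMapName -> kyverno     (unless config.existingConfig is set)
```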

@@ -0,0 +1,147 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:policyviolations
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:webhook
rules:
# Dynamic creation of webhooks, events & certs
- apiGroups:
- '*'
resources:
- events
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
- certificatesigningrequests
- certificatesigningrequests/approval
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
- certificatesigningrequests/approval
- certificatesigningrequests/status
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- signers
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:userinfo
rules:
# get the roleRef for incoming api-request user
- apiGroups:
- "*"
resources:
- rolebindings
- clusterrolebindings
- configmaps
verbs:
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:customresources
rules:
# Kyverno CRs
- apiGroups:
- '*'
resources:
- clusterpolicies
- clusterpolicies/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
- policyviolations/status
- generaterequests
- generaterequests/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:policycontroller
rules:
# background processing, identify all existing resources
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:generatecontroller
rules:
# process generate rules to generate resources
- apiGroups:
- "*"
resources:
- namespaces
- networkpolicies
- secrets
- configmaps
- resourcequotas
- limitranges
- clusterroles
- rolebindings
- clusterrolebindings
{{- range .Values.generatecontrollerExtraResources }}
- {{ . }}
{{- end }}
verbs:
- create
- update
- delete
- get
# dynamic watches on trigger resources for generate rules
# re-evaluate the policy if the resource is updated
- apiGroups:
- '*'
resources:
- namespaces
verbs:
- watch
{{- end }}
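The range over `.Values.generatecontrollerExtraResources` above extends the generate controller's permissions beyond the built-in resource list; a values sketch (the extra resource named here is hypothetical):
```yaml
generatecontrollerExtraResources:
  - poddisruptionbudgets   # hypothetical extra resource that generate rules may create
```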

@@ -0,0 +1,66 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:webhook
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:userinfo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:userinfo
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:customresources
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:customresources
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:policycontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:policycontroller
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:generatecontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:generatecontroller
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

@@ -0,0 +1,10 @@
{{- if (not .Values.config.existingConfig) }}
apiVersion: v1
kind: ConfigMap
metadata:
labels: {{ include "kyverno.labels" . | nindent 4 }}
name: {{ template "kyverno.configMapName" . }}
data:
# resource types to be skipped by kyverno policy engine
resourceFilters: {{ join "" .Values.config.resourceFilters | quote }}
{{- end -}}
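With the default `resourceFilters` and a release named `kyverno`, the template above renders roughly the following (labels omitted; note the entries are joined without separators):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kyverno
data:
  resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
```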

@@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "kyverno.fullname" . }}
labels: {{ include "kyverno.labels" . | nindent 4 }}
spec:
selector:
matchLabels: {{ include "kyverno.matchLabels" . | nindent 6 }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels: {{ include "kyverno.labels" . | nindent 8 }}
{{- range $key, $value := .Values.podLabels }}
{{ $key }}: {{ $value }}
{{- end }}
{{- with .Values.podAnnotations }}
annotations: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{ tpl (toYaml .) $ | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "kyverno.serviceAccountName" . }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName | quote }}
{{- end }}
initContainers:
- name: kyverno-pre
image: {{ .Values.initImage.repository }}:{{ default .Chart.AppVersion (default .Values.image.tag .Values.initImage.tag) }}
imagePullPolicy: {{ default .Values.image.pullPolicy .Values.initImage.pullPolicy }}
containers:
- name: kyverno
image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.extraArgs }}
args: {{ tpl (toYaml .) $ | nindent 12 }}
{{- end }}
{{- with .Values.resources }}
resources: {{ tpl (toYaml .) $ | nindent 12 }}
{{- end }}
ports:
- containerPort: 443
name: https
protocol: TCP
env:
- name: INIT_CONFIG
value: {{ template "kyverno.configMapName" . }}
{{- with .Values.livenessProbe }}
livenessProbe: {{ tpl (toYaml .) $ | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe: {{ tpl (toYaml .) $ | nindent 12 }}
{{- end }}
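The tag fall-through in the two image references above means the init container inherits the main image settings unless explicitly overridden; a values sketch (tag values are illustrative):
```yaml
image:
  repository: nirmata/kyverno
  tag: v1.1.5   # falls back to the chart appVersion when unset
initImage:
  repository: nirmata/kyvernopre
  tag:          # empty: inherits image.tag, then the chart appVersion
  pullPolicy:   # empty: inherits image.pullPolicy
```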

@@ -0,0 +1,23 @@
{{- if .Values.createSelfSignedCert }}
{{- $ca := .ca | default (genCA (printf "*.%s.svc" .Release.Namespace) 1024) -}}
{{- $cert := genSignedCert (printf "%s.%s.svc" (include "kyverno.serviceName" .) .Release.Namespace) nil nil 1024 $ca -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "kyverno.serviceName" . }}.{{ .Release.Namespace }}.svc.kyverno-tls-ca
labels: {{ include "kyverno.labels" . | nindent 4 }}
data:
rootCA.crt: {{ $ca.Cert | b64enc }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ template "kyverno.serviceName" . }}.{{ .Release.Namespace }}.svc.kyverno-tls-pair
labels: {{ include "kyverno.labels" . | nindent 4 }}
annotations:
self-signed-cert: "true"
type: kubernetes.io/tls
data:
tls.key: {{ $cert.Key | b64enc }}
tls.crt: {{ $cert.Cert | b64enc }}
{{- end -}}

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "kyverno.serviceName" . }}
labels: {{ include "kyverno.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations: {{ tpl (toYaml .) $ | nindent 4 }}
{{- end }}
spec:
ports:
- port: {{ .Values.service.port }}
targetPort: https
protocol: TCP
name: https
{{- if and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector: {{ include "kyverno.matchLabels" . | nindent 4 }}
type: {{ .Values.service.type }}
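The `nodePort` above is only rendered when the service type is `NodePort`; a values sketch (the port number is illustrative):
```yaml
service:
  type: NodePort
  port: 443
  nodePort: 30443   # ignored unless type is NodePort
```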

@@ -0,0 +1,10 @@
{{- if .Values.rbac.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kyverno.serviceAccountName" . }}
labels: {{ include "kyverno.labels" . | nindent 4 }}
{{- if .Values.rbac.serviceAccount.annotations }}
annotations: {{ toYaml .Values.rbac.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end }}

charts/kyverno/values.yaml (new file, +126)

@@ -0,0 +1,126 @@
nameOverride:
fullnameOverride:
rbac:
create: true
serviceAccount:
create: true
name:
annotations: {}
# example.com/annotation: value
image:
repository: nirmata/kyverno
# Defaults to appVersion in Chart.yaml if omitted
tag:
pullPolicy: IfNotPresent
pullSecrets: []
# - secretName
initImage:
repository: nirmata/kyvernopre
# If initImage.tag is missing, defaults to image.tag
tag:
# If initImage.pullPolicy is missing, defaults to image.pullPolicy
pullPolicy:
# No pull secrets just for initImage; just add to image.pullSecrets
replicaCount: 1
podLabels: {}
# example.com/label: foo
podAnnotations: {}
# example.com/annotation: foo
podSecurityContext: {}
affinity: {}
nodeSelector: {}
tolerations: []
extraArgs: []
# - --fqdn-as-cn
# - --webhooktimeout=4
resources:
# limits:
# cpu: 1000m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 100Mi
## Liveness Probe. The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
# httpGet:
# path: /healthz
# port: https
# scheme: HTTPS
# initialDelaySeconds: 10
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 2
# successThreshold: 1
## Readiness Probe. The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
readinessProbe:
# httpGet:
# path: /healthz
# port: https
# scheme: HTTPS
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# TODO(mbarrien): Should we just list all resources for the
# generatecontroller in here rather than having defaults hard-coded?
generatecontrollerExtraResources:
# - ResourceA
# - ResourceB
config:
# resource types to be skipped by kyverno policy engine
# Make sure to surround each entry in quotes so that it doesn't get parsed
# as a nested YAML list. These are joined together without spaces in the ConfigMap.
resourceFilters:
- "[Event,*,*]"
- "[*,kube-system,*]"
- "[*,kube-public,*]"
- "[*,kube-node-lease,*]"
- "[Node,*,*]"
- "[APIService,*,*]"
- "[TokenReview,*,*]"
- "[SubjectAccessReview,*,*]"
- "[*,kyverno,*]"
# Or give the name of an existing config map (ignores default/provided resourceFilters)
existingConfig:
# existingConfig: init-config
service:
port: 443
type: ClusterIP
# Only used if service.type is NodePort
nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# Kyverno requires a certificate key pair and corresponding certificate authority
# to properly register its webhooks. This can be done in one of 3 ways:
# 1) Use kube-controller-manager to generate a CA-signed certificate (preferred)
# 2) Provide your own CA and cert.
# In this case, you will need to create a certificate with a specific name and data structure.
# As long as you follow the naming scheme, it will be automatically picked up.
# kyverno-svc.(namespace).svc.kyverno-tls-ca (with data entry named rootCA.crt)
# kyverno-svc.(namespace).svc.kyverno-tls-pair (with data entries named tls.key and tls.crt)
# 3) Let Helm generate a self-signed cert by setting createSelfSignedCert to true
# If letting Kyverno create its own CA or providing your own, make sure createSelfSignedCert is false
createSelfSignedCert: false

cmd/kyverno/main.go

@@ -33,6 +33,8 @@ import (
log "sigs.k8s.io/controller-runtime/pkg/log"
)
const resyncPeriod = 15 * time.Minute
var (
kubeconfig string
serverIP string
@@ -61,15 +63,11 @@ func main() {
// Generate CSR with CN as FQDN due to https://github.com/nirmata/kyverno/issues/542
flag.BoolVar(&fqdncn, "fqdn-as-cn", false, "use FQDN as Common Name in CSR")
flag.Parse()
version.PrintVersionInfo(log.Log)
// cleanUp Channel
cleanUp := make(chan struct{})
// handle os signals
stopCh := signal.SetupSignalHandler()
// CLIENT CONFIG
clientConfig, err := config.CreateClientConfig(kubeconfig, log.Log)
if err != nil {
setupLog.Error(err, "Failed to build kubeconfig")
@@ -88,39 +86,31 @@ func main() {
// DYNAMIC CLIENT
// - client for all registered resources
// - invalidate local cache of registered resource every 10 seconds
client, err := dclient.NewClient(clientConfig, 10*time.Second, stopCh, log.Log)
client, err := dclient.NewClient(clientConfig, 5*time.Minute, stopCh, log.Log)
if err != nil {
setupLog.Error(err, "Failed to create client")
os.Exit(1)
}
// CRD CHECK
// - verify if the CRD for Policy & PolicyViolation are available
if !utils.CRDInstalled(client.DiscoveryClient, log.Log) {
setupLog.Error(fmt.Errorf("pre-requisite CRDs not installed"), "Failed to create watch on kyverno CRDs")
os.Exit(1)
}
// KUBERNETES CLIENT
kubeClient, err := utils.NewKubeClient(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create kubernetes client")
os.Exit(1)
}
// TODO(shuting): To be removed for v1.2.0
// TODO: To be removed for v1.2.0
utils.CleanupOldCrd(client, log.Log)
// KUBERNETES RESOURCES INFORMER
// watches namespace resource
// - cache resync time: 10 seconds
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(
kubeClient,
10*time.Second)
// KUBERNETES Dynamic informer
// - cache resync time: 10 seconds
kubedynamicInformer := client.NewDynamicSharedInformerFactory(10 * time.Second)
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
kubedynamicInformer := client.NewDynamicSharedInformerFactory(resyncPeriod)
// WEBHOOK REGISTRATION CLIENT
webhookRegistrationClient := webhookconfig.NewWebhookRegistrationClient(
clientConfig,
client,
@@ -143,10 +133,7 @@ func main() {
// watches CRD resources:
// - Policy
// - PolicyViolation
// - cache resync time: 10 seconds
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(
pclient,
10*time.Second)
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, resyncPeriod)
// Configuration Data
// dynamically load the configuration from configMap
@@ -187,9 +174,8 @@ func main() {
// POLICY CONTROLLER
// - reconciliation policy and policy violation
// - process policy on existing resources
// - status aggregator: receives stats when a policy is applied
// & updates the policy status
pc, err := policy.NewPolicyController(pclient,
// - status aggregator: receives stats when a policy is applied & updates the policy status
policyCtrl, err := policy.NewPolicyController(pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().ClusterPolicyViolations(),
@@ -201,6 +187,7 @@ func main() {
rWebhookWatcher,
log.Log.WithName("PolicyController"),
)
if err != nil {
setupLog.Error(err, "Failed to create policy controller")
os.Exit(1)
@@ -222,6 +209,7 @@ func main() {
statusSync.Listener,
log.Log.WithName("GenerateController"),
)
// GENERATE REQUEST CLEANUP
// -- cleans up the generate requests that have not been processed (i.e. state = [Pending, Failed]) for more than the defined timeout
grcc := generatecleanup.NewController(
@@ -257,7 +245,7 @@ func main() {
}
// Sync openAPI definitions of resources
openApiSync := openapi.NewCRDSync(client, openAPIController)
openAPISync := openapi.NewCRDSync(client, openAPIController)
// WEBHOOK
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
@@ -284,10 +272,12 @@ func main() {
log.Log.WithName("WebhookServer"),
openAPIController,
)
if err != nil {
setupLog.Error(err, "Failed to create webhook server")
os.Exit(1)
}
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
@@ -296,13 +286,13 @@ func main() {
go rWebhookWatcher.Run(stopCh)
go configData.Run(stopCh)
go policyMetaStore.Run(stopCh)
go pc.Run(1, stopCh)
go policyCtrl.Run(3, stopCh)
go egen.Run(1, stopCh)
go grc.Run(1, stopCh)
go grcc.Run(1, stopCh)
go pvgen.Run(1, stopCh)
go statusSync.Run(1, stopCh)
openApiSync.Run(1, stopCh)
openAPISync.Run(1, stopCh)
// verifies if the admission control is enabled and active
// resync: 60 seconds
@@ -319,8 +309,10 @@ func main() {
defer func() {
cancel()
}()
// cleanup webhookconfigurations followed by webhook shutdown
server.Stop(ctx)
// resource cleanup
// remove webhook configurations
<-cleanUp

@@ -734,7 +734,7 @@ spec:
- name: kyverno
image: nirmata/kyverno:v1.1.5
args:
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]"
# customize webhook timeout
# - "--webhooktimeout=4"
ports:

go.mod (+1)

@@ -14,6 +14,7 @@ require (
github.com/json-iterator/go v1.1.9 // indirect
github.com/julienschmidt/httprouter v1.3.0
github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
github.com/rogpeppe/godef v1.1.2 // indirect
github.com/spf13/cobra v0.0.5
github.com/tevino/abool v0.0.0-20170917061928-9b9efcf221b5
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect

go.sum (+9)

@@ -1,3 +1,5 @@
9fans.net/go v0.0.0-20181112161441-237454027057 h1:OcHlKWkAMJEF1ndWLGxp5dnJQkYM/YImUOvsBoz6h5E=
9fans.net/go v0.0.0-20181112161441-237454027057/go.mod h1:diCsxrliIURU9xsYtjCp5AbpQKqdhKmf0ujWDUSkfoY=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -732,6 +734,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/godef v1.1.2 h1:c5mCx0EcCORJOdVMREX7Lgh1raTxAHFmOfXdEB9u8Jw=
github.com/rogpeppe/godef v1.1.2/go.mod h1:WtY9A/ovuQ+UakAJ1/CEqwwulX/WJjb2kgkokCHi/GY=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rubenv/sql-migrate v0.0.0-20190212093014-1007f53448d7/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -872,6 +876,7 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 h1:nVJ3guKA9qdkEQ3TUdXI9QSINo2CUPM/cySEvw2w8I0=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -886,6 +891,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1036,7 +1042,10 @@ golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190711191110-9a621aea19f8/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200226224502-204d844ad48d h1:loGv/4fxITSrCD4t2P8ZF4oUC4RlRFDAsczcoUS2g6c=
golang.org/x/tools v0.0.0-20200226224502-204d844ad48d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=

@@ -1,6 +1,7 @@
package checker
import (
"fmt"
"sync"
"time"
@@ -32,10 +33,11 @@ func (t *LastReqTime) Time() time.Time {
return t.t
}
//SetTime stes the lastrequest time
//SetTime updates the last request time
func (t *LastReqTime) SetTime(tm time.Time) {
t.mu.Lock()
defer t.mu.Unlock()
t.t = tm
}
@@ -52,6 +54,7 @@ func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolic
if err != nil {
log.Error(err, "failed to list cluster policies")
}
for _, policy := range policies {
if policy.HasMutateOrValidateOrGenerate() {
// as there exists one policy with mutate or validate rule
@@ -59,13 +62,14 @@ func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolic
return true
}
}
return false
}
//Run runs the checker and verify the resource update
func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, eventGen event.Interface, client *dclient.Client, defaultResync time.Duration, deadline time.Duration, stopCh <-chan struct{}) {
logger := t.log
logger.V(2).Info("tarting default resync for webhook checker", "resyncTime", defaultResync)
logger.V(4).Info("starting default resync for webhook checker", "resyncTime", defaultResync)
maxDeadline := deadline * time.Duration(MaxRetryCount)
ticker := time.NewTicker(defaultResync)
// interface to update and increment kyverno webhook status via annotations
@@ -85,35 +89,37 @@ func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, eventGen ev
for {
select {
case <-ticker.C:
// if there are no policies then we don't have a webhook on resource.
// we indirectly check if the resource
if !checkIfPolicyWithMutateAndGenerateExists(pLister, logger) {
continue
}
// get current time
timeDiff := time.Since(t.Time())
if timeDiff > maxDeadline {
logger.Info("request exceeded max deadline", "deadline", maxDeadline)
logger.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations")
// set the status unavailable
err := fmt.Errorf("Admission control configuration error")
logger.Error(err, "webhook check failed", "deadline", maxDeadline)
if err := statuscontrol.FailedStatus(); err != nil {
logger.Error(err, "failed to set 'failed' status")
logger.Error(err, "error setting webhook check status to failed")
}
continue
}
if timeDiff > deadline {
logger.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations")
logger.V(3).Info("webhook check deadline exceeded", "deadline", deadline)
// send request to update the kyverno deployment
if err := statuscontrol.IncrementAnnotation(); err != nil {
logger.Error(err, "failed to increment annotation")
}
continue
}
// if the status was false before then we update it to true
// send request to update the kyverno deployment
if err := statuscontrol.SuccessStatus(); err != nil {
logger.Error(err, "failed to update success status")
logger.Error(err, "error setting webhook check status to success")
}
case <-stopCh:
// handler termination signal
logger.V(2).Info("stopping default resync for webhook checker")

@@ -13,7 +13,7 @@ const deployName string = "kyverno"
const deployNamespace string = "kyverno"
const annCounter string = "kyverno.io/generationCounter"
const annWebhookStats string = "kyverno.io/webhookActive"
const annWebhookStatus string = "kyverno.io/webhookActive"
//StatusInterface provides api to update webhook active annotations on kyverno deployments
type StatusInterface interface {
@@ -52,37 +52,42 @@ func NewVerifyControl(client *dclient.Client, eventGen event.Interface, log logr
}
func (vc StatusControl) setStatus(status string) error {
logger := vc.log
logger.Info(fmt.Sprintf("setting deployment %s in ns %s annotation %s to %s", deployName, deployNamespace, annWebhookStats, status))
logger := vc.log.WithValues("name", deployName, "namespace", deployNamespace)
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName)
if err != nil {
logger.Error(err, "failed to get deployment resource")
logger.Error(err, "failed to get deployment")
return err
}
ann = deploy.GetAnnotations()
if ann == nil {
ann = map[string]string{}
ann[annWebhookStats] = status
ann[annWebhookStatus] = status
}
webhookAction, ok := ann[annWebhookStats]
deployStatus, ok := ann[annWebhookStatus]
if ok {
// annotation is present
if webhookAction == status {
logger.V(4).Info(fmt.Sprintf("annotation %s already set to '%s'", annWebhookStats, status))
if deployStatus == status {
logger.V(4).Info(fmt.Sprintf("annotation %s already set to '%s'", annWebhookStatus, status))
return nil
}
}
// set the status
ann[annWebhookStats] = status
logger.Info("updating deployment annotation", "key", annWebhookStatus, "val", status)
ann[annWebhookStatus] = status
deploy.SetAnnotations(ann)
// update counter
_, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false)
if err != nil {
logger.Error(err, fmt.Sprintf("failed to update annotation %s for deployment %s in namespace %s", annWebhookStats, deployName, deployNamespace))
logger.Error(err, "failed to update deployment annotation", "key", annWebhookStatus, "val", status)
return err
}
// create event on kyverno deployment
createStatusUpdateEvent(status, vc.eventGen)
return nil
@@ -101,34 +106,42 @@ func createStatusUpdateEvent(status string, eventGen event.Interface) {
//IncrementAnnotation ...
func (vc StatusControl) IncrementAnnotation() error {
logger := vc.log
logger.Info(fmt.Sprintf("setting deployment %s in ns %s annotation %s", deployName, deployNamespace, annCounter))
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName)
if err != nil {
logger.Error(err, "failed to get deployment %s in namespace %s", deployName, deployNamespace)
logger.Error(err, "failed to find deployment %s in namespace %s", deployName, deployNamespace)
return err
}
ann = deploy.GetAnnotations()
if ann == nil {
ann = map[string]string{}
}
if ann[annCounter] == "" {
ann[annCounter] = "0"
}
counter, err := strconv.Atoi(ann[annCounter])
if err != nil {
logger.Error(err, "Failed to parse string")
logger.Error(err, "Failed to parse string", "name", annCounter, "value", ann[annCounter])
return err
}
// increment counter
counter++
ann[annCounter] = strconv.Itoa(counter)
logger.Info("incrementing annotation", "old", annCounter, "new", counter)
logger.V(3).Info("updating webhook test annotation", "key", annCounter, "value", counter, "deployment", deployName, "namespace", deployNamespace)
deploy.SetAnnotations(ann)
// update counter
_, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false)
if err != nil {
logger.Error(err, fmt.Sprintf("failed to update annotation %s for deployment %s in namespace %s", annCounter, deployName, deployNamespace))
return err
}
return nil
}
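For context, the status control above maintains its liveness markers as annotations on the `kyverno` Deployment; a sketch of how they appear (annotation values shown are illustrative):
```yaml
metadata:
  annotations:
    kyverno.io/webhookActive: "true"     # set by setStatus
    kyverno.io/generationCounter: "3"    # incremented by IncrementAnnotation on each failed check
```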

@@ -52,7 +52,7 @@ func NewClient(config *rest.Config, resync time.Duration, stopCh <-chan struct{}
client: dclient,
clientConfig: config,
kclient: kclient,
log: log.WithName("Client"),
log: log.WithName("dclient"),
}
// Set discovery client
discoveryClient := ServerPreferredResources{cachedClient: memory.NewMemCacheClient(kclient.Discovery()), log: client.log}
@@ -221,6 +221,7 @@ func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSign
//IDiscovery provides interface to manage Kind and GVR mapping
type IDiscovery interface {
FindResource(kind string) (*meta.APIResource, schema.GroupVersionResource, error)
GetGVRFromKind(kind string) schema.GroupVersionResource
GetServerVersion() (*version.Info, error)
OpenAPISchema() (*openapi_v2.Document, error)
@@ -257,57 +258,67 @@ func (c ServerPreferredResources) Poll(resync time.Duration, stopCh <-chan struc
}
}
// OpenAPISchema returns the API server OpenAPI schema document
func (c ServerPreferredResources) OpenAPISchema() (*openapi_v2.Document, error) {
return c.cachedClient.OpenAPISchema()
}
//GetGVRFromKind get the Group Version Resource from kind
// if kind is not found in first attempt we invalidate the cache,
// the retry will then fetch the new registered resources and check again
// if not found after 2 attempts, we declare kind is not found
// kind is Case sensitive
// GetGVRFromKind gets the Group Version Resource from kind
func (c ServerPreferredResources) GetGVRFromKind(kind string) schema.GroupVersionResource {
var gvr schema.GroupVersionResource
var err error
gvr, err = loadServerResources(kind, c.cachedClient, c.log)
if err != nil && !c.cachedClient.Fresh() {
// invalidate cache & re-try once more
c.cachedClient.Invalidate()
gvr, err = loadServerResources(kind, c.cachedClient, c.log)
if err == nil {
return gvr
}
_, gvr, err := c.FindResource(kind)
if err != nil {
c.log.Info("schema not found", "kind", kind)
return schema.GroupVersionResource{}
}
return gvr
}
//GetServerVersion returns the server version of the cluster
// GetServerVersion returns the server version of the cluster
func (c ServerPreferredResources) GetServerVersion() (*version.Info, error) {
return c.cachedClient.ServerVersion()
}
func loadServerResources(k string, cdi discovery.CachedDiscoveryInterface, log logr.Logger) (schema.GroupVersionResource, error) {
logger := log.WithName("loadServerResources")
emptyGVR := schema.GroupVersionResource{}
serverresources, err := cdi.ServerPreferredResources()
if err != nil {
logger.Error(err, "failed to get registered preferred resources")
return emptyGVR, err
// FindResource finds an API resource that matches 'kind'. If the resource is not
// found and the Cache is not fresh, the cache is invalidated and a retry is attempted
func (c ServerPreferredResources) FindResource(kind string) (*meta.APIResource, schema.GroupVersionResource, error) {
r, gvr, err := c.findResource(kind)
if err == nil {
return r, gvr, nil
}
if !c.cachedClient.Fresh() {
c.cachedClient.Invalidate()
if r, gvr, err = c.findResource(kind); err == nil {
return r, gvr, nil
}
}
return nil, schema.GroupVersionResource{}, err
}
func (c ServerPreferredResources) findResource(k string) (*meta.APIResource, schema.GroupVersionResource, error) {
serverresources, err := c.cachedClient.ServerPreferredResources()
if err != nil {
c.log.Error(err, "failed to get registered preferred resources")
return nil, schema.GroupVersionResource{}, err
}
for _, serverresource := range serverresources {
for _, resource := range serverresource.APIResources {
// skip the resource names with "/", to avoid comparison with subresources
// skip the resource names with "/", to avoid comparison with subresources
if resource.Kind == k && !strings.Contains(resource.Name, "/") {
gv, err := schema.ParseGroupVersion(serverresource.GroupVersion)
if err != nil {
logger.Error(err, "failed to parse groupVersion from schema", "groupVersion", serverresource.GroupVersion)
return emptyGVR, err
c.log.Error(err, "failed to parse groupVersion", "groupVersion", serverresource.GroupVersion)
return nil, schema.GroupVersionResource{}, err
}
return gv.WithResource(resource.Name), nil
return &resource, gv.WithResource(resource.Name), nil
}
}
}
return emptyGVR, fmt.Errorf("kind '%s' not found", k)
return nil, schema.GroupVersionResource{}, fmt.Errorf("kind '%s' not found", k)
}

@@ -1,10 +1,12 @@
package client
import (
"fmt"
"strings"
openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -76,6 +78,10 @@ func (c *fakeDiscoveryClient) GetGVRFromKind(kind string) schema.GroupVersionRes
return c.getGVR(resource)
}
func (c *fakeDiscoveryClient) FindResource(kind string) (*meta.APIResource, schema.GroupVersionResource, error) {
return nil, schema.GroupVersionResource{}, fmt.Errorf("Not implemented")
}
func (c *fakeDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
return nil, nil
}

@@ -247,6 +247,9 @@ func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resou
newResource.SetUnstructuredContent(rdata)
newResource.SetName(genName)
newResource.SetNamespace(genNamespace)
if newResource.GetKind() == "" {
newResource.SetKind(genKind)
}
// manage labels
// - app.kubernetes.io/managed-by: kyverno

@@ -21,10 +21,14 @@ import (
//TODO: generation rules
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger) (responses []response.EngineResponse) {
startTime := time.Now()
logger.Info("start applying policy", "startTime", startTime)
defer func() {
logger.Info("finisnhed applying policy", "processingTime", time.Since(startTime))
name := resource.GetKind() + "/" + resource.GetName()
ns := resource.GetNamespace()
if ns != "" {
name = ns + "/" + name
}
logger.V(3).Info("applyPolicy", "resource", name, "processingTime", time.Since(startTime))
}()
var engineResponses []response.EngineResponse

@@ -206,6 +206,7 @@ func (pc *PolicyController) updatePolicy(old, cur interface{}) {
return
}
}
logger.V(4).Info("updating policy", "name", oldP.Name)
pc.enqueuePolicy(curP)
}
@@ -225,11 +226,13 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
return
}
}
logger.V(4).Info("deleting policy", "name", p.Name)
// Unregister from policy meta-store
if err := pc.pMetaStore.UnRegister(*p); err != nil {
logger.Error(err, "failed to unregister policy", "name", p.Name)
}
// we process policies that are not set for background processing as we need to perform policy violation
// cleanup when a policy is deleted.
pc.enqueuePolicy(p)
@@ -263,6 +266,7 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
for i := 0; i < workers; i++ {
go wait.Until(pc.worker, time.Second, stopCh)
}
<-stopCh
}
@@ -315,49 +319,54 @@ func (pc *PolicyController) syncPolicy(key string) error {
defer func() {
logger.V(4).Info("finished syncing policy", "key", key, "processingTime", time.Since(startTime))
}()
policy, err := pc.pLister.Get(key)
if errors.IsNotFound(err) {
logger.V(2).Info("policy deleted", "key", key)
// delete cluster policy violation
if err := pc.deleteClusterPolicyViolations(key); err != nil {
return err
}
// delete namespaced policy violation
if err := pc.deleteNamespacedPolicyViolations(key); err != nil {
return err
}
go pc.deletePolicyViolations(key)
// remove webhook configurations if there are no policies
if err := pc.removeResourceWebhookConfiguration(); err != nil {
// do not fail, if unable to delete resource webhook config
logger.Error(err, "failed to remove resource webhook configurations")
}
return nil
}
if err != nil {
return err
}
pc.resourceWebhookWatcher.RegisterResourceWebhook()
// process policies on existing resources
engineResponses := pc.processExistingResources(*policy)
// report errors
pc.cleanupAndReport(engineResponses)
return nil
}
func (pc *PolicyController) deletePolicyViolations(key string) {
if err := pc.deleteClusterPolicyViolations(key); err != nil {
pc.log.Error(err, "failed to delete policy violation", "key", key)
}
if err := pc.deleteNamespacedPolicyViolations(key); err != nil {
pc.log.Error(err, "failed to delete policy violation", "key", key)
}
}
func (pc *PolicyController) deleteClusterPolicyViolations(policy string) error {
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy)
if err != nil {
return err
}
for _, cpv := range cpvList {
if err := pc.pvControl.DeleteClusterPolicyViolation(cpv.Name); err != nil {
return err
pc.log.Error(err, "failed to delete policy violation", "name", cpv.Name)
}
}
return nil
}
@@ -366,11 +375,13 @@ func (pc *PolicyController) deleteNamespacedPolicyViolations(policy string) erro
if err != nil {
return err
}
for _, nspv := range nspvList {
if err := pc.pvControl.DeleteNamespacedPolicyViolation(nspv.Namespace, nspv.Name); err != nil {
return err
pc.log.Error(err, "failed to delete policy violation", "name", nspv.Name)
}
}
return nil
}

@@ -34,8 +34,13 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
}
// skip reporting violation on pod which has annotation pod-policies.kyverno.io/autogen-applied
if skipPodApplication(resource, logger) {
continue
ann := policy.GetAnnotations()
if _, ok := ann[engine.PodTemplateAnnotation]; ok {
if ann[engine.PodTemplateAnnotation] != "none" {
if skipPodApplication(resource, logger) {
continue
}
}
}
// apply the policy on each
@@ -53,26 +58,35 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHa
resourceMap := map[string]unstructured.Unstructured{}
for _, rule := range policy.Spec.Rules {
// resources that match
for _, k := range rule.MatchResources.Kinds {
var namespaces []string
if len(rule.MatchResources.Namespaces) > 0 {
namespaces = append(namespaces, rule.MatchResources.Namespaces...)
log.V(4).Info("namespaces included", "namespaces", rule.MatchResources.Namespaces)
} else {
log.V(4).Info("processing all namespaces", "rule", rule.Name)
// get all namespaces
namespaces = getAllNamespaces(client, log)
resourceSchema, _, err := client.DiscoveryClient.FindResource(k)
if err != nil {
log.Error(err, "failed to find resource", "kind", k)
continue
}
// get resources in the namespaces
for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler, log)
if !resourceSchema.Namespaced {
rMap := getResourcesPerNamespace(k, client, "", rule, configHandler, log)
mergeresources(resourceMap, rMap)
}
} else {
var namespaces []string
if len(rule.MatchResources.Namespaces) > 0 {
log.V(4).Info("namespaces included", "namespaces", rule.MatchResources.Namespaces)
namespaces = append(namespaces, rule.MatchResources.Namespaces...)
} else {
log.V(4).Info("processing all namespaces", "rule", rule.Name)
namespaces = getAllNamespaces(client, log)
}
for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler, log)
mergeresources(resourceMap, rMap)
}
}
}
}
return resourceMap
}

@@ -60,7 +60,7 @@ func (wrc *WebhookRegistrationClient) constructDebugVerifyMutatingWebhookConfig(
func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig(wg *sync.WaitGroup) {
defer wg.Done()
// Mutating webhook configuration
var err error
var mutatingConfig string
if wrc.serverIP != "" {
@@ -68,14 +68,18 @@ func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig(w
} else {
mutatingConfig = config.VerifyMutatingWebhookConfigurationName
}
logger := wrc.log.WithValues("name", mutatingConfig)
logger.V(4).Info("removing webhook configuration")
err = wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
if errorsapi.IsNotFound(err) {
logger.Error(err, "verify webhook configuration, does not exits. not deleting")
} else if err != nil {
logger.Error(err, "failed to delete verify wwebhook configuration")
} else {
logger.V(4).Info("successfully deleted verify webhook configuration")
logger.V(5).Info("verify webhook configuration not found")
return
}
if err != nil {
logger.Error(err, "failed to delete verify wwebhook configuration")
return
}
logger.V(4).Info("successfully deleted verify webhook configuration")
}
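The cleanup functions in this file converge on the same early-return shape: NotFound is treated as already-deleted, any other error is reported, and success is logged last. A sketch of that shape, assuming the apimachinery errors package as in the original:

```go
package main

import (
	"log"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// deleteIgnoringNotFound treats NotFound as success, surfaces every other
// error, and logs the happy path once at the end, matching the refactor above.
func deleteIgnoringNotFound(del func() error) error {
	err := del()
	if apierrors.IsNotFound(err) {
		log.Println("webhook configuration not found, nothing to delete")
		return nil
	}
	if err != nil {
		log.Printf("failed to delete webhook configuration: %v", err)
		return err
	}
	log.Println("deleted webhook configuration")
	return nil
}

func main() {
	_ = deleteIgnoringNotFound(func() error { return nil })
}
```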

View file

@ -51,7 +51,7 @@ func NewWebhookRegistrationClient(
func (wrc *WebhookRegistrationClient) Register() error {
logger := wrc.log.WithName("Register")
if wrc.serverIP != "" {
logger.Info("Registering webhook", "url", fmt.Sprintf("https://%s", wrc.serverIP))
logger.V(4).Info("Registering webhook", "url", fmt.Sprintf("https://%s", wrc.serverIP))
}
// Handle the case where the cluster already has these configs
@ -249,20 +249,23 @@ func (wrc *WebhookRegistrationClient) createVerifyMutatingWebhookConfiguration()
// Register will fail if the config exists, so there is no need to fail on error
func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
startTime := time.Now()
wrc.log.Info("Started cleaning up webhookconfigurations")
wrc.log.Info("removing prior webhook configurations")
defer func() {
wrc.log.V(4).Info("Finished cleaning up webhookcongfigurations", "processingTime", time.Since(startTime))
wrc.log.V(4).Info("removed webhookcongfigurations", "processingTime", time.Since(startTime))
}()
var wg sync.WaitGroup
wg.Add(5)
// mutating and validating webhook configuration for Kubernetes resources
go wrc.removeResourceMutatingWebhookConfiguration(&wg)
go wrc.removeResourceValidatingWebhookConfiguration(&wg)
// mutating and validating webhook configuration for Policy CRD resource
go wrc.removePolicyMutatingWebhookConfiguration(&wg)
go wrc.removePolicyValidatingWebhookConfiguration(&wg)
// mutating webhook configuration for verifying webhook
go wrc.removeVerifyWebhookMutatingWebhookConfig(&wg)
@ -285,48 +288,53 @@ func (wrc *WebhookRegistrationClient) removeResourceValidatingWebhookConfigurati
}
}
// delete policy mutating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
// Mutating webhook configuration
var mutatingConfig string
if wrc.serverIP != "" {
mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
} else {
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
}
logger := wrc.log.WithValues("name", mutatingConfig)
logger.V(4).Info("removing mutating webhook configuration")
err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
if errorsapi.IsNotFound(err) {
logger.Error(err, "policy mutating webhook configuration does not exist, not deleting")
} else if err != nil {
logger.Error(err, "failed to delete policy mutating webhook configuration")
} else {
logger.V(4).Info("successfully deleted policy mutating webhook configutation")
logger.V(5).Info("policy mutating webhook configuration not found")
return
}
if err != nil {
logger.Error(err, "failed to delete policy mutating webhook configuration")
return
}
logger.V(4).Info("successfully deleted policy mutating webhook configutation")
}
// delete policy validating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
// Validating webhook configuration
var validatingConfig string
if wrc.serverIP != "" {
validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
} else {
validatingConfig = config.PolicyValidatingWebhookConfigurationName
}
logger := wrc.log.WithValues("name", validatingConfig)
logger.V(4).Info("removing validating webhook configuration")
err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", validatingConfig, false)
if errorsapi.IsNotFound(err) {
logger.Error(err, "policy validating webhook configuration does not exist, not deleting")
} else if err != nil {
logger.Error(err, "failed to delete policy validating webhook configuration")
} else {
logger.V(4).Info("successfully deleted policy validating webhook configutation")
logger.V(5).Info("policy validating webhook configuration not found")
return
}
if err != nil {
logger.Error(err, "failed to delete policy validating webhook configuration")
return
}
logger.V(4).Info("successfully deleted policy validating webhook configutation")
}

View file

@ -12,7 +12,7 @@ import (
func (wrc *WebhookRegistrationClient) constructDebugMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
logger := wrc.log
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.MutatingWebhookServicePath)
logger.V(4).Info("Debug MutatingWebhookConfig registed", "url", url)
logger.V(4).Info("Debug MutatingWebhookConfig registered", "url", url)
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{
Name: config.MutatingWebhookConfigurationDebugName,
@ -57,7 +57,7 @@ func (wrc *WebhookRegistrationClient) constructMutatingWebhookConfig(caData []by
}
}
//GetResourceMutatingWebhookConfigName provi
//GetResourceMutatingWebhookConfigName returns the webhook configuration name
func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfigName() string {
if wrc.serverIP != "" {
return config.MutatingWebhookConfigurationDebugName
@ -72,14 +72,16 @@ func (wrc *WebhookRegistrationClient) RemoveResourceMutatingWebhookConfiguration
// delete webhook configuration
err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", configName, false)
if errors.IsNotFound(err) {
logger.Error(err, "resource does not exit")
logger.V(5).Info("webhook configuration not found")
return nil
}
if err != nil {
logger.V(4).Info("failed to delete resource")
logger.V(4).Info("failed to delete webhook configuration")
return err
}
logger.V(4).Info("deleted resource")
logger.V(4).Info("deleted webhook configuration")
return nil
}
@ -130,25 +132,30 @@ func (wrc *WebhookRegistrationClient) constructValidatingWebhookConfig(caData []
}
}
// GetResourceValidatingWebhookConfigName returns the webhook configuration name
func (wrc *WebhookRegistrationClient) GetResourceValidatingWebhookConfigName() string {
if wrc.serverIP != "" {
return config.ValidatingWebhookConfigurationDebugName
}
return config.ValidatingWebhookConfigurationName
}
// RemoveResourceValidatingWebhookConfiguration deletes an existing webhook configuration
func (wrc *WebhookRegistrationClient) RemoveResourceValidatingWebhookConfiguration() error {
configName := wrc.GetResourceValidatingWebhookConfigName()
logger := wrc.log.WithValues("kind", ValidatingWebhookConfigurationKind, "name", configName)
err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", configName, false)
if errors.IsNotFound(err) {
logger.Error(err, "resource does not exist; deleted already")
logger.V(5).Info("webhook configuration not found")
return nil
}
if err != nil {
logger.Error(err, "failed to delete the resource")
logger.Error(err, "failed to delete the webhook configuration")
return err
}
logger.Info("resource deleted")
logger.Info("webhook configuration deleted")
return nil
}

View file

@ -14,11 +14,11 @@ import (
//ResourceWebhookRegister manages the resource webhook registration
type ResourceWebhookRegister struct {
// pendingCreation indicates the status of resource webhook creation
pendingCreation *abool.AtomicBool
LastReqTime *checker.LastReqTime
mwebhookconfigSynced cache.InformerSynced
vwebhookconfigSynced cache.InformerSynced
// list/get mutatingwebhookconfigurations
pendingMutateWebhookCreation *abool.AtomicBool
pendingValidateWebhookCreation *abool.AtomicBool
LastReqTime *checker.LastReqTime
mwebhookconfigSynced cache.InformerSynced
vwebhookconfigSynced cache.InformerSynced
mWebhookConfigLister mconfiglister.MutatingWebhookConfigurationLister
vWebhookConfigLister mconfiglister.ValidatingWebhookConfigurationLister
webhookRegistrationClient *WebhookRegistrationClient
@ -36,7 +36,8 @@ func NewResourceWebhookRegister(
log logr.Logger,
) *ResourceWebhookRegister {
return &ResourceWebhookRegister{
pendingCreation: abool.New(),
pendingMutateWebhookCreation: abool.New(),
pendingValidateWebhookCreation: abool.New(),
LastReqTime: lastReqTime,
mwebhookconfigSynced: mconfigwebhookinformer.Informer().HasSynced,
mWebhookConfigLister: mconfigwebhookinformer.Lister(),
@ -50,51 +51,60 @@ func NewResourceWebhookRegister(
//RegisterResourceWebhook registers a resource webhook
func (rww *ResourceWebhookRegister) RegisterResourceWebhook() {
logger := rww.log
// drop the request if creation is in processing
if rww.pendingCreation.IsSet() {
logger.V(3).Info("resource webhook configuration is in pending creation, skip the request")
timeDiff := time.Since(rww.LastReqTime.Time())
if timeDiff < checker.DefaultDeadline {
if !rww.pendingMutateWebhookCreation.IsSet() {
go rww.createMutatingWebhook()
}
if !rww.pendingValidateWebhookCreation.IsSet() {
go rww.createValidateWebhook()
}
}
}
func (rww *ResourceWebhookRegister) createMutatingWebhook() {
rww.pendingMutateWebhookCreation.Set()
defer rww.pendingMutateWebhookCreation.UnSet()
mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
mutatingConfig, _ := rww.mWebhookConfigLister.Get(mutatingConfigName)
if mutatingConfig != nil {
rww.log.V(5).Info("mutating webhoook configuration exists", "name", mutatingConfigName)
} else {
err := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration()
if err != nil {
rww.log.Error(err, "failed to create resource mutating webhook configuration, re-queue creation request")
rww.RegisterResourceWebhook()
return
}
rww.log.V(2).Info("created mutating webhook", "name", mutatingConfigName)
}
}
func (rww *ResourceWebhookRegister) createValidateWebhook() {
rww.pendingValidateWebhookCreation.Set()
defer rww.pendingValidateWebhookCreation.UnSet()
if rww.RunValidationInMutatingWebhook == "true" {
rww.log.V(2).Info("validation is configured to run during mutate webhook")
return
}
timeDiff := time.Since(rww.LastReqTime.Time())
if timeDiff < checker.DefaultDeadline {
logger.V(3).Info("verified webhook status, creating webhook configuration")
go func() {
mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
mutatingConfig, _ := rww.mWebhookConfigLister.Get(mutatingConfigName)
if mutatingConfig != nil {
logger.V(4).Info("mutating webhoook configuration already exists")
} else {
rww.pendingCreation.Set()
err1 := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration()
rww.pendingCreation.UnSet()
if err1 != nil {
logger.Error(err1, "failed to create resource mutating webhook configuration, re-queue creation request")
rww.RegisterResourceWebhook()
return
}
logger.V(3).Info("successfully created mutating webhook configuration for resources")
}
validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName()
validatingConfig, _ := rww.vWebhookConfigLister.Get(validatingConfigName)
if validatingConfig != nil {
rww.log.V(4).Info("validating webhoook configuration exists", "name", validatingConfigName)
} else {
err := rww.webhookRegistrationClient.CreateResourceValidatingWebhookConfiguration()
if err != nil {
rww.log.Error(err, "failed to create resource validating webhook configuration; re-queue creation request")
rww.RegisterResourceWebhook()
return
}
if rww.RunValidationInMutatingWebhook != "true" {
validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName()
validatingConfig, _ := rww.vWebhookConfigLister.Get(validatingConfigName)
if validatingConfig != nil {
logger.V(4).Info("validating webhoook configuration already exists")
} else {
rww.pendingCreation.Set()
err2 := rww.webhookRegistrationClient.CreateResourceValidatingWebhookConfiguration()
rww.pendingCreation.UnSet()
if err2 != nil {
logger.Error(err2, "failed to create resource validating webhook configuration; re-queue creation request")
rww.RegisterResourceWebhook()
return
}
logger.V(3).Info("successfully created validating webhook configuration for resources")
}
}
}()
rww.log.V(2).Info("created validating webhook", "name", validatingConfigName)
}
}
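The refactor above replaces one shared `pendingCreation` flag with a flag per webhook kind, so the mutating and validating configurations can be created in parallel while duplicate in-flight requests are still dropped. A sketch of that guard, assuming `github.com/tevino/abool` as in the original:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tevino/abool"
)

// launchOnce drops the request when a creation goroutine is already in
// flight; the flag stays set for the lifetime of the worker, as in
// createMutatingWebhook and createValidateWebhook above. Note: as in the
// original, the IsSet check and the Set are separate steps, so a narrow
// race window remains; abool's SetToIf could close it.
func launchOnce(pending *abool.AtomicBool, work func()) {
	if pending.IsSet() {
		return
	}
	go func() {
		pending.Set()
		defer pending.UnSet()
		work()
	}()
}

func main() {
	pending := abool.New()
	for i := 0; i < 3; i++ { // repeated triggers while work runs are dropped
		launchOnce(pending, func() {
			fmt.Println("creating webhook configuration")
			time.Sleep(50 * time.Millisecond)
		})
	}
	time.Sleep(100 * time.Millisecond)
}
```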
@ -121,7 +131,7 @@ func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error {
if err != nil {
return err
}
logger.V(3).Info("emoved mutating resource webhook configuration")
logger.V(3).Info("removed mutating resource webhook configuration")
}
if rww.RunValidationInMutatingWebhook != "true" {

View file

@ -152,7 +152,7 @@ func annotationFromPolicyResponse(policyResponse response.PolicyResponse, log lo
}
// checkPodTemplateAnnotation checks if a Pod has annotation "pod-policies.kyverno.io/autogen-applied"
func checkPodTemplateAnn(resource unstructured.Unstructured) bool {
func checkPodTemplateAnnotation(resource unstructured.Unstructured) bool {
if resource.GetKind() == "Pod" {
ann := resource.GetAnnotations()
if _, ok := ann[engine.PodTemplateAnnotation]; ok {

View file

@ -9,6 +9,9 @@ import (
"github.com/nirmata/kyverno/pkg/engine/response"
engineutils "github.com/nirmata/kyverno/pkg/engine/utils"
yamlv2 "gopkg.in/yaml.v2"
"k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// isResponseSuccesful returns true if all responses are successful
@ -122,3 +125,45 @@ func containRBACinfo(policies []kyverno.ClusterPolicy) bool {
}
return false
}
// extractResources extracts the new and old resources as unstructured objects
func extractResources(newRaw []byte, request *v1beta1.AdmissionRequest) (unstructured.Unstructured, unstructured.Unstructured, error) {
var emptyResource unstructured.Unstructured
// New Resource
if newRaw == nil {
newRaw = request.Object.Raw
}
if newRaw == nil {
return emptyResource, emptyResource, fmt.Errorf("new resource is not defined")
}
new, err := convertResource(newRaw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
if err != nil {
return emptyResource, emptyResource, fmt.Errorf("failed to convert new raw to unstructured: %v", err)
}
// Old Resource - Optional
oldRaw := request.OldObject.Raw
if oldRaw == nil {
return new, emptyResource, nil
}
old, err := convertResource(oldRaw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
if err != nil {
return emptyResource, emptyResource, fmt.Errorf("failed to convert old raw to unstructured: %v", err)
}
return new, old, err
}
// convertResource converts raw bytes to an unstructured object
func convertResource(raw []byte, group, version, kind, namespace string) (unstructured.Unstructured, error) {
obj, err := engineutils.ConvertToUnstructured(raw)
if err != nil {
return unstructured.Unstructured{}, fmt.Errorf("failed to convert raw to unstructured: %v", err)
}
obj.SetGroupVersionKind(schema.GroupVersionKind{Group: group, Version: version, Kind: kind})
obj.SetNamespace(namespace)
return *obj, nil
}
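A self-contained sketch of what `convertResource` above does with admission payload bytes: decode the raw JSON into an unstructured object, then stamp the GroupVersionKind and namespace taken from the admission request. The inlined helper and the sample payload are illustrative, not the exact Kyverno code path:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// convertRaw mirrors the convertResource helper above, inlined so the sketch
// is self-contained: unmarshal raw JSON, then set GVK and namespace.
func convertRaw(raw []byte, gvk schema.GroupVersionKind, ns string) (unstructured.Unstructured, error) {
	var obj unstructured.Unstructured
	if err := obj.UnmarshalJSON(raw); err != nil {
		return unstructured.Unstructured{}, fmt.Errorf("failed to convert raw to unstructured: %v", err)
	}
	obj.SetGroupVersionKind(gvk)
	obj.SetNamespace(ns)
	return obj, nil
}

func main() {
	raw := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"nginx"}}`)
	pod, err := convertRaw(raw, schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, "default")
	if err != nil {
		panic(err)
	}
	fmt.Println(pod.GetKind(), pod.GetNamespace(), pod.GetName()) // Pod default nginx
}
```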

View file

@ -18,9 +18,19 @@ import (
// HandleMutation handles mutating webhook admission request
// return value: generated patches
func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resource unstructured.Unstructured, policies []kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) []byte {
logger := ws.log.WithValues("action", "mutation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger.V(4).Info("incoming request")
func (ws *WebhookServer) HandleMutation(
request *v1beta1.AdmissionRequest,
resource unstructured.Unstructured,
policies []kyverno.ClusterPolicy,
ctx *context.Context,
userRequestInfo kyverno.RequestInfo) []byte {
resourceName := request.Kind.Kind + "/" + request.Name
if request.Namespace != "" {
resourceName = request.Namespace + "/" + resourceName
}
logger := ws.log.WithValues("action", "mutate", "resource", resourceName, "operation", request.Operation)
var patches [][]byte
var engineResponses []response.EngineResponse
@ -31,24 +41,32 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
}
for _, policy := range policies {
logger.V(2).Info("evaluating policy", "policy", policy.Name)
logger.V(3).Info("evaluating policy", "policy", policy.Name)
policyContext.Policy = policy
engineResponse := engine.Mutate(policyContext)
if engineResponse.PolicyResponse.RulesAppliedCount <= 0 {
continue
}
engineResponses = append(engineResponses, engineResponse)
ws.statusListener.Send(mutateStats{resp: engineResponse})
if !engineResponse.IsSuccesful() {
logger.V(4).Info("failed to apply policy", "policy", policy.Name)
logger.Info("failed to apply policy", "policy", policy.Name)
continue
}
err := ws.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetKind())
if err != nil {
logger.Error(err, "failed to validate resource")
logger.Error(err, "validation error", "policy", policy.Name)
continue
}
// gather patches
patches = append(patches, engineResponse.GetPatches()...)
logger.Info("mutation rules from policy applied succesfully", "policy", policy.Name)
if len(engineResponse.GetPatches()) != 0 {
logger.Info("mutation rules from policy applied succesfully", "policy", policy.Name)
}
policyContext.NewResource = engineResponse.PatchedResource
}
@ -62,6 +80,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
// generate violation when response fails
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
ws.pvGenerator.Add(pvInfos...)
// REPORTING EVENTS
// Scenario 1:
// some/all policies failed to apply on the resource. a policy volation is generated.

View file

@ -81,7 +81,7 @@ func generateJSONPatchesForDefaults(policy *kyverno.ClusterPolicy, log logr.Logg
}
func defaultBackgroundFlag(policy *kyverno.ClusterPolicy, log logr.Logger) ([]byte, string) {
// default 'Background' flag to 'true' if not specified
// set 'Background' flag to 'true' if not specified
defaultVal := true
if policy.Spec.Background == nil {
log.V(4).Info("setting default value", "spec.background", true)
@ -94,19 +94,22 @@ func defaultBackgroundFlag(policy *kyverno.ClusterPolicy, log logr.Logger) ([]by
"add",
&defaultVal,
}
patchByte, err := json.Marshal(jsonPatch)
if err != nil {
log.Error(err, "failed to set default value", "spec.background", true)
return nil, ""
}
log.Info("generated JSON Patch to set default", "spec.background", true)
log.V(3).Info("generated JSON Patch to set default", "spec.background", true)
return patchByte, fmt.Sprintf("default 'Background' to '%s'", strconv.FormatBool(true))
}
return nil, ""
}
func defaultvalidationFailureAction(policy *kyverno.ClusterPolicy, log logr.Logger) ([]byte, string) {
// default ValidationFailureAction to "audit" if not specified
// set ValidationFailureAction to "audit" if not specified
if policy.Spec.ValidationFailureAction == "" {
log.V(4).Info("setting defautl value", "spec.validationFailureAction", Audit)
jsonPatch := struct {
@ -116,16 +119,19 @@ func defaultvalidationFailureAction(policy *kyverno.ClusterPolicy, log logr.Logg
}{
"/spec/validationFailureAction",
"add",
Audit, //audit
Audit,
}
patchByte, err := json.Marshal(jsonPatch)
if err != nil {
log.Error(err, "failed to default value", "spec.validationFailureAction", Audit)
return nil, ""
}
log.Info("generated JSON Patch to set default", "spec.validationFailureAction", Audit)
log.V(3).Info("generated JSON Patch to set default", "spec.validationFailureAction", Audit)
return patchByte, fmt.Sprintf("default 'ValidationFailureAction' to '%s'", Audit)
}
return nil, ""
}
@ -145,7 +151,7 @@ func generatePodControllerRule(policy kyverno.ClusterPolicy, log logr.Logger) (p
// scenario A
if !ok {
controllers = "all"
controllers = "DaemonSet,Deployment,Job,StatefulSet"
annPatch, err := defaultPodControllerAnnotation(ann)
if err != nil {
errs = append(errs, fmt.Errorf("failed to generate pod controller annotation for policy '%s': %v", policy.Name, err))
@ -158,6 +164,7 @@ func generatePodControllerRule(policy kyverno.ClusterPolicy, log logr.Logger) (p
if controllers == "none" {
return nil, nil
}
log.V(3).Info("auto generating rule for pod controllers", "controlers", controllers)
p, err := generateRulePatches(policy, controllers, log)
@ -197,6 +204,7 @@ func createRuleMap(rules []kyverno.Rule) map[string]kyvernoRule {
// generateRulePatches generates rules for podControllers based on scenario A and C
func generateRulePatches(policy kyverno.ClusterPolicy, controllers string, log logr.Logger) (rulePatches [][]byte, errs []error) {
var genRule kyvernoRule
insertIdx := len(policy.Spec.Rules)
ruleMap := createRuleMap(policy.Spec.Rules)
@ -286,14 +294,34 @@ func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.
return kyvernoRule{}
}
// scenario A
// Support backward compatibility
skipAutoGeneration := false
var controllersValidated []string
if controllers == "all" {
skipAutoGeneration = true
} else if controllers != "none" && controllers != "all" {
controllersList := map[string]int{"DaemonSet": 1, "Deployment": 1, "Job": 1, "StatefulSet": 1}
for _, value := range strings.Split(controllers, ",") {
if _, ok := controllersList[value]; ok {
controllersValidated = append(controllersValidated, value)
}
}
if len(controllersValidated) > 0 {
skipAutoGeneration = true
}
}
if skipAutoGeneration {
if match.ResourceDescription.Name != "" || match.ResourceDescription.Selector != nil ||
exclude.ResourceDescription.Name != "" || exclude.ResourceDescription.Selector != nil {
log.Info("skip generating rule on pod controllers: Name / Selector in resource decription may not be applicable.", "rule", rule.Name)
return kyvernoRule{}
}
controllers = engine.PodControllers
if controllers == "all" {
controllers = engine.PodControllers
} else {
controllers = strings.Join(controllersValidated, ",")
}
}
controllerRule := &kyvernoRule{
@ -361,7 +389,7 @@ func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.
func defaultPodControllerAnnotation(ann map[string]string) ([]byte, error) {
if ann == nil {
ann = make(map[string]string)
ann[engine.PodControllersAnnotation] = "all"
ann[engine.PodControllersAnnotation] = "DaemonSet,Deployment,Job,StatefulSet"
jsonPatch := struct {
Path string `json:"path"`
Op string `json:"op"`
@ -386,7 +414,7 @@ func defaultPodControllerAnnotation(ann map[string]string) ([]byte, error) {
}{
"/metadata/annotations/pod-policies.kyverno.io~1autogen-controllers",
"add",
"all",
"DaemonSet,Deployment,Job,StatefulSet",
}
patchByte, err := json.Marshal(jsonPatch)
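The patch path above encodes the annotation key with the RFC 6901 JSON Pointer escape: the "/" in "pod-policies.kyverno.io/autogen-controllers" becomes "~1" so the key reads as a single path segment. A small sketch of that escaping and the resulting patch (the helper name is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// escapePointerSegment applies RFC 6901 escaping, as seen in the patch path
// above: "~" must become "~0" before "/" becomes "~1".
func escapePointerSegment(s string) string {
	s = strings.ReplaceAll(s, "~", "~0")
	return strings.ReplaceAll(s, "/", "~1")
}

func main() {
	key := "pod-policies.kyverno.io/autogen-controllers"
	patch := map[string]string{
		"path":  "/metadata/annotations/" + escapePointerSegment(key),
		"op":    "add",
		"value": "DaemonSet,Deployment,Job,StatefulSet",
	}
	out, _ := json.Marshal(patch)
	fmt.Println(string(out)) // path ends in ...kyverno.io~1autogen-controllers
}
```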

View file

@ -41,7 +41,7 @@ func TestGeneratePodControllerRule_NilAnnotation(t *testing.T) {
"metadata": {
"name": "add-safe-to-evict",
"annotations": {
"pod-policies.kyverno.io/autogen-controllers": "all"
"pod-policies.kyverno.io/autogen-controllers": "DaemonSet,Deployment,Job,StatefulSet"
}
}
}`)
@ -274,7 +274,7 @@ func TestGeneratePodControllerRule_ExistOtherAnnotation(t *testing.T) {
"metadata": {
"name": "add-safe-to-evict",
"annotations": {
"pod-policies.kyverno.io/autogen-controllers": "all",
"pod-policies.kyverno.io/autogen-controllers": "DaemonSet,Deployment,Job,StatefulSet",
"test": "annotation"
}
}
@ -483,7 +483,7 @@ func TestGeneratePodControllerRule_ValidatePattern(t *testing.T) {
"kind": "ClusterPolicy",
"metadata": {
"annotations": {
"pod-policies.kyverno.io/autogen-controllers": "all"
"pod-policies.kyverno.io/autogen-controllers": "DaemonSet,Deployment,Job,StatefulSet"
},
"name": "add-safe-to-evict"
},

View file

@ -42,7 +42,6 @@ import (
)
// WebhookServer contains configured TLS server with MutationWebhook.
// MutationWebhook gets policies from policyController and takes control of the cluster with kubeclient.
type WebhookServer struct {
server http.Server
client *client.Client
@ -155,15 +154,16 @@ func NewWebhookServer(
}
func (ws *WebhookServer) handlerFunc(handler func(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse, filter bool) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
return func(rw http.ResponseWriter, r *http.Request) {
startTime := time.Now()
// for every request received on the ep update last request time,
// this is used to verify admission control
ws.lastReqTime.SetTime(time.Now())
admissionReview := ws.bodyToAdmissionReview(r, w)
ws.lastReqTime.SetTime(startTime)
admissionReview := ws.bodyToAdmissionReview(r, rw)
if admissionReview == nil {
ws.log.Info("failed to parse admission review request", "request", r)
return
}
logger := ws.log.WithValues("kind", admissionReview.Request.Kind, "namespace", admissionReview.Request.Namespace, "name", admissionReview.Request.Name)
defer func() {
logger.V(4).Info("request processed", "processingTime", time.Since(startTime))
@ -171,36 +171,37 @@ func (ws *WebhookServer) handlerFunc(handler func(request *v1beta1.AdmissionRequ
admissionReview.Response = &v1beta1.AdmissionResponse{
Allowed: true,
UID: admissionReview.Request.UID,
}
// Do not process admission requests for kinds listed in filterKinds
request := admissionReview.Request
if filter {
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
admissionReview.Response = handler(request)
}
} else {
admissionReview.Response = handler(request)
}
admissionReview.Response.UID = request.UID
responseJSON, err := json.Marshal(admissionReview)
if err != nil {
http.Error(w, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
if filter && ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
writeResponse(rw, admissionReview)
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if _, err := w.Write(responseJSON); err != nil {
http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
admissionReview.Response = handler(request)
writeResponse(rw, admissionReview)
return
}
}
func writeResponse(rw http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) {
responseJSON, err := json.Marshal(admissionReview)
if err != nil {
http.Error(rw, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
if _, err := rw.Write(responseJSON); err != nil {
http.Error(rw, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
}
func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
logger := ws.log.WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger := ws.log.WithName("resourceMutation").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
policies, err := ws.pMetaStore.ListAll()
if err != nil {
// Unable to connect to policy Lister to access policies
@ -232,7 +233,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
}
}
if checkPodTemplateAnn(resource) {
if checkPodTemplateAnnotation(resource) {
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
@ -315,7 +316,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
}
func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
logger := ws.log.WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger := ws.log.WithName("resourceValidation").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
policies, err := ws.pMetaStore.ListAll()
if err != nil {
// Unable to connect to policy Lister to access policies
@ -355,6 +356,28 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
logger.Error(err, "failed to load service account in context")
}
resource, err := convertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
if err != nil {
logger.Error(err, "failed to convert RAW resource to unstructured format")
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: err.Error(),
},
}
}
if checkPodTemplateAnnotation(resource) {
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
}
}
// VALIDATION
ok, msg := ws.HandleValidation(request, policies, nil, ctx, userRequestInfo)
if !ok {
@ -390,6 +413,7 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
}
}(ws)
logger.Info("starting")
// verifies that the admission control is enabled and active
// resync: 60 seconds
// deadline: 60 seconds (send request)
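The handlerFunc refactor earlier in this file funnels every exit through the single writeResponse helper: build a default-allow response, short-circuit filtered kinds, otherwise run the handler. A sketch of that control flow, with a `shouldFilter` flag standing in for `configHandler.ToFilter`:

```go
package main

import (
	"fmt"

	"k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

// decide mirrors the refactored handlerFunc above: filtered requests are
// allowed without evaluation, everything else goes through the handler, and
// the caller writes the review back in one place.
func decide(review *v1beta1.AdmissionReview, shouldFilter bool,
	handler func(*v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse) {
	review.Response = &v1beta1.AdmissionResponse{Allowed: true, UID: review.Request.UID}
	if shouldFilter {
		return // filtered kinds are admitted as-is
	}
	review.Response = handler(review.Request)
	review.Response.UID = review.Request.UID
}

func main() {
	review := &v1beta1.AdmissionReview{Request: &v1beta1.AdmissionRequest{UID: types.UID("123")}}
	decide(review, false, func(r *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
		return &v1beta1.AdmissionResponse{Allowed: true}
	})
	fmt.Println(review.Response.Allowed, review.Response.UID) // true 123
}
```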

View file

@ -19,9 +19,19 @@ import (
// HandleValidation handles validating webhook admission request
// If there are no errors in validating rule we apply generation rules
// patchedResource is the (resource + patches) after applying mutation rules
func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, policies []kyverno.ClusterPolicy, patchedResource []byte, ctx *context.Context, userRequestInfo kyverno.RequestInfo) (bool, string) {
logger := ws.log.WithValues("action", "validation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger.V(4).Info("incoming request")
func (ws *WebhookServer) HandleValidation(
request *v1beta1.AdmissionRequest,
policies []kyverno.ClusterPolicy,
patchedResource []byte,
ctx *context.Context,
userRequestInfo kyverno.RequestInfo) (bool, string) {
resourceName := request.Kind.Kind + "/" + request.Name
if request.Namespace != "" {
resourceName = request.Namespace + "/" + resourceName
}
logger := ws.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation)
if val, err := ctx.Query("request.object.metadata.deletionTimestamp"); val != nil && err == nil {
return true, ""
@ -43,7 +53,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
}
var engineResponses []response.EngineResponse
for _, policy := range policies {
logger.V(2).Info("evaluating policy", "policy", policy.Name)
logger.V(3).Info("evaluating policy", "policy", policy.Name)
policyContext.Policy = policy
engineResponse := engine.Validate(policyContext)
if reflect.DeepEqual(engineResponse, response.EngineResponse{}) {
@ -59,6 +69,8 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
logger.V(4).Info("failed to apply policy", "policy", policy.Name)
continue
}
logger.Info("valiadtion rules from policy applied succesfully", "policy", policy.Name)
}
// If Validation fails then reject the request
// no violations will be created on "enforce"