diff --git a/README.md b/README.md
index 632ac310f2..dee9298ba0 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,8 @@ kind: ClusterPolicy
metadata:
name: check-cpu-memory
spec:
+ # `enforce` blocks the request. `audit` reports violations
+ validationFailureAction: enforce
rules:
- name: check-pod-resources
match:
@@ -71,17 +73,15 @@ spec:
match:
resources:
kinds:
- - Deployment
+ - Pod
mutate:
overlay:
spec:
- template:
- spec:
- containers:
- # match images which end with :latest
- - (image): "*:latest"
- # set the imagePullPolicy to "Always"
- imagePullPolicy: "Always"
+ containers:
+ # match images which end with :latest
+ - (image): "*:latest"
+ # set the imagePullPolicy to "Always"
+ imagePullPolicy: "Always"
````
### 3. Generating resources
@@ -100,13 +100,10 @@ spec:
resources:
kinds:
- Namespace
- selector:
- matchExpressions:
- - {key: kafka, operator: Exists}
generate:
kind: ConfigMap
name: zk-kafka-address
- # create the resource in the new namespace
+ # generate the resource in the new namespace
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
@@ -123,12 +120,15 @@ Refer to a list of curated of ***[sample policies](/samples/README.md)*** that c
* [Getting Started](documentation/installation.md)
* [Writing Policies](documentation/writing-policies.md)
- * [Mutate](documentation/writing-policies-mutate.md)
- * [Validate](documentation/writing-policies-validate.md)
- * [Generate](documentation/writing-policies-generate.md)
+ * [Validate Resources](documentation/writing-policies-validate.md)
+ * [Mutate Resources](documentation/writing-policies-mutate.md)
+ * [Generate Resources](documentation/writing-policies-generate.md)
+ * [Variable Substitution](documentation/writing-policies-variables.md)
+ * [Preconditions](documentation/writing-policies-preconditions.md)
+ * [Auto-Generation of Pod Controller Policies](documentation/writing-policies-autogen.md)
+ * [Background Processing](documentation/writing-policies-background.md)
* [Testing Policies](documentation/testing-policies.md)
- * [Using kubectl](documentation/testing-policies.md#Test-using-kubectl)
- * [Using the Kyverno CLI](documentation/testing-policies.md#Test-using-the-Kyverno-CLI)
+* [Policy Violations](documentation/policy-violations.md)
* [Sample Policies](/samples/README.md)
## License
diff --git a/cmd/initContainer/main.go b/cmd/initContainer/main.go
index b241b38c85..eaf642e1e0 100644
--- a/cmd/initContainer/main.go
+++ b/cmd/initContainer/main.go
@@ -46,6 +46,8 @@ func main() {
requests := []request{
// Resource
+ {validatingWebhookConfigKind, config.ValidatingWebhookConfigurationName},
+ {validatingWebhookConfigKind, config.ValidatingWebhookConfigurationDebugName},
{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
// Policy
diff --git a/cmd/kyverno/main.go b/cmd/kyverno/main.go
index 9a44b28eb1..62cefceeb9 100644
--- a/cmd/kyverno/main.go
+++ b/cmd/kyverno/main.go
@@ -27,9 +27,10 @@ import (
)
var (
- kubeconfig string
- serverIP string
- webhookTimeout int
+ kubeconfig string
+ serverIP string
+ webhookTimeout int
+ runValidationInMutatingWebhook string
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
filterK8Resources string
@@ -40,7 +41,6 @@ var (
func main() {
defer glog.Flush()
version.PrintVersionInfo()
-
// cleanUp Channel
cleanUp := make(chan struct{})
// handle os signals
@@ -103,7 +103,9 @@ func main() {
rWebhookWatcher := webhookconfig.NewResourceWebhookRegister(
lastReqTime,
kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(),
+ kubeInformer.Admissionregistration().V1beta1().ValidatingWebhookConfigurations(),
webhookRegistrationClient,
+ runValidationInMutatingWebhook,
)
// KYVERNO CRD INFORMER
@@ -265,6 +267,8 @@ func init() {
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
+ flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
+
// Generate CSR with CN as FQDN due to https://github.com/nirmata/kyverno/issues/542
flag.BoolVar(&fqdncn, "fqdn-as-cn", false, "use FQDN as Common Name in CSR")
config.LogDefaultFlags()
diff --git a/definitions/install.yaml b/definitions/install.yaml
index ed7f70d9bd..1fb0d61762 100644
--- a/definitions/install.yaml
+++ b/definitions/install.yaml
@@ -71,8 +71,7 @@ spec:
type: string
resources:
type: object
- required:
- - kinds
+ minProperties: 1
properties:
kinds:
type: array
diff --git a/definitions/install_debug.yaml b/definitions/install_debug.yaml
index 8bbebcf6d9..cba8662c40 100644
--- a/definitions/install_debug.yaml
+++ b/definitions/install_debug.yaml
@@ -71,8 +71,7 @@ spec:
type: string
resources:
type: object
- required:
- - kinds
+ minProperties: 1
properties:
kinds:
type: array
diff --git a/documentation/installation.md b/documentation/installation.md
index 702c6bc94d..b81f645ccb 100644
--- a/documentation/installation.md
+++ b/documentation/installation.md
@@ -10,7 +10,7 @@ There are 2 ways to configure the secure communications link between Kyverno and
Kyverno can request a CA signed certificate-key pair from `kube-controller-manager`. This method requires that the kube-controller-manager is configured to act as a certificate signer. To verify that this option is enabled for your cluster, check the command-line args for the kube-controller-manager. If `--cluster-signing-cert-file` and `--cluster-signing-key-file` are passed to the controller manager with paths to your CA's key-pair, then you can proceed to install Kyverno using this method.
-**Deploying on EKS requires enabling a command-line argument `--fqdncn` in the 'kyverno' container in the deployment, due to a current limitation with the certificates returned by EKS for CSR(bug: https://github.com/awslabs/amazon-eks-ami/issues/341)**
+**Deploying on EKS requires enabling a command-line argument `--fqdn-as-cn` in the 'kyverno' container in the deployment, due to a current limitation with the certificates returned by EKS for CSR(bug: https://github.com/awslabs/amazon-eks-ami/issues/341)**
To install Kyverno in a cluster that supports certificate signing, run the following command on a host with kubectl `cluster-admin` access:
@@ -18,6 +18,8 @@ To install Kyverno in a cluster that supports certificate signing, run the follo
kubectl create -f https://github.com/nirmata/kyverno/raw/master/definitions/install.yaml
````
+Note that the above command will install the last released (stable) version of Kyverno. If you want to install the latest version, you can edit the `install.yaml` and update the image tag.
+
To check the Kyverno controller status, run the command:
````sh
diff --git a/documentation/policy-violations.md b/documentation/policy-violations.md
new file mode 100644
index 0000000000..8d036944cb
--- /dev/null
+++ b/documentation/policy-violations.md
@@ -0,0 +1,28 @@
+*[documentation](/README.md#documentation) / Policy Violations*
+
+# Policy Violations
+
+Policy Violations are created to:
+1. Report resources that do not comply with validation rules with `validationFailureAction` set to `audit`.
+2. Report existing resources (i.e. resources created before the policy was created) that do not comply with validation or mutation rules.
+
+Policy Violation objects are created in the resource namespace. Policy Violation resources are automatically removed when the resource is updated to comply with the policy rule, or when the policy rule is deleted.
+
+You can view all existing policy violations as shown below:
+
+````
+λ kubectl get polv --all-namespaces
+NAMESPACE NAME POLICY RESOURCEKIND RESOURCENAME AGE
+default disallow-root-user-56j4t disallow-root-user Deployment nginx-deployment 5m7s
+default validation-example2-7snmh validation-example2 Deployment nginx-deployment 5m7s
+docker disallow-root-user-2kl4m disallow-root-user Pod compose-api-dbbf7c5db-kpnvk 43m
+docker disallow-root-user-hfxzn disallow-root-user Pod compose-7b7c5cbbcc-xj8f6 43m
+docker disallow-root-user-s5rjp disallow-root-user Deployment compose 43m
+docker disallow-root-user-w58kp disallow-root-user Deployment compose-api 43m
+docker validation-example2-dgj9j validation-example2 Deployment compose 5m28s
+docker validation-example2-gzfdf validation-example2 Deployment compose-api 5m27s
+````
+
+# Cluster Policy Violations
+
+Cluster Policy Violations are like Policy Violations but created for cluster-wide resources.
diff --git a/documentation/testing-policies.md b/documentation/testing-policies.md
index a877430846..b8fa8c62b8 100644
--- a/documentation/testing-policies.md
+++ b/documentation/testing-policies.md
@@ -2,9 +2,11 @@
# Testing Policies
+
The resources definitions for testing are located in [/test](/test) directory. Each test contains a pair of files: one is the resource definition, and the second is the kyverno policy for this definition.
## Test using kubectl
+
To do this you should [install kyverno to the cluster](/documentation/installation.md).
For example, to test the simplest kyverno policy for ConfigMap, create the policy and then the resource itself via kubectl:
@@ -20,51 +22,4 @@ Then compare the original resource definition in CM.yaml with the actual one:
kubectl get -f CM.yaml -o yaml
````
-## Test using the Kyverno CLI
-
-The Kyverno Command Line Interface (CLI) tool allows writing and testing policies without having to apply local policy changes to a cluster. You can also test policies without a Kubernetes clusters, but results may vary as default values will not be filled in.
-
-### Building the CLI
-
-You will need a [Go environment](https://golang.org/doc/install) setup.
-
-1. Clone the Kyverno repo
-
-````bash
-git clone https://github.com/nirmata/kyverno/
-````
-
-2. Build the CLI
-
-````bash
-cd kyverno/cmd/kyverno
-go build
-````
-
-Or, you can directly build and install the CLI using `go get`:
-
-````bash
-go get -u https://github.com/nirmata/kyverno/cmd/kyverno
-````
-
-### Using the CLI
-
-The CLI loads default kubeconfig ($HOME/.kube/config) to test policies in Kubernetes cluster. If no kubeconfig is found, the CLI will test policies on raw resources.
-
-To test a policy using the CLI type:
-
-`kyverno apply @ @`
-
-For example:
-
-```bash
-kyverno apply @../../examples/cli/policy-deployment.yaml @../../examples/cli/resources
-```
-
-To test a policy with the specific kubeconfig:
-
-```bash
-kyverno apply @../../examples/cli/policy-deployment.yaml @../../examples/cli/resources --kubeconfig $PATH_TO_KUBECONFIG_FILE
-```
-
-In future releases, the CLI will support complete validation and generation of policies.
+*Read Next >> [Policy Violations](/documentation/policy-violations.md)*
diff --git a/documentation/writing-policies-autogen.md b/documentation/writing-policies-autogen.md
new file mode 100644
index 0000000000..97a6c922c6
--- /dev/null
+++ b/documentation/writing-policies-autogen.md
@@ -0,0 +1,20 @@
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Auto-Generation for Pod Controllers*
+
+# Auto Generating Rules for Pod Controllers
+
+Writing policies on pods helps address all pod creation flows. However, when pod controllers are used, pod level policies result in errors not being reported when the pod controller object is created.
+
+Kyverno solves this issue by supporting automatic generation of policy rules for pod controllers from a rule written for a pod.
+
+This auto-generation behavior is controlled by the `pod-policies.kyverno.io/autogen-controllers` annotation.
+
+By default, Kyverno inserts an annotation `pod-policies.kyverno.io/autogen-controllers=all`, to generate an additional rule that is applied to pod controllers: DaemonSet, Deployment, Job, StatefulSet.
+
+You can change the annotation `pod-policies.kyverno.io/autogen-controllers` to customize the target pod controllers for the auto-generated rules. For example, Kyverno generates a rule for a `Deployment` if the annotation of policy is defined as `pod-policies.kyverno.io/autogen-controllers=Deployment`.
+
+When a `name` or `labelSelector` is specified in the match / exclude block, Kyverno skips generating pod controllers rule as these filters may not be applicable to pod controllers.
+
+To disable auto-generating rules for pod controllers set `pod-policies.kyverno.io/autogen-controllers` to the value `none`.
+
+*Read Next >> [Background Processing](/documentation/writing-policies-background.md)*
+
diff --git a/documentation/writing-policies-background.md b/documentation/writing-policies-background.md
new file mode 100644
index 0000000000..b2e3046e11
--- /dev/null
+++ b/documentation/writing-policies-background.md
@@ -0,0 +1,20 @@
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Background Processing*
+
+# Background processing
+
+Kyverno applies policies during admission control and to existing resources in the cluster that may have been created before a policy was created. The application of policies to existing resources is referred to as `background` processing.
+
+Note, that Kyverno does not mutate existing resources, and will only report policy violation for existing resources that do not match mutation, validation, or generation rules.
+
+A policy is always enabled for processing during admission control. However, policy rules that rely on request information (e.g. `{{request.userInfo}}`) cannot be applied to existing resources in the `background` mode as the user information is not available outside of the admission controller. Hence, these rules must use the boolean flag `{spec.background}` to disable `background` processing.
+
+```
+spec:
+ background: true
+ rules:
+ - name: default-deny-ingress
+```
+
+The default value of `background` is `true`. When a policy is created or modified, the policy validation logic will report an error if a rule uses `userInfo` and does not set `background` to `false`.
+
+*Read Next >> [Testing Policies](/documentation/testing-policies.md)*
diff --git a/documentation/writing-policies-generate.md b/documentation/writing-policies-generate.md
index 634d69bd6c..e4dcda2b1e 100644
--- a/documentation/writing-policies-generate.md
+++ b/documentation/writing-policies-generate.md
@@ -1,12 +1,15 @@
-*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Generate*
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Generate Resources*
-# Generate Configurations
+# Generate Resources
-```generate``` is used to create additional resources when a resource is created. This is useful to create supporting resources, such as role bindings for a new namespace.
+The ```generate``` rule can be used to create additional resources when a new resource is created. This is useful to create supporting resources, such as new role bindings for a new namespace.
+
+The `generate` rule supports `match` and `exclude` blocks, like other rules. Hence, the trigger for applying this rule can be the creation of any resource, and it is possible to match or exclude API requests based on subjects, roles, etc.
+
+Currently, the generate rule only triggers during an API request and does not support [background processing](/documentation/writing-policies-background.md). Keeping resources synchronized is planned for a future release (see https://github.com/nirmata/kyverno/issues/560).
## Example 1
-- rule
-Creates a ConfigMap with name `default-config` for all
+
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
@@ -19,28 +22,22 @@ spec:
resources:
kinds:
- Namespace
- selector:
- matchLabels:
- LabelForSelector : "namespace2"
generate:
kind: ConfigMap # Kind of resource
name: default-config # Name of the new Resource
- namespace: "{{request.object.metadata.name}}" # Create in the namespace that triggers this rule
+ namespace: "{{request.object.metadata.name}}" # namespace that triggers this rule
clone:
namespace: default
name: config-template
- - name: "Generate Secret"
+ - name: "Generate Secret (insecure)"
match:
resources:
kinds:
- Namespace
- selector:
- matchLabels:
- LabelForSelector : "namespace2"
generate:
kind: Secret
name: mongo-creds
- namespace: "{{request.object.metadata.name}}" # Create in the namespace that triggers this rule
+ namespace: "{{request.object.metadata.name}}" # namespace that triggers this rule
data:
data:
DB_USER: YWJyYWthZGFicmE=
@@ -50,9 +47,9 @@ spec:
purpose: mongo
````
-In this example, when this policy is applied, any new namespace that satisfies the label selector will receive 2 new resources after its creation:
- * ConfigMap copied from default/config-template.
- * Secret with values DB_USER and DB_PASSWORD, and label ```purpose: mongo```.
+In this example, new namespaces will receive 2 new resources after creation:
+ * A ConfigMap cloned from default/config-template.
+ * A Secret with values DB_USER and DB_PASSWORD, and label ```purpose: mongo```.
## Example 2
@@ -72,20 +69,21 @@ spec:
generate:
kind: NetworkPolicy
name: deny-all-traffic
- namespace: "{{request.object.metadata.name}}" # Create in the namespace that triggers this rule
+ namespace: "{{request.object.metadata.name}}" # namespace that triggers this rule
data:
spec:
- podSelector:
- matchLabels: {}
- matchExpressions: []
- policyTypes: []
+ # select all pods in the namespace
+ podSelector: {}
+ policyTypes:
+ - Ingress
metadata:
labels:
policyname: "default"
````
-In this example, when the policy is applied, any new namespace will receive a NetworkPolicy based on the specified template that by default denies all inbound and outbound traffic.
+In this example, new namespaces will receive a NetworkPolicy that denies all inbound traffic by default.
---
-*Read Next >> [Testing Policies](/documentation/testing-policies.md)*
+
+*Read Next >> [Variables](/documentation/writing-policies-variables.md)*
diff --git a/documentation/writing-policies-mutate.md b/documentation/writing-policies-mutate.md
index 7cf5015897..19110511e3 100644
--- a/documentation/writing-policies-mutate.md
+++ b/documentation/writing-policies-mutate.md
@@ -1,8 +1,8 @@
-*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Mutate*
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Mutate Resources*
-# Mutate Configurations
+# Mutate Resources
-The ```mutate``` rule contains actions that will be applied to matching resources. A mutate rule can be written as a JSON Patch or as an overlay.
+The ```mutate``` rule can be used to add, replace, or delete elements in matching resources. A mutate rule can be written as a JSON Patch or as an overlay.
By using a ```patch``` in the (JSONPatch - RFC 6902)[http://jsonpatch.com/] format, you can make precise changes to the resource being created. Using an ```overlay``` is convenient for describing the desired state of the resource.
@@ -35,13 +35,13 @@ spec :
kinds:
- Deployment
mutate:
- patches:
- - path: "/spec/template/spec/initContainers/0/"
- op: add
- value:
- - image: "nirmata.io/kube-vault-client:v2"
- name: "init-secrets"
-
+ overlay:
+ spec:
+ template:
+ spec:
+ initContainers:
+ - name: init-secrets
+ image: nirmata.io/kube-vault-client:v2
````
Here is the example of a patch that removes a label from the secret:
@@ -178,25 +178,30 @@ A variation of an anchor, is to add a field value if it is not already defined.
An `add anchor` is processed as part of applying the mutation. Typically, every non-anchor tag-value is applied as part of the mutation. If the `add anchor` is set on a tag, the tag and value are only applied if they do not exist in the resource.
-For example, this overlay will set the port to 6443, if a port is not already defined:
+For example, this policy matches and mutates pods with `emptyDir` volume, to add the `safe-to-evict` annotation if it is not specified.
````yaml
apiVersion: kyverno.io/v1
-kind : ClusterPolicy
-metadata :
- name : policy-set-port
-spec :
- rules:
- - name: "Set port"
- match:
- resources:
- kinds :
- - Endpoints
- mutate:
+kind: ClusterPolicy
+metadata:
+ name: add-safe-to-evict
+ annotations:
+ pod-policies.kyverno.io/autogen-controllers: none
+spec:
+ rules:
+ - name: "annotate-empty-dir"
+ match:
+ resources:
+ kinds:
+ - Pod
+ mutate:
overlay:
- subsets:
- - (ports):
- +(port): 6443
+ metadata:
+ annotations:
+ +(cluster-autoscaler.kubernetes.io/safe-to-evict): true
+ spec:
+ volumes:
+ - (emptyDir): {}
````
#### Anchor processing flow
@@ -213,4 +218,4 @@ The anchor processing behavior for mutate conditions is as follows:
Additional details on mutation overlay behaviors are available on the wiki: [Mutation Overlay](https://github.com/nirmata/kyverno/wiki/Mutation-Overlay)
---
-*Read Next >> [Generate](/documentation/writing-policies-generate.md)*
+*Read Next >> [Generate Resources](/documentation/writing-policies-generate.md)*
diff --git a/documentation/writing-policies-preconditions.md b/documentation/writing-policies-preconditions.md
new file mode 100644
index 0000000000..39da70e482
--- /dev/null
+++ b/documentation/writing-policies-preconditions.md
@@ -0,0 +1,30 @@
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Preconditions*
+
+# Preconditions
+
+Preconditions allow controlling policy rule execution based on variable values.
+
+While `match` & `exclude` conditions allow filtering requests based on resource and user information, `preconditions` can be used to define custom filters for more granular control.
+
+The following operators are currently supported for precondition evaluation:
+- Equal
+- NotEqual
+
+## Example
+
+```yaml
+ - name: generate-owner-role
+ match:
+ resources:
+ kinds:
+ - Namespace
+ preconditions:
+ - key: "{{serviceAccountName}}"
+ operator: NotEqual
+ value: ""
+```
+
+In the above example, the rule is only applied to requests from service accounts i.e. when the `{{serviceAccountName}}` is not empty.
+
+
+*Read Next >> [Auto-Generation for Pod Controllers](/documentation/writing-policies-autogen.md)*
diff --git a/documentation/writing-policies-validate.md b/documentation/writing-policies-validate.md
index 4a98b21d29..6b628ed0bd 100644
--- a/documentation/writing-policies-validate.md
+++ b/documentation/writing-policies-validate.md
@@ -1,7 +1,7 @@
-*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Validate*
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Validate Resources*
-# Validate Configurations
+# Validate Resources
A validation rule is expressed as an overlay pattern that expresses the desired configuration. Resource configurations must match fields and expressions defined in the pattern to pass the validation rule. The following rules are followed when processing the overlay pattern:
@@ -134,17 +134,17 @@ spec :
- Deployment
# Name is optional and can use wildcards
name: "*"
- # Selector is optional
- selector:
validate:
pattern:
spec:
- ^(containers):
- resources:
- requests:
- memory: "$(<=./../../limits/memory)"
- limits:
- memory: "2048Mi"
+ template:
+ spec:
+ ^(containers):
+ - resources:
+ requests:
+ memory: "$(<=./../../limits/memory)"
+ limits:
+ memory: "2048Mi"
````
### Logical OR across validation patterns
@@ -191,4 +191,4 @@ Additional examples are available in [samples](/samples/README.md)
The `validationFailureAction` attribute controls processing behaviors when the resource is not compliant with the policy. If the value is set to `enforce` resource creation or updates are blocked when the resource does not comply, and when the value is set to `audit` a policy violation is reported but the resource creation or update is allowed.
---
-*Read Next >> [Generate](/documentation/writing-policies-mutate.md)*
+*Read Next >> [Mutate Resources](/documentation/writing-policies-mutate.md)*
diff --git a/documentation/writing-policies-variables.md b/documentation/writing-policies-variables.md
new file mode 100644
index 0000000000..a0371e25b4
--- /dev/null
+++ b/documentation/writing-policies-variables.md
@@ -0,0 +1,35 @@
+*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Variables*
+
+# Variables
+
+Sometimes it is necessary to vary the contents of a mutated or generated resource based on request data. To achieve this, variables can be used to reference attributes that are loaded in the rule processing context using a [JMESPATH](http://jmespath.org/) notation.
+
+The policy engine will substitute any values with the format `{{<JMESPATH>}}` with the variable value before processing the rule.
+
+The following data is available for use in context:
+- Resource: `{{request.object}}`
+- UserInfo: `{{request.userInfo}}`
+
+## Pre-defined Variables
+
+Kyverno automatically creates a few useful variables:
+
+- `serviceAccountName` : the last part of the service account userName, i.e. without the prefix `system:serviceaccount:<namespace>:`. For example, when processing a request from `system:serviceaccount:nirmata:user1` Kyverno will store the value `user1` in the variable `serviceAccountName`.
+
+- `serviceAccountNamespace` : the `namespace` portion of the serviceAccount. For example, when processing a request from `system:serviceaccount:nirmata:user1` Kyverno will store `nirmata` in the variable `serviceAccountNamespace`.
+
+## Examples
+
+1. Reference a resource name (type string)
+
+`{{request.object.metadata.name}}`
+
+2. Build name from multiple variables (type string)
+
+`"ns-owner-{{request.object.metadata.namespace}}-{{request.userInfo.username}}-binding"`
+
+3. Reference the metadata (type object)
+
+`{{request.object.metadata}}`
+
+*Read Next >> [Preconditions](/documentation/writing-policies-preconditions.md)*
diff --git a/documentation/writing-policies.md b/documentation/writing-policies.md
index b81a0de32e..b0890dedf4 100644
--- a/documentation/writing-policies.md
+++ b/documentation/writing-policies.md
@@ -6,11 +6,17 @@ The following picture shows the structure of a Kyverno Policy:

-Each Kyverno policy contains one or more rules. Each rule has a match clause, an optional excludes clause, and a mutate, validate, or generate clause.
+Each Kyverno policy contains one or more rules. Each rule has a `match` clause, an optional `exclude` clause, and one of a `mutate`, `validate`, or `generate` clause.
+
+The match / exclude clauses have the same structure, and can contain the following elements:
+* resources: select resources by name, namespaces, kinds, and label selectors.
+* subjects: select users, user groups, and service accounts
+* roles: select namespaced roles
+* clusterroles: select cluster wide roles
When Kyverno receives an admission controller request, i.e. a validation or mutation webhook, it first checks to see if the resource and user information matches or should be excluded from processing. If both checks pass, then the rule logic to mutate, validate, or generate resources is applied.
-The following YAML provides an example for the match and validate clauses.
+The following YAML provides an example for a match clause.
````yaml
apiVersion : kyverno.io/v1
@@ -31,11 +37,11 @@ spec :
kinds: # Required, list of kinds
- Deployment
- StatefulSet
- name: "mongo*" # Optional, a resource name is optional. Name supports wildcards * and ?
- namespaces: # Optional, list of namespaces. Supports wilcards * and ?
+ name: "mongo*" # Optional, a resource name is optional. Name supports wildcards (* and ?)
+ namespaces: # Optional, list of namespaces. Supports wildcards (* and ?)
- "dev*"
- test
- selector: # Optional, a resource selector is optional. Selector values support wildcards * and ?
+ selector: # Optional, a resource selector is optional. Values support wildcards (* and ?)
matchLabels:
app: mongodb
matchExpressions:
@@ -47,100 +53,14 @@ spec :
# Optional, roles to be matched
roles:
# Optional, clusterroles to be matched
- clusterroles:
- # Resources that need to be excluded
- exclude: # Optional, resources to be excluded from evaulation
- resources:
- kinds:
- - Daemonsets
- name: "*"
- namespaces:
- - prod
- - "kube*"
- selector:
- matchLabels:
- app: mongodb
- matchExpressions:
- - {key: tier, operator: In, values: [database]}
- # Optional, subjects to be excluded
- subjects:
- # Optional, roles to be excluded
- roles:
- # Optional, clusterroles to be excluded
- clusterroles:
- - cluster-admin
- - admin
- # rule is evaluated if the preconditions are satisfied
- # all preconditions are AND/&& operation
- preconditions:
- - key: name # compares (key operator value)
- operator: Equal
- value: name # constant "name" == "name"
- - key: "{{serviceAccountName}}" # refer to a pre-defined variable serviceAccountName
- operator: NotEqual
- value: "user1" # if service
- # Each rule can contain a single validate, mutate, or generate directive
- ...
+ clusterroles: cluster-admin
+
+ ...
+
````
Each rule can validate, mutate, or generate configurations of matching resources. A rule definition can contain only a single **mutate**, **validate**, or **generate** child node. These actions are applied to the resource in described order: mutation, validation and then generation.
-# Variables:
-Variables can be used to reference attributes that are loaded in the context using a [JMESPATH](http://jmespath.org/) search path.
-Format: `{{}}`
-Resources available in context:
-- Resource: `{{request.object}}`
-- UserInfo: `{{request.userInfo}}`
-
-## Pre-defined Variables
-- `serviceAccountName` : the variable removes the suffix system:serviceaccount:: and stores the userName.
-Example userName=`system:serviceaccount:nirmata:user1` will store variable value as `user1`.
-- `serviceAccountNamespace` : extracts the `namespace` of the serviceAccount.
-Example userName=`system:serviceaccount:nirmata:user1` will store variable value as `nirmata`.
-
-Examples:
-
-1. Refer to resource name(type string)
-
-`{{request.object.metadata.name}}`
-
-2. Build name from multiple variables(type string)
-
-`"ns-owner-{{request.object.metadata.namespace}}-{{request.userInfo.username}}-binding"`
-
-3. Refer to metadata struct/object(type object)
-
-`{{request.object.metadata}}`
-
-# PreConditions:
-Apart from using `match` & `exclude` conditions on resource to filter which resources to apply the rule on, `preconditions` can be used to define custom filters.
-```yaml
- - name: generate-owner-role
- match:
- resources:
- kinds:
- - Namespace
- preconditions:
- - key: "{{request.userInfo.username}}"
- operator: NotEqual
- value: ""
-```
-In the above example, if the variable `{{request.userInfo.username}}` is blank then we dont apply the rule on resource.
-
-Operators supported:
-- Equal
-- NotEqual
-
-# Auto generating rules for pod controllers
-Writing policies on pods helps address all pod creation flows, but results in errors not being reported when a pod controller object is created. Kyverno solves this issue, by automatically generating rules for pod controllers from a rule written for a pod.
-
-This behavior is controlled by the pod-policies.kyverno.io/autogen-controllers annotation. By default, Kyverno inserts an annotation `pod-policies.kyverno.io/autogen-controllers=all`, to generate an additional rule that is applied to pod controllers: DaemonSet, Deployment, Job, StatefulSet.
-
-Change the annotation `pod-policies.kyverno.io/autogen-controllers` to customize the applicable pod controllers of the auto-gen rule. For example, Kyverno generates the rule for `Deployment` if the annotation of policy is defined as `pod-policies.kyverno.io/autogen-controllers=Deployment`. If `name` or `labelSelector` is specified in the match / exclude block, Kyverno skips generating pod controllers rule as these filters may not be applicable to pod controllers.
-
-To disable auto-generating rules for pod controllers, set `pod-policies.kyverno.io/autogen-controllers=none`.
-
-
---
-*Read Next >> [Validate](/documentation/writing-policies-validate.md)*
\ No newline at end of file
+*Read Next >> [Validate Resources](/documentation/writing-policies-validate.md)*
diff --git a/pkg/config/config.go b/pkg/config/config.go
index e0e3459213..3ef21fbfee 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -23,9 +23,9 @@ const (
//MutatingWebhookName default resource mutating webhook name
MutatingWebhookName = "nirmata.kyverno.resource.mutating-webhook"
- // ValidatingWebhookConfigurationName = "kyverno-validating-webhook-cfg"
- // ValidatingWebhookConfigurationDebug = "kyverno-validating-webhook-cfg-debug"
- // ValidatingWebhookName = "nirmata.kyverno.policy-validating-webhook"
+ ValidatingWebhookConfigurationName = "kyverno-resource-validating-webhook-cfg"
+ ValidatingWebhookConfigurationDebugName = "kyverno-resource-validating-webhook-cfg-debug"
+ ValidatingWebhookName = "nirmata.kyverno.resource.validating-webhook"
//VerifyMutatingWebhookConfigurationName default verify mutating webhook configuration name
VerifyMutatingWebhookConfigurationName = "kyverno-verify-mutating-webhook-cfg"
diff --git a/pkg/engine/policy/validate.go b/pkg/engine/policy/validate.go
index ee151b8120..24eb4e17b2 100644
--- a/pkg/engine/policy/validate.go
+++ b/pkg/engine/policy/validate.go
@@ -135,10 +135,6 @@ func validateMatchedResourceDescription(rd kyverno.ResourceDescription) (string,
return "", fmt.Errorf("match resources not specified")
}
- if len(rd.Kinds) == 0 {
- return "match", fmt.Errorf("kind is mandatory")
- }
-
if err := validateResourceDescription(rd); err != nil {
return "match", err
}
diff --git a/pkg/engine/policy/validate_test.go b/pkg/engine/policy/validate_test.go
index 9c3a827ffa..e3bbbd718e 100644
--- a/pkg/engine/policy/validate_test.go
+++ b/pkg/engine/policy/validate_test.go
@@ -248,24 +248,6 @@ func Test_Validate_ResourceDescription_MatchedValid(t *testing.T) {
_, err = validateMatchedResourceDescription(rd)
assert.NilError(t, err)
}
-func Test_Validate_ResourceDescription_MissingKindsOnMatched(t *testing.T) {
- var err error
- matchedResourcedescirption := []byte(`
- {
- "selector": {
- "matchLabels": {
- "app.type": "prod"
- }
- }
- }`)
-
- var rd kyverno.ResourceDescription
- err = json.Unmarshal(matchedResourcedescirption, &rd)
- assert.NilError(t, err)
-
- _, err = validateMatchedResourceDescription(rd)
- assert.Assert(t, err != nil)
-}
func Test_Validate_ResourceDescription_MissingKindsOnExclude(t *testing.T) {
var err error
diff --git a/pkg/engine/utils.go b/pkg/engine/utils.go
index a0680cdd97..6a6a162af8 100644
--- a/pkg/engine/utils.go
+++ b/pkg/engine/utils.go
@@ -31,8 +31,10 @@ func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno
matches := rule.MatchResources.ResourceDescription
exclude := rule.ExcludeResources.ResourceDescription
- if !findKind(matches.Kinds, resource.GetKind()) {
- return false
+ if len(matches.Kinds) > 0 {
+ if !findKind(matches.Kinds, resource.GetKind()) {
+ return false
+ }
}
name := resource.GetName()
diff --git a/pkg/engine/validation_test.go b/pkg/engine/validation_test.go
index a233826446..8a7ac06e23 100644
--- a/pkg/engine/validation_test.go
+++ b/pkg/engine/validation_test.go
@@ -31,196 +31,6 @@ func TestGetAnchorsFromMap_ThereAreAnchors(t *testing.T) {
assert.Equal(t, actualMap["(namespace)"].(string), "kube-?olicy")
}
-func TestValidate_ServiceTest(t *testing.T) {
- rawPolicy := []byte(`{
- "apiVersion":"kyverno.nirmata.io/v1",
- "kind":"ClusterPolicy",
- "metadata":{
- "name":"policy-service"
- },
- "spec":{
- "rules":[
- {
- "name":"ps1",
- "resource":{
- "kinds":[
- "Service"
- ],
- "name":"game-service*"
- },
- "mutate":{
- "patches":[
- {
- "path":"/metadata/labels/isMutated",
- "op":"add",
- "value":"true"
- },
- {
- "path":"/metadata/labels/secretLabel",
- "op":"replace",
- "value":"weKnow"
- },
- {
- "path":"/metadata/labels/originalLabel",
- "op":"remove"
- },
- {
- "path":"/spec/selector/app",
- "op":"replace",
- "value":"mutedApp"
- }
- ]
- },
- "validate":{
- "message":"This resource is broken",
- "pattern":{
- "spec":{
- "ports":[
- {
- "name":"hs",
- "protocol":32
- }
- ]
- }
- }
- }
- }
- ]
- }
- }`)
- rawResource := []byte(`{
- "kind":"Service",
- "apiVersion":"v1",
- "metadata":{
- "name":"game-service",
- "labels":{
- "originalLabel":"isHere",
- "secretLabel":"thisIsMySecret"
- }
- },
- "spec":{
- "selector":{
- "app":"MyApp"
- },
- "ports":[
- {
- "name":"http",
- "protocol":"TCP",
- "port":80,
- "targetPort":9376
- }
- ]
- }
- }
- `)
-
- var policy kyverno.ClusterPolicy
- json.Unmarshal(rawPolicy, &policy)
-
- resourceUnstructured, err := utils.ConvertToUnstructured(rawResource)
- assert.NilError(t, err)
-
- er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
- assert.Assert(t, len(er.PolicyResponse.Rules) == 0)
-}
-
-func TestValidate_MapHasFloats(t *testing.T) {
- rawPolicy := []byte(`{
- "apiVersion":"kyverno.nirmata.io/v1",
- "kind":"ClusterPolicy",
- "metadata":{
- "name":"policy-deployment-changed"
- },
- "spec":{
- "rules":[
- {
- "name":"First policy v2",
- "resource":{
- "kinds":[
- "Deployment"
- ],
- "name":"nginx-*"
- },
- "mutate":{
- "patches":[
- {
- "path":"/metadata/labels/isMutated",
- "op":"add",
- "value":"true"
- },
- {
- "path":"/metadata/labels/app",
- "op":"replace",
- "value":"nginx_is_mutated"
- }
- ]
- },
- "validate":{
- "message":"replicas number is wrong",
- "pattern":{
- "metadata":{
- "labels":{
- "app":"*"
- }
- },
- "spec":{
- "replicas":3
- }
- }
- }
- }
- ]
- }
- }`)
- rawResource := []byte(`{
- "apiVersion":"apps/v1",
- "kind":"Deployment",
- "metadata":{
- "name":"nginx-deployment",
- "labels":{
- "app":"nginx"
- }
- },
- "spec":{
- "replicas":3,
- "selector":{
- "matchLabels":{
- "app":"nginx"
- }
- },
- "template":{
- "metadata":{
- "labels":{
- "app":"nginx"
- }
- },
- "spec":{
- "containers":[
- {
- "name":"nginx",
- "image":"nginx:1.7.9",
- "ports":[
- {
- "containerPort":80
- }
- ]
- }
- ]
- }
- }
- }
- }
- `)
-
- var policy kyverno.ClusterPolicy
- json.Unmarshal(rawPolicy, &policy)
-
- resourceUnstructured, err := utils.ConvertToUnstructured(rawResource)
- assert.NilError(t, err)
- er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
- assert.Assert(t, len(er.PolicyResponse.Rules) == 0)
-}
-
func TestValidate_image_tag_fail(t *testing.T) {
// If image tag is latest then imagepull policy needs to be checked
rawPolicy := []byte(`{
diff --git a/pkg/namespace/generation.go b/pkg/namespace/generation.go
index f5a96fa245..7ce302a046 100644
--- a/pkg/namespace/generation.go
+++ b/pkg/namespace/generation.go
@@ -165,7 +165,7 @@ func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
var filteredpolicies []kyverno.ClusterPolicy
glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
- policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
+ policies, err := pMetaStore.ListAll()
if err != nil {
glog.Errorf("failed to get list policies: %v", err)
return nil
diff --git a/pkg/policystore/policystore.go b/pkg/policystore/policystore.go
index a5b2f82cdf..3d23b106e4 100644
--- a/pkg/policystore/policystore.go
+++ b/pkg/policystore/policystore.go
@@ -3,6 +3,8 @@ package policystore
import (
"sync"
+ "k8s.io/apimachinery/pkg/labels"
+
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
@@ -34,8 +36,7 @@ type UpdateInterface interface {
//LookupInterface provides api to lookup policies
type LookupInterface interface {
- // Lookup based on kind and namespaces
- LookUp(kind, namespace string) ([]kyverno.ClusterPolicy, error)
+ ListAll() ([]kyverno.ClusterPolicy, error)
}
// NewPolicyStore returns a new policy store
@@ -81,19 +82,18 @@ func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
}
}
-//LookUp look up the resources
-func (ps *PolicyStore) LookUp(kind, namespace string) ([]kyverno.ClusterPolicy, error) {
- ret := []kyverno.ClusterPolicy{}
- // lookup meta-store
- policyNames := ps.lookUp(kind, namespace)
- for _, policyName := range policyNames {
- policy, err := ps.pLister.Get(policyName)
- if err != nil {
- return nil, err
- }
- ret = append(ret, *policy)
+func (ps *PolicyStore) ListAll() ([]kyverno.ClusterPolicy, error) {
+ policyPointers, err := ps.pLister.List(labels.NewSelector())
+ if err != nil {
+ return nil, err
}
- return ret, nil
+
+ var policies = make([]kyverno.ClusterPolicy, 0, len(policyPointers))
+ for _, policy := range policyPointers {
+ policies = append(policies, *policy)
+ }
+
+ return policies, nil
}
//UnRegister Remove policy information
@@ -125,48 +125,6 @@ func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
return nil
}
-//LookUp lookups up the policies for kind and namespace
-// returns a list of that statisfy the filters
-func (ps *PolicyStore) lookUp(kind, namespace string) []string {
- ps.mu.RLock()
- defer ps.mu.RUnlock()
- var policyMap policyMap
- var ret []string
- // kind
- kindMap := ps.getKind(kind)
- if kindMap == nil {
- return []string{}
- }
- // get namespace specific policies
- policyMap = kindMap[namespace]
- ret = append(ret, transform(policyMap)...)
- // get policies on all namespaces
- policyMap = kindMap["*"]
- ret = append(ret, transform(policyMap)...)
- return unique(ret)
-}
-
-func unique(intSlice []string) []string {
- keys := make(map[string]bool)
- list := []string{}
- for _, entry := range intSlice {
- if _, value := keys[entry]; !value {
- keys[entry] = true
- list = append(list, entry)
- }
- }
- return list
-}
-
-// generates a copy
-func transform(pmap policyMap) []string {
- ret := []string{}
- for k := range pmap {
- ret = append(ret, k)
- }
- return ret
-}
-
func (ps *PolicyStore) addKind(kind string) namespaceMap {
val, ok := ps.data[kind]
if ok {
diff --git a/pkg/policystore/policystore_test.go b/pkg/policystore/policystore_test.go
deleted file mode 100644
index 0e893f7ca4..0000000000
--- a/pkg/policystore/policystore_test.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package policystore
-
-import (
- "encoding/json"
- "reflect"
- "testing"
- "time"
-
- kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
- "github.com/nirmata/kyverno/pkg/client/clientset/versioned/fake"
- listerv1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- cache "k8s.io/client-go/tools/cache"
-)
-
-func Test_Operations(t *testing.T) {
- rawPolicy1 := []byte(`
- {
- "apiVersion": "kyverno.io/v1",
- "kind": "ClusterPolicy",
- "metadata": {
- "name": "test-policy1"
- },
- "spec": {
- "rules": [
- {
- "name": "r1",
- "match": {
- "resources": {
- "kinds": [
- "Pod"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- },
- {
- "name": "r2",
- "match": {
- "resources": {
- "kinds": [
- "Pod",
- "Deployment"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- },
- {
- "name": "r3",
- "match": {
- "resources": {
- "kinds": [
- "Pod",
- "Deployment"
- ],
- "namespaces": [
- "n1"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- },
- {
- "name": "r4",
- "match": {
- "resources": {
- "kinds": [
- "Pod",
- "Deployment"
- ],
- "namespaces": [
- "n1",
- "n2"
- ]
- }
- },
- "validate": {
- "pattern": "temp"
- }
- }
- ]
- }
- }
- `)
-
- rawPolicy2 := []byte(`
- {
- "apiVersion": "kyverno.io/v1",
- "kind": "ClusterPolicy",
- "metadata": {
- "name": "test-policy2"
- },
- "spec": {
- "rules": [
- {
- "name": "r1",
- "match": {
- "resources": {
- "kinds": [
- "Pod"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- },
- {
- "name": "r2",
- "match": {
- "resources": {
- "kinds": [
- "Pod"
- ],
- "namespaces": [
- "n4"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- },
- {
- "name": "r2",
- "match": {
- "resources": {
- "kinds": [
- "Pod"
- ],
- "namespaces": [
- "n4",
- "n5",
- "n6"
- ]
- }
- },
- "validate": {
- "pattern": "temp"
- }
- }
- ]
- }
- }`)
-
- rawPolicy3 := []byte(`
- {
- "apiVersion": "kyverno.io/v1",
- "kind": "ClusterPolicy",
- "metadata": {
- "name": "test-policy3"
- },
- "spec": {
- "rules": [
- {
- "name": "r4",
- "match": {
- "resources": {
- "kinds": [
- "Service"
- ]
- }
- },
- "mutate": {
- "overlay": "temp"
- }
- }
- ]
- }
- }`)
- var policy1 kyverno.ClusterPolicy
- json.Unmarshal(rawPolicy1, &policy1)
- var policy2 kyverno.ClusterPolicy
- json.Unmarshal(rawPolicy2, &policy2)
- var policy3 kyverno.ClusterPolicy
- json.Unmarshal(rawPolicy3, &policy3)
- scheme.Scheme.AddKnownTypes(kyverno.SchemeGroupVersion,
- &kyverno.ClusterPolicy{},
- )
- var obj runtime.Object
- var err error
- var retPolicies []kyverno.ClusterPolicy
- polices := []runtime.Object{}
- // list of runtime objects
- decode := scheme.Codecs.UniversalDeserializer().Decode
- obj, _, err = decode(rawPolicy1, nil, nil)
- if err != nil {
- t.Error(err)
- }
- polices = append(polices, obj)
- obj, _, err = decode(rawPolicy2, nil, nil)
- if err != nil {
- t.Error(err)
- }
- polices = append(polices, obj)
- obj, _, err = decode(rawPolicy3, nil, nil)
- if err != nil {
- t.Error(err)
- }
- polices = append(polices, obj)
- // Mock Lister
- client := fake.NewSimpleClientset(polices...)
- fakeInformer := &FakeInformer{client: client}
- store := NewPolicyStore(fakeInformer)
- // Test Operations
- // Add
- store.Register(policy1)
- // Add
- store.Register(policy2)
- // Add
- store.Register(policy3)
- // Lookup
- retPolicies, err = store.LookUp("Pod", "")
- if err != nil {
- t.Error(err)
- }
- if len(retPolicies) != len([]kyverno.ClusterPolicy{policy1, policy2}) {
- // checking length as the order of polcies might be different
- t.Error("not matching")
- }
-
- // Remove
- err = store.UnRegister(policy1)
- if err != nil {
- t.Error(err)
- }
- retPolicies, err = store.LookUp("Pod", "")
- if err != nil {
- t.Error(err)
- }
- // Lookup
- if !reflect.DeepEqual(retPolicies, []kyverno.ClusterPolicy{policy2}) {
- t.Error("not matching")
- }
- // Add
- store.Register(policy1)
- retPolicies, err = store.LookUp("Pod", "")
- if err != nil {
- t.Error(err)
- }
-
- if len(retPolicies) != len([]kyverno.ClusterPolicy{policy1, policy2}) {
- // checking length as the order of polcies might be different
- t.Error("not matching")
- }
-
- retPolicies, err = store.LookUp("Service", "")
- if err != nil {
- t.Error(err)
- }
- if !reflect.DeepEqual(retPolicies, []kyverno.ClusterPolicy{policy3}) {
- t.Error("not matching")
- }
-
-}
-
-type FakeInformer struct {
- client *fake.Clientset
-}
-
-func (fi *FakeInformer) Informer() cache.SharedIndexInformer {
- fsi := &FakeSharedInformer{}
- return fsi
-}
-
-func (fi *FakeInformer) Lister() listerv1.ClusterPolicyLister {
- fl := &FakeLister{client: fi.client}
- return fl
-}
-
-type FakeLister struct {
- client *fake.Clientset
-}
-
-func (fl *FakeLister) List(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
- return nil, nil
-}
-
-func (fl *FakeLister) Get(name string) (*kyverno.ClusterPolicy, error) {
- return fl.client.KyvernoV1().ClusterPolicies().Get(name, v1.GetOptions{})
-}
-
-func (fl *FakeLister) GetPolicyForPolicyViolation(pv *kyverno.ClusterPolicyViolation) ([]*kyverno.ClusterPolicy, error) {
- return nil, nil
-}
-func (fl *FakeLister) ListResources(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
- return nil, nil
-}
-
-func (fl *FakeLister) GetPolicyForNamespacedPolicyViolation(pv *kyverno.PolicyViolation) ([]*kyverno.ClusterPolicy, error) {
- return nil, nil
-}
-
-type FakeSharedInformer struct {
-}
-
-func (fsi *FakeSharedInformer) HasSynced() bool {
- return true
-}
-
-func (fsi *FakeSharedInformer) AddEventHandler(handler cache.ResourceEventHandler) {
-}
-
-func (fsi *FakeSharedInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
-
-}
-
-func (fsi *FakeSharedInformer) AddIndexers(indexers cache.Indexers) error {
- return nil
-}
-
-func (fsi *FakeSharedInformer) GetIndexer() cache.Indexer {
- return nil
-}
-
-func (fsi *FakeSharedInformer) GetStore() cache.Store {
- return nil
-}
-
-func (fsi *FakeSharedInformer) GetController() cache.Controller {
- return nil
-}
-
-func (fsi *FakeSharedInformer) Run(stopCh <-chan struct{}) {
-
-}
-
-func (fsi *FakeSharedInformer) LastSyncResourceVersion() string {
- return ""
-}
diff --git a/pkg/policyviolation/namespacedpv.go b/pkg/policyviolation/namespacedpv.go
index ad927b3ea4..1967a0ba3f 100644
--- a/pkg/policyviolation/namespacedpv.go
+++ b/pkg/policyviolation/namespacedpv.go
@@ -109,8 +109,9 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
// update resource
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Update(newPv)
if err != nil {
- return fmt.Errorf("failed to update namespaced polciy violation: %v", err)
+ return fmt.Errorf("failed to update namespaced policy violation: %v", err)
}
+
glog.Infof("namespaced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
return nil
}
diff --git a/pkg/webhookconfig/registration.go b/pkg/webhookconfig/registration.go
index e16ba2ecc9..8db0cba73c 100644
--- a/pkg/webhookconfig/registration.go
+++ b/pkg/webhookconfig/registration.go
@@ -86,7 +86,7 @@ func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<-
//CreateResourceMutatingWebhookConfiguration create a Mutatingwebhookconfiguration resource for all resource type
// used to forward request to kyverno webhooks to apply policeis
-// Mutationg webhook is be used for Mutating & Validating purpose
+// Mutating webhook is only used for mutating purposes
func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration() error {
var caData []byte
var config *admregapi.MutatingWebhookConfiguration
@@ -101,7 +101,7 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
if wrc.serverIP != "" {
// debug mode
// clientConfig - URL
- config = wrc.contructDebugMutatingWebhookConfig(caData)
+ config = wrc.constructDebugMutatingWebhookConfig(caData)
} else {
// clientConfig - service
config = wrc.constructMutatingWebhookConfig(caData)
@@ -118,6 +118,35 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
return nil
}
+func (wrc *WebhookRegistrationClient) CreateResourceValidatingWebhookConfiguration() error {
+ var caData []byte
+ var config *admregapi.ValidatingWebhookConfiguration
+
+ if caData = wrc.readCaData(); caData == nil {
+ return errors.New("Unable to extract CA data from configuration")
+ }
+ // if serverIP is specified, we assume it's debug mode
+ if wrc.serverIP != "" {
+ // debug mode
+ // clientConfig - URL
+ config = wrc.constructDebugValidatingWebhookConfig(caData)
+ } else {
+ // clientConfig - service
+ config = wrc.constructValidatingWebhookConfig(caData)
+ }
+
+ _, err := wrc.client.CreateResource(ValidatingWebhookConfigurationKind, "", *config, false)
+ if errorsapi.IsAlreadyExists(err) {
+ glog.V(4).Infof("resource validating webhook configuration %s, already exists. not creating one", config.Name)
+ return nil
+ }
+ if err != nil {
+ glog.V(4).Infof("failed to create resource validating webhook configuration %s: %v", config.Name, err)
+ return err
+ }
+ return nil
+}
+
//registerPolicyValidatingWebhookConfiguration create a Validating webhook configuration for Policy CRD
func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration() error {
var caData []byte
@@ -221,9 +250,10 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
var wg sync.WaitGroup
- wg.Add(4)
+ wg.Add(5)
// mutating and validating webhook configuration for Kubernetes resources
go wrc.removeResourceMutatingWebhookConfiguration(&wg)
+ go wrc.removeResourceValidatingWebhookConfiguration(&wg)
// mutating and validating webhook configurtion for Policy CRD resource
go wrc.removePolicyMutatingWebhookConfiguration(&wg)
go wrc.removePolicyValidatingWebhookConfiguration(&wg)
@@ -242,6 +272,12 @@ func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration
glog.Error(err)
}
}
+func (wrc *WebhookRegistrationClient) removeResourceValidatingWebhookConfiguration(wg *sync.WaitGroup) {
+ defer wg.Done()
+ if err := wrc.RemoveResourceValidatingWebhookConfiguration(); err != nil {
+ glog.Error(err)
+ }
+}
// delete policy mutating webhookconfigurations
// handle wait group
diff --git a/pkg/webhookconfig/resource.go b/pkg/webhookconfig/resource.go
index 8a32a8863e..090c33636a 100644
--- a/pkg/webhookconfig/resource.go
+++ b/pkg/webhookconfig/resource.go
@@ -10,7 +10,7 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func (wrc *WebhookRegistrationClient) contructDebugMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
+func (wrc *WebhookRegistrationClient) constructDebugMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.MutatingWebhookServicePath)
glog.V(4).Infof("Debug MutatingWebhookConfig is registered with url %s\n", url)
@@ -83,3 +83,73 @@ func (wrc *WebhookRegistrationClient) RemoveResourceMutatingWebhookConfiguration
glog.V(4).Infof("deleted resource webhook configuration %s", configName)
return nil
}
+
+func (wrc *WebhookRegistrationClient) constructDebugValidatingWebhookConfig(caData []byte) *admregapi.ValidatingWebhookConfiguration {
+ url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.ValidatingWebhookServicePath)
+ glog.V(4).Infof("Debug ValidatingWebhookConfig is registered with url %s\n", url)
+
+ return &admregapi.ValidatingWebhookConfiguration{
+ ObjectMeta: v1.ObjectMeta{
+ Name: config.ValidatingWebhookConfigurationDebugName,
+ },
+ Webhooks: []admregapi.Webhook{
+ generateDebugWebhook(
+ config.ValidatingWebhookName,
+ url,
+ caData,
+ true,
+ wrc.timeoutSeconds,
+ "*/*",
+ "*",
+ "*",
+ []admregapi.OperationType{admregapi.Create, admregapi.Update},
+ ),
+ },
+ }
+}
+
+func (wrc *WebhookRegistrationClient) constructValidatingWebhookConfig(caData []byte) *admregapi.ValidatingWebhookConfiguration {
+ return &admregapi.ValidatingWebhookConfiguration{
+ ObjectMeta: v1.ObjectMeta{
+ Name: config.ValidatingWebhookConfigurationName,
+ OwnerReferences: []v1.OwnerReference{
+ wrc.constructOwner(),
+ },
+ },
+ Webhooks: []admregapi.Webhook{
+ generateWebhook(
+ config.ValidatingWebhookName,
+ config.ValidatingWebhookServicePath,
+ caData,
+ false,
+ wrc.timeoutSeconds,
+ "*/*",
+ "*",
+ "*",
+ []admregapi.OperationType{admregapi.Create, admregapi.Update},
+ ),
+ },
+ }
+}
+
+func (wrc *WebhookRegistrationClient) GetResourceValidatingWebhookConfigName() string {
+ if wrc.serverIP != "" {
+ return config.ValidatingWebhookConfigurationDebugName
+ }
+ return config.ValidatingWebhookConfigurationName
+}
+
+func (wrc *WebhookRegistrationClient) RemoveResourceValidatingWebhookConfiguration() error {
+ configName := wrc.GetResourceValidatingWebhookConfigName()
+ err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", configName, false)
+ if errors.IsNotFound(err) {
+ glog.V(4).Infof("resource webhook configuration %s does not exits, so not deleting", configName)
+ return nil
+ }
+ if err != nil {
+ glog.V(4).Infof("failed to delete resource webhook configuration %s: %v", configName, err)
+ return err
+ }
+ glog.V(4).Infof("deleted resource webhook configuration %s", configName)
+ return nil
+}
diff --git a/pkg/webhookconfig/rwebhookregister.go b/pkg/webhookconfig/rwebhookregister.go
index 044db31241..aacdc72b9f 100644
--- a/pkg/webhookconfig/rwebhookregister.go
+++ b/pkg/webhookconfig/rwebhookregister.go
@@ -17,23 +17,31 @@ type ResourceWebhookRegister struct {
pendingCreation *abool.AtomicBool
LastReqTime *checker.LastReqTime
mwebhookconfigSynced cache.InformerSynced
+ vwebhookconfigSynced cache.InformerSynced
// list/get mutatingwebhookconfigurations
- mWebhookConfigLister mconfiglister.MutatingWebhookConfigurationLister
- webhookRegistrationClient *WebhookRegistrationClient
+ mWebhookConfigLister mconfiglister.MutatingWebhookConfigurationLister
+ vWebhookConfigLister mconfiglister.ValidatingWebhookConfigurationLister
+ webhookRegistrationClient *WebhookRegistrationClient
+ RunValidationInMutatingWebhook string
}
// NewResourceWebhookRegister returns a new instance of ResourceWebhookRegister manager
func NewResourceWebhookRegister(
lastReqTime *checker.LastReqTime,
mconfigwebhookinformer mconfiginformer.MutatingWebhookConfigurationInformer,
+ vconfigwebhookinformer mconfiginformer.ValidatingWebhookConfigurationInformer,
webhookRegistrationClient *WebhookRegistrationClient,
+ runValidationInMutatingWebhook string,
) *ResourceWebhookRegister {
return &ResourceWebhookRegister{
- pendingCreation: abool.New(),
- LastReqTime: lastReqTime,
- mwebhookconfigSynced: mconfigwebhookinformer.Informer().HasSynced,
- mWebhookConfigLister: mconfigwebhookinformer.Lister(),
- webhookRegistrationClient: webhookRegistrationClient,
+ pendingCreation: abool.New(),
+ LastReqTime: lastReqTime,
+ mwebhookconfigSynced: mconfigwebhookinformer.Informer().HasSynced,
+ mWebhookConfigLister: mconfigwebhookinformer.Lister(),
+ vwebhookconfigSynced: vconfigwebhookinformer.Informer().HasSynced,
+ vWebhookConfigLister: vconfigwebhookinformer.Lister(),
+ webhookRegistrationClient: webhookRegistrationClient,
+ RunValidationInMutatingWebhook: runValidationInMutatingWebhook,
}
}
@@ -45,62 +53,86 @@ func (rww *ResourceWebhookRegister) RegisterResourceWebhook() {
return
}
- // check cache
- configName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
- // exsitence of config is all that matters; if error occurs, creates webhook anyway
- // errors of webhook creation are handled separately
- config, _ := rww.mWebhookConfigLister.Get(configName)
- if config != nil {
- glog.V(4).Info("mutating webhoook configuration already exists, skip the request")
- return
- }
-
- createWebhook := func() {
- rww.pendingCreation.Set()
- err := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration()
- rww.pendingCreation.UnSet()
-
- if err != nil {
- glog.Errorf("failed to create resource mutating webhook configuration: %v, re-queue creation request", err)
- rww.RegisterResourceWebhook()
- return
- }
- glog.V(3).Info("Successfully created mutating webhook configuration for resources")
- }
-
timeDiff := time.Since(rww.LastReqTime.Time())
if timeDiff < checker.DefaultDeadline {
glog.V(3).Info("Verified webhook status, creating webhook configuration")
- go createWebhook()
+ go func() {
+ mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
+ mutatingConfig, _ := rww.mWebhookConfigLister.Get(mutatingConfigName)
+ if mutatingConfig != nil {
+ glog.V(4).Info("mutating webhoook configuration already exists")
+ } else {
+ rww.pendingCreation.Set()
+ err1 := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration()
+ rww.pendingCreation.UnSet()
+ if err1 != nil {
+ glog.Errorf("failed to create resource mutating webhook configuration: %v, re-queue creation request", err1)
+ rww.RegisterResourceWebhook()
+ return
+ }
+ glog.V(3).Info("Successfully created mutating webhook configuration for resources")
+ }
+
+ if rww.RunValidationInMutatingWebhook != "true" {
+ validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName()
+ validatingConfig, _ := rww.vWebhookConfigLister.Get(validatingConfigName)
+ if validatingConfig != nil {
+ glog.V(4).Info("validating webhoook configuration already exists")
+ } else {
+ rww.pendingCreation.Set()
+ err2 := rww.webhookRegistrationClient.CreateResourceValidatingWebhookConfiguration()
+ rww.pendingCreation.UnSet()
+ if err2 != nil {
+ glog.Errorf("failed to create resource validating webhook configuration: %v, re-queue creation request", err2)
+ rww.RegisterResourceWebhook()
+ return
+ }
+ glog.V(3).Info("Successfully created validating webhook configuration for resources")
+ }
+ }
+ }()
}
}
//Run starts the ResourceWebhookRegister manager
func (rww *ResourceWebhookRegister) Run(stopCh <-chan struct{}) {
// wait for cache to populate first time
- if !cache.WaitForCacheSync(stopCh, rww.mwebhookconfigSynced) {
+ if !cache.WaitForCacheSync(stopCh, rww.mwebhookconfigSynced, rww.vwebhookconfigSynced) {
glog.Error("configuration: failed to sync webhook informer cache")
}
+
}
// RemoveResourceWebhookConfiguration removes the resource webhook configurations
func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error {
- var err error
- // check informer cache
- configName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
- config, err := rww.mWebhookConfigLister.Get(configName)
+ mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
+ mutatingConfig, err := rww.mWebhookConfigLister.Get(mutatingConfigName)
if err != nil {
glog.V(4).Infof("failed to list mutating webhook config: %v", err)
return err
}
- if config == nil {
- // as no resource is found
- return nil
+ if mutatingConfig != nil {
+ err = rww.webhookRegistrationClient.RemoveResourceMutatingWebhookConfiguration()
+ if err != nil {
+ return err
+ }
+ glog.V(3).Info("removed mutating resource webhook configuration")
}
- err = rww.webhookRegistrationClient.RemoveResourceMutatingWebhookConfiguration()
- if err != nil {
- return err
+
+ if rww.RunValidationInMutatingWebhook != "true" {
+ validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName()
+ validatingConfig, err := rww.vWebhookConfigLister.Get(validatingConfigName)
+ if err != nil {
+ glog.V(4).Infof("failed to list validating webhook config: %v", err)
+ return err
+ }
+ if validatingConfig != nil {
+ err = rww.webhookRegistrationClient.RemoveResourceValidatingWebhookConfiguration()
+ if err != nil {
+ return err
+ }
+ glog.V(3).Info("removed validating resource webhook configuration")
+ }
}
- glog.V(3).Info("removed resource webhook configuration")
return nil
}
diff --git a/pkg/webhooks/annotations.go b/pkg/webhooks/annotations.go
index 21ed21414d..34d3e10468 100644
--- a/pkg/webhooks/annotations.go
+++ b/pkg/webhooks/annotations.go
@@ -2,6 +2,9 @@ package webhooks
import (
"encoding/json"
+ "strings"
+
+ yamlv2 "gopkg.in/yaml.v2"
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
@@ -11,14 +14,9 @@ import (
)
const (
- policyAnnotation = "policies.kyverno.patches"
+ policyAnnotation = "policies.kyverno.io~1patches"
)
-type policyPatch struct {
- PolicyName string `json:"policyname"`
- RulePatches interface{} `json:"patches"`
-}
-
type rulePatch struct {
RuleName string `json:"rulename"`
Op string `json:"op"`
@@ -31,6 +29,15 @@ type annresponse struct {
Value interface{} `json:"value"`
}
+var operationToPastTense = map[string]string{
+ "add": "added",
+ "remove": "removed",
+ "replace": "replaced",
+ "move": "moved",
+ "copy": "copied",
+ "test": "tested",
+}
+
func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte {
var annotations map[string]string
@@ -52,7 +59,7 @@ func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte
return nil
}
- if _, ok := annotations[policyAnnotation]; ok {
+ if _, ok := annotations[strings.ReplaceAll(policyAnnotation, "~1", "/")]; ok {
// create update patch string
patchResponse = annresponse{
Op: "replace",
@@ -69,7 +76,7 @@ func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte
}
} else {
// insert 'policies.kyverno.patches' entry in annotation map
- annotations[policyAnnotation] = string(value)
+ annotations[strings.ReplaceAll(policyAnnotation, "~1", "/")] = string(value)
patchResponse = annresponse{
Op: "add",
Path: "/metadata/annotations",
@@ -90,31 +97,31 @@ func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte
}
func annotationFromEngineResponses(engineResponses []response.EngineResponse) []byte {
- var policyPatches []policyPatch
+ var annotationContent = make(map[string]string)
for _, engineResponse := range engineResponses {
if !engineResponse.IsSuccesful() {
glog.V(3).Infof("Policy %s failed, skip preparing annotation\n", engineResponse.PolicyResponse.Policy)
continue
}
- var pp policyPatch
rulePatches := annotationFromPolicyResponse(engineResponse.PolicyResponse)
if rulePatches == nil {
continue
}
- pp.RulePatches = rulePatches
- pp.PolicyName = engineResponse.PolicyResponse.Policy
- policyPatches = append(policyPatches, pp)
+ policyName := engineResponse.PolicyResponse.Policy
+ for _, rulePatch := range rulePatches {
+ annotationContent[rulePatch.RuleName+"."+policyName+".kyverno.io"] = operationToPastTense[rulePatch.Op] + " " + rulePatch.Path
+ }
}
// return nil if there's no patches
// otherwise result = null, len(result) = 4
- if len(policyPatches) == 0 {
+ if len(annotationContent) == 0 {
return nil
}
- result, _ := json.Marshal(policyPatches)
+ result, _ := yamlv2.Marshal(annotationContent)
return result
}
diff --git a/pkg/webhooks/annotations_test.go b/pkg/webhooks/annotations_test.go
index 4fc0fc07dd..320e347ea3 100644
--- a/pkg/webhooks/annotations_test.go
+++ b/pkg/webhooks/annotations_test.go
@@ -43,7 +43,7 @@ func Test_empty_annotation(t *testing.T) {
engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, nil)
annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse})
- expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.patches":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]"}}`
+ expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}`
assert.Assert(t, string(annPatches) == expectedPatches)
}
@@ -56,7 +56,7 @@ func Test_exist_annotation(t *testing.T) {
engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, annotation)
annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse})
- expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.patches":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]"}}`
+ expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}`
assert.Assert(t, string(annPatches) == expectedPatches)
}
@@ -69,7 +69,7 @@ func Test_exist_kyverno_annotation(t *testing.T) {
engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, annotation)
annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse})
- expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.patches":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]"}}`
+ expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}`
assert.Assert(t, string(annPatches) == expectedPatches)
}
diff --git a/pkg/webhooks/common.go b/pkg/webhooks/common.go
index 1313f2bcdf..f19cfd699e 100644
--- a/pkg/webhooks/common.go
+++ b/pkg/webhooks/common.go
@@ -127,6 +127,9 @@ func extractResources(newRaw []byte, request *v1beta1.AdmissionRequest) (unstruc
var emptyResource unstructured.Unstructured
// New Resource
+ if newRaw == nil {
+ newRaw = request.Object.Raw
+ }
if newRaw == nil {
return emptyResource, emptyResource, fmt.Errorf("new resource is not defined")
}
diff --git a/pkg/webhooks/server.go b/pkg/webhooks/server.go
index d6dd330e99..d401cdc449 100644
--- a/pkg/webhooks/server.go
+++ b/pkg/webhooks/server.go
@@ -123,6 +123,7 @@ func NewWebhookServer(
}
mux := http.NewServeMux()
mux.HandleFunc(config.MutatingWebhookServicePath, ws.serve)
+ mux.HandleFunc(config.ValidatingWebhookServicePath, ws.serve)
mux.HandleFunc(config.VerifyMutatingWebhookServicePath, ws.serve)
mux.HandleFunc(config.PolicyValidatingWebhookServicePath, ws.serve)
mux.HandleFunc(config.PolicyMutatingWebhookServicePath, ws.serve)
@@ -164,7 +165,11 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
admissionReview.Response = ws.handleVerifyRequest(request)
case config.MutatingWebhookServicePath:
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
- admissionReview.Response = ws.handleAdmissionRequest(request)
+ admissionReview.Response = ws.handleMutateAdmissionRequest(request)
+ }
+ case config.ValidatingWebhookServicePath:
+ if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
+ admissionReview.Response = ws.handleValidateAdmissionRequest(request)
}
case config.PolicyValidatingWebhookServicePath:
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
@@ -189,8 +194,8 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
}
}
-func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
- policies, err := ws.pMetaStore.LookUp(request.Kind.Kind, request.Namespace)
+func (ws *WebhookServer) handleMutateAdmissionRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+ policies, err := ws.pMetaStore.ListAll()
if err != nil {
// Unable to connect to policy Lister to access policies
glog.Errorf("Unable to connect to policy controller to access policies. Policies are NOT being applied: %v", err)
@@ -242,16 +247,18 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
// patch the resource with patches before handling validation rules
patchedResource := processResourceWithPatches(patches, request.Object.Raw)
- // VALIDATION
- ok, msg := ws.HandleValidation(request, policies, patchedResource, roles, clusterRoles)
- if !ok {
- glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
- return &v1beta1.AdmissionResponse{
- Allowed: false,
- Result: &metav1.Status{
- Status: "Failure",
- Message: msg,
- },
+ if ws.resourceWebhookWatcher != nil && ws.resourceWebhookWatcher.RunValidationInMutatingWebhook == "true" {
+ // VALIDATION
+ ok, msg := ws.HandleValidation(request, policies, patchedResource, roles, clusterRoles)
+ if !ok {
+ glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
+ return &v1beta1.AdmissionResponse{
+ Allowed: false,
+ Result: &metav1.Status{
+ Status: "Failure",
+ Message: msg,
+ },
+ }
}
}
@@ -260,7 +267,7 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
// Success -> Generate Request CR created successsfully
// Failed -> Failed to create Generate Request CR
if request.Operation == v1beta1.Create {
- ok, msg = ws.HandleGenerate(request, policies, patchedResource, roles, clusterRoles)
+ ok, msg := ws.HandleGenerate(request, policies, patchedResource, roles, clusterRoles)
if !ok {
glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
return &v1beta1.AdmissionResponse{
@@ -284,6 +291,49 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
}
}
+func (ws *WebhookServer) handleValidateAdmissionRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+ policies, err := ws.pMetaStore.ListAll()
+ if err != nil {
+ // Unable to connect to policy Lister to access policies
+ glog.Errorf("Unable to connect to policy controller to access policies. Policies are NOT being applied: %v", err)
+ return &v1beta1.AdmissionResponse{Allowed: true}
+ }
+
+ var roles, clusterRoles []string
+
+ // getRoleRef only if policy has roles/clusterroles defined
+ startTime := time.Now()
+ if containRBACinfo(policies) {
+ roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
+ if err != nil {
+ // TODO(shuting): continue applying policy if error getting roleRef?
+ glog.Errorf("Unable to get rbac information for request Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s: %v",
+ request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, err)
+ }
+ }
+ glog.V(4).Infof("Time: webhook GetRoleRef %v", time.Since(startTime))
+
+ // VALIDATION
+ ok, msg := ws.HandleValidation(request, policies, nil, roles, clusterRoles)
+ if !ok {
+ glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
+ return &v1beta1.AdmissionResponse{
+ Allowed: false,
+ Result: &metav1.Status{
+ Status: "Failure",
+ Message: msg,
+ },
+ }
+ }
+
+ return &v1beta1.AdmissionResponse{
+ Allowed: true,
+ Result: &metav1.Status{
+ Status: "Success",
+ },
+ }
+}
+
// RunAsync TLS server in separate thread and returns control immediately
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
if !cache.WaitForCacheSync(stopCh, ws.pSynced, ws.rbSynced, ws.crbSynced) {
diff --git a/samples/best_practices/disallow_default_namespace.yaml b/samples/best_practices/disallow_default_namespace.yaml
index 0f579f4d49..64b1fe8844 100644
--- a/samples/best_practices/disallow_default_namespace.yaml
+++ b/samples/best_practices/disallow_default_namespace.yaml
@@ -3,7 +3,6 @@ kind: ClusterPolicy
metadata:
name: disallow-default-namespace
annotations:
- pod-policies.kyverno.io/autogen-controllers: none
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Kubernetes namespaces are an optional feature
that provide a way to segment and isolate cluster resources across multiple
diff --git a/test/resources/require_probes.yaml b/test/resources/require_probes.yaml
index 4c967c6a43..a7ad415546 100644
--- a/test/resources/require_probes.yaml
+++ b/test/resources/require_probes.yaml
@@ -4,9 +4,13 @@ metadata:
name: myapp-pod
labels:
app: myapp
-spec:
+spec:
containers:
- - name: nginx
- image: nginx
+ - name: goproxy
+ image: k8s.gcr.io/goproxy:0.1
+ ports:
+ - containerPort: 8080
readinessProbe:
- periodSeconds: 5
\ No newline at end of file
+ tcpSocket:
+ port: 8080
+ periodSeconds: 10
\ No newline at end of file