1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-30 19:35:06 +00:00

feat: add chainsaw tests for validate policies (part 2) (#10545)

Signed-off-by: Mariam Fahmy <mariam.fahmy@nirmata.com>
This commit is contained in:
Mariam Fahmy 2024-06-26 19:37:32 +08:00 committed by GitHub
parent 340009f55f
commit 565f4b5427
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
283 changed files with 6171 additions and 46 deletions

View file

@ -0,0 +1,12 @@
## Description
This test verifies that context variables (APICalls, etc.) are lazily evaluated after pre-conditions are processed.
## Expected Behavior
The Ingress delete should be allowed.
## Reference Issues
https://github.com/kyverno/kyverno/issues/4374

View file

@ -0,0 +1,40 @@
# Kyverno policy: denies creating/updating an Ingress whose hostname is
# already used by an Ingress of a *different* ingress class.
# (Indentation reconstructed — the scraped source had it stripped.)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-unique-host
spec:
  failurePolicy: Fail
  rules:
  - context:
    # Ingress class requested by the incoming object.
    - name: requestIngressClass
      variable:
        jmesPath: request.object.metadata.annotations."kubernetes.io/ingress.class"
    # All other Ingresses in the cluster (excludes the object being admitted).
    # Lazily evaluated: the API call only runs after preconditions pass.
    - apiCall:
        jmesPath: items[?metadata.name != '{{ request.object.metadata.name }}']
        urlPath: /apis/networking.k8s.io/v1/ingresses
      name: ingresses
    match:
      all:
      - resources:
          kinds:
          - Ingress
    name: unique-ingress-against-other-ingress-class
    preconditions:
      all:
      # Only evaluate on CREATE/UPDATE; other operations (e.g. DELETE) skip the rule.
      - key: '{{ request.operation }}'
        operator: AnyIn
        value:
        - CREATE
        - UPDATE
    validate:
      deny:
        conditions:
          any:
          # Deny when any requested host already exists under a different ingress class.
          - key: '{{ request.object.spec.rules[].host }}'
            operator: AnyIn
            value: '{{ingresses[?metadata.annotations."kubernetes.io/ingress.class"
              != ''{{ request.object.metadata.annotations."kubernetes.io/ingress.class"
              }}''].spec.rules[].host }}'
      message: |
        Ingress must have a unique hostname across different ingress classes
  validationFailureAction: Enforce

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: test-ingress

View file

@ -0,0 +1,19 @@
# Test fixture: Ingress in class "nginx" used to exercise the
# ingress-unique-host policy. (Indentation reconstructed.)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: my-app-ingress
  namespace: test-ingress
spec:
  rules:
  - host: my-app.myorg.io
    http:
      paths:
      - backend:
          service:
            name: my-app-deployment
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-unique-host
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,5 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-app-ingress
namespace: test-ingress

View file

@ -0,0 +1,27 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: lazyload
spec:
steps:
- name: step-01
try:
- apply:
file: chainsaw-step-01-apply-1-1.yaml
- apply:
file: chainsaw-step-01-apply-1-2.yaml
- apply:
file: chainsaw-step-01-apply-1-3.yaml
- assert:
file: chainsaw-step-01-assert-1-1.yaml
- assert:
file: chainsaw-step-01-assert-1-2.yaml
- name: step-02
try:
- delete:
ref:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: my-app-ingress
namespace: test-ingress

View file

@ -0,0 +1,13 @@
## Description
This test checks a POST operation to the Kubernetes API server for a SubjectAccessReview. It checks for delete access to the namespace of the request, and allows or denies the request.
## Expected Behavior
The test resource should be allowed to be created in the test namespace but not in the `default` namespace, as Kyverno cannot delete it.
## Reference Issues
https://github.com/kyverno/kyverno/issues/1717
https://github.com/kyverno/kyverno/issues/6857

View file

@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-controller
app.kubernetes.io/instance: kyverno
app.kubernetes.io/part-of: kyverno
name: kyverno:subjectaccessreviews
rules:
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- '*'

View file

@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-controller
app.kubernetes.io/instance: kyverno
app.kubernetes.io/part-of: kyverno
name: kyverno:namespace-delete
rules:
- apiGroups:
- ""
resourceNames:
- test-sar
resources:
- namespaces
verbs:
- delete

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: test-sar

View file

@ -0,0 +1,40 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: check-subjectaccessreview
spec:
background: false
rules:
- context:
- apiCall:
data:
- key: kind
value: SubjectAccessReview
- key: apiVersion
value: authorization.k8s.io/v1
- key: spec
value:
resourceAttributes:
group: ""
name: '{{ request.namespace }}'
resource: namespaces
verb: delete
user: system:serviceaccount:kyverno:kyverno-admission-controller
method: POST
urlPath: /apis/authorization.k8s.io/v1/subjectaccessreviews
name: subjectaccessreview
match:
any:
- resources:
kinds:
- ConfigMap
name: check-sar
validate:
deny:
conditions:
any:
- key: '{{ subjectaccessreview.status.allowed }}'
operator: NotEquals
value: true
message: User is not authorized.
validationFailureAction: Enforce

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: check-subjectaccessreview
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,28 @@
# Chainsaw test: sets up RBAC + policy, then expects the ConfigMap create to
# fail in `default` (no delete permission on that namespace) and succeed in
# `test-sar`. (Indentation reconstructed.)
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: subjectaccessreview
spec:
  steps:
  - name: step-01
    try:
    - apply:
        file: chainsaw-step-01-apply-1-1.yaml
    - apply:
        file: chainsaw-step-01-apply-1-2.yaml
    - apply:
        file: chainsaw-step-01-apply-1-3.yaml
    - apply:
        file: chainsaw-step-01-apply-1-4.yaml
    - assert:
        file: chainsaw-step-01-assert-1-1.yaml
  - name: step-02
    try:
    # Creating the ConfigMap in `default` must be rejected by the policy.
    - apply:
        expect:
        - check:
            ($error != null): true
        file: cm-default-ns.yaml
    - apply:
        file: cm-test-ns.yaml

View file

@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cm
namespace: default
data: {}

View file

@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cm
namespace: test-sar
data: {}

View file

@ -3,7 +3,6 @@ kind: ClusterPolicy
metadata:
name: ingress-unique-host
spec:
failurePolicy: Fail
rules:
- context:
- name: requestIngressClass
@ -27,6 +26,7 @@ spec:
- CREATE
- UPDATE
validate:
validationFailureAction: Enforce
deny:
conditions:
any:
@ -37,4 +37,5 @@ spec:
}}''].spec.rules[].host }}'
message: |
Ingress must have a unique hostname across different ingress classes
validationFailureAction: Enforce
webhookConfiguration:
failurePolicy: Fail

View file

@ -30,6 +30,7 @@ spec:
- ConfigMap
name: check-sar
validate:
validationFailureAction: Enforce
deny:
conditions:
any:
@ -37,4 +38,3 @@ spec:
operator: NotEquals
value: true
message: User is not authorized.
validationFailureAction: Enforce

View file

@ -0,0 +1,3 @@
# Title
Ensures this policy cannot be created because clusterRoles is not valid in background mode. It checks that the return failure output contains the given string and finally checks that the policy has not been created (in case somehow it returned an error, which passed, but was still created).

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-match-clusterroles
spec:
background: true
rules:
- match:
any:
- clusterRoles:
- foo-admin
resources:
kinds:
- Pod
name: ns-clusterroles
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: ?*
validationFailureAction: Audit

View file

@ -0,0 +1,18 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: background-match-clusterroles
spec:
steps:
- name: step-01
try:
- script:
content: kubectl apply -f manifests.yaml
check:
# This check below ensures that the string 'invalid variable used' is found in stderr or else fails
(contains($stderr, 'invalid variable used')): true
- name: step-02
try:
- error:
file: chainsaw-step-02-error-1-1.yaml

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-match-clusterroles
spec:
validationFailureAction: Audit
background: true
rules:
- name: ns-clusterroles
match:
any:
- resources:
kinds:
- Pod
clusterRoles:
- foo-admin
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: "?*"

View file

@ -0,0 +1,3 @@
# Title
Ensures this policy cannot be created because clusterRoles is not valid in background mode. It checks that the return failure output contains the given string and finally checks that the policy has not been created (in case somehow it returned an error, which passed, but was still created).

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-match-roles
spec:
background: true
rules:
- match:
any:
- resources:
kinds:
- Pod
roles:
- foo-role
name: ns-roles
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: ?*
validationFailureAction: Audit

View file

@ -0,0 +1,18 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: background-match-roles
spec:
steps:
- name: step-01
try:
- script:
content: kubectl apply -f manifests.yaml
check:
# This check below ensures that the string 'invalid variable used' is found in stderr or else fails
(contains($stderr, 'invalid variable used')): true
- name: step-02
try:
- error:
file: chainsaw-step-02-error-1-1.yaml

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-match-roles
spec:
validationFailureAction: Audit
background: true
rules:
- name: ns-roles
match:
any:
- resources:
kinds:
- Pod
roles:
- foo-role
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: "?*"

View file

@ -0,0 +1,3 @@
# Title
Ensures this policy cannot be created because clusterRoles is not valid in background mode. It checks that the return failure output contains the given string and finally checks that the policy has not been created (in case somehow it returned an error, which passed, but was still created).

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-roles
spec:
background: true
rules:
- match:
any:
- resources:
kinds:
- Pod
name: ns-vars-roles
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
foo: '{{request.roles}}'
validationFailureAction: Audit

View file

@ -0,0 +1,18 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: background-vars-roles
spec:
steps:
- name: step-01
try:
- script:
content: kubectl apply -f manifests.yaml
check:
# This check below ensures that the string 'variable {{request.roles}} is not allowed' is found in stderr or else fails
(contains($stderr, 'variable {{request.roles}} is not allowed')): true
- name: step-02
try:
- error:
file: chainsaw-step-02-error-1-1.yaml

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-roles
spec:
validationFailureAction: Audit
background: true
rules:
- name: ns-vars-roles
match:
any:
- resources:
kinds:
- Pod
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
foo: "{{request.roles}}"

View file

@ -0,0 +1,3 @@
# Title
Ensures this policy cannot be created because clusterRoles is not valid in background mode. It checks that the return failure output contains the given string and finally checks that the policy has not been created (in case somehow it returned an error, which passed, but was still created).

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-serviceaccountname
spec:
background: true
rules:
- match:
any:
- resources:
kinds:
- Pod
name: ns-vars-serviceaccountname
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
baz: '{{serviceAccountName}}'
validationFailureAction: Audit

View file

@ -0,0 +1,18 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: background-vars-serviceaccountname
spec:
steps:
- name: step-01
try:
- script:
content: kubectl apply -f manifests.yaml
check:
# This check below ensures that the string 'variable {{serviceAccountName}} is not allowed' is found in stderr or else fails
(contains($stderr, 'variable {{serviceAccountName}} is not allowed')): true
- name: step-02
try:
- error:
file: chainsaw-step-02-error-1-1.yaml

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-serviceaccountname
spec:
validationFailureAction: Audit
background: true
rules:
- name: ns-vars-serviceaccountname
match:
any:
- resources:
kinds:
- Pod
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
baz: "{{serviceAccountName}}"

View file

@ -0,0 +1,3 @@
# Title
Ensures this policy cannot be created because clusterRoles is not valid in background mode. It checks that the return failure output contains the given string and finally checks that the policy has not been created (in case somehow it returned an error, which passed, but was still created).

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-userinfo
spec:
background: true
rules:
- match:
any:
- resources:
kinds:
- Pod
name: ns-vars-userinfo
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: '{{request.userInfo}}'
validationFailureAction: Audit

View file

@ -0,0 +1,18 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: background-vars-userinfo
spec:
steps:
- name: step-01
try:
- script:
content: kubectl apply -f manifests.yaml
check:
# This check below ensures that the string 'variable {{request.userInfo}} is not allowed' is found in stderr or else fails
(contains($stderr, 'variable {{request.userInfo}} is not allowed')): true
- name: step-02
try:
- error:
file: chainsaw-step-02-error-1-1.yaml

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: background-vars-userinfo
spec:
validationFailureAction: Audit
background: true
rules:
- name: ns-vars-userinfo
match:
any:
- resources:
kinds:
- Pod
validate:
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: "{{request.userInfo}}"

View file

@ -0,0 +1,11 @@
## Description
The configmap context lookup uses informer's cache internally, the background processing should use the same to resolve configmap context without crashing Kyverno.
## Expected Behavior
Policy is created successfully and the report is generated properly.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/5704

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: test-cm-lookup

View file

@ -0,0 +1,7 @@
apiVersion: v1
data:
foo: bar
kind: ConfigMap
metadata:
name: keys
namespace: test-cm-lookup

View file

@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: validate-labels
spec:
background: true
rules:
- context:
- configMap:
name: keys
namespace: test-cm-lookup
name: keys
match:
any:
- resources:
kinds:
- Pod
name: validate-labels
validate:
pattern:
metadata:
labels:
foo: '{{ keys.data.foo }}'
validationFailureAction: Audit

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: validate-labels
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
labels:
foo: bar
name: test-cm-lookup-pod
namespace: test-cm-lookup
spec:
containers:
- image: nginx
name: test-cm-lookup

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Pod
metadata:
name: test-cm-lookup-pod
namespace: test-cm-lookup

View file

@ -0,0 +1,21 @@
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
labels:
app.kubernetes.io/managed-by: kyverno
namespace: test-cm-lookup
results:
- policy: validate-labels
result: pass
rule: validate-labels
scope:
apiVersion: v1
kind: Pod
name: test-cm-lookup-pod
namespace: test-cm-lookup
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0

View file

@ -0,0 +1,27 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: configmap-context-lookup
spec:
steps:
- name: step-01
try:
- apply:
file: chainsaw-step-01-apply-1-1.yaml
- apply:
file: chainsaw-step-01-apply-1-2.yaml
- apply:
file: chainsaw-step-01-apply-1-3.yaml
- assert:
file: chainsaw-step-01-assert-1-1.yaml
- name: step-02
try:
- apply:
file: chainsaw-step-02-apply-1-1.yaml
- assert:
file: chainsaw-step-02-assert-1-1.yaml
- name: step-03
try:
- assert:
file: chainsaw-step-03-assert-1-1.yaml

View file

@ -14,9 +14,9 @@ spec:
- Pod
name: ns-clusterroles
validate:
validationFailureAction: Audit
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: ?*
validationFailureAction: Audit

View file

@ -14,9 +14,9 @@ spec:
- foo-role
name: ns-roles
validate:
validationFailureAction: Audit
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: ?*
validationFailureAction: Audit

View file

@ -12,9 +12,9 @@ spec:
- Pod
name: ns-vars-roles
validate:
validationFailureAction: Audit
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
foo: '{{request.roles}}'
validationFailureAction: Audit

View file

@ -12,9 +12,9 @@ spec:
- Pod
name: ns-vars-serviceaccountname
validate:
validationFailureAction: Audit
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
baz: '{{serviceAccountName}}'
validationFailureAction: Audit

View file

@ -12,9 +12,9 @@ spec:
- Pod
name: ns-vars-userinfo
validate:
validationFailureAction: Audit
message: The `owner` label is required for all Namespaces.
pattern:
metadata:
labels:
owner: '{{request.userInfo}}'
validationFailureAction: Audit

View file

@ -17,8 +17,8 @@ spec:
- Pod
name: validate-labels
validate:
validationFailureAction: Audit
pattern:
metadata:
labels:
foo: '{{ keys.data.foo }}'
validationFailureAction: Audit

View file

@ -0,0 +1,30 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: with-permissions
spec:
steps:
- name: step-01
try:
- apply:
file: serviceaccount.yaml
- name: step-02
try:
- apply:
file: rbac.yaml
- name: step-03
try:
- apply:
file: policy.yaml
- assert:
file: policy.yaml
- name: step-04
try:
- command:
args:
- apply
- -f
- ./pod.yaml
- --as=system:serviceaccount:default:test-account
entrypoint: kubectl

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
name: webserver
spec:
containers:
- name: webserver
image: nginx:latest
ports:
- containerPort: 80

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-host-port
spec:
validationFailureAction: Enforce
background: false
rules:
- name: host-port
match:
any:
- resources:
kinds:
- Pod
validate:
cel:
expressions:
- expression: "authorizer.serviceAccount('default', 'test-account').group('').resource('pods').namespace('default').check('delete').allowed()"
message: "The user isn't allowed to delete pods in the 'default' namespace."
- expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, !has(port.hostPort) || port.hostPort == 0))"
message: "The fields spec.containers[*].ports[*].hostPort must either be unset or set to `0`"

View file

@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: role
namespace: default
rules:
- apiGroups:
- ''
resources:
- pods
verbs: ["create", "update", "get", "list", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rolebinding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: role
subjects:
- namespace: default
kind: ServiceAccount
name: test-account

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: test-account
namespace: default

View file

@ -0,0 +1,28 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: without-permissions
spec:
steps:
- name: step-01
try:
- apply:
file: serviceaccount.yaml
- name: step-02
try:
- apply:
file: rbac.yaml
- name: step-03
try:
- apply:
file: policy.yaml
- assert:
file: policy.yaml
- name: step-04
try:
- script:
content: "if kubectl apply -f ./deployment.yaml --as=system:serviceaccount:default:test-account-1\nthen\n
\ echo \"Test failed. Deployment shouldn't be created.\"\n exit 1\nelse
\n echo \"Test succeeded. Deployment isn't created as expected.\"\n exit
0\nfi\n"

View file

@ -0,0 +1,17 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-test-1
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: nginx

View file

@ -0,0 +1,21 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: check-deployment-replicas-1
spec:
validationFailureAction: Enforce
background: false
rules:
- name: deployment-replicas
match:
any:
- resources:
kinds:
- Deployment
validate:
cel:
expressions:
- expression: "authorizer.serviceAccount('default', 'test-account-1').group('apps').resource('deployments').namespace('default').check('delete').allowed()"
message: "The user isn't allowed to delete deployments in the 'default' namespace."
- expression: "object.spec.replicas <= 3"
message: "Deployment spec.replicas must be less than 3."

View file

@ -0,0 +1,25 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: role-1
namespace: default
rules:
- apiGroups:
- apps
resources:
- deployments
verbs: ["create", "update", "get", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: rolebinding-1
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: role-1
subjects:
- namespace: default
kind: ServiceAccount
name: test-account-1

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: test-account-1
namespace: default

View file

@ -0,0 +1,9 @@
## Description
This test validates the use of `rule.celPreconditions`.
The policy will be applied to resources that match the CEL preconditions.
## Expected Behavior
The policy will be applied on `pod-fail` and since it violates the rule, it will be blocked.
The policy won't be applied on `pod-pass` because it doesn't match the CEL precondition. Therefore it will be created.

View file

@ -0,0 +1,22 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: cel-preconditions
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
file: pod-pass.yaml
- apply:
expect:
- check:
($error != null): true
file: pod-fail.yaml

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod
spec:
containers:
- name: webserver
image: nginx:latest
ports:
- containerPort: 8080
hostPort: 80

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: pod
spec:
containers:
- name: webserver
image: nginx:latest
ports:
- containerPort: 8080
hostPort: 80

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-host-port-range
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-host-port-range
spec:
validationFailureAction: Enforce
background: false
rules:
- name: host-port-range
match:
any:
- resources:
kinds:
- Pod
celPreconditions:
- name: "first match condition in CEL"
expression: "object.metadata.name.matches('nginx-pod')"
validate:
cel:
expressions:
- expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, !has(port.hostPort) || (port.hostPort >= 5000 && port.hostPort <= 6000)))"
message: "The only permitted hostPorts are in the range 5000-6000."

View file

@ -0,0 +1,20 @@
## Description
This test validates the use of variables in validate.cel subrule.
This test creates the following:
1. Two namespaces: `production-ns` and `staging-ns`
2. A policy that enforces that all containers of a deployment have an image repo matching the environment label of their namespace, except for "exempt" deployments or any containers that do not belong to the "example.com" organization. For example, if the namespace has a label of {"environment": "staging"}, all container images must be either staging.example.com/* or not contain "example.com" at all, unless the deployment has an {"exempt": "true"} label.
3. Six deployments.
## Expected Behavior
The following deployments are blocked:
1. `deployment-fail-01`: It is intended to be created in namespace `production-ns`, but its container image is `staging.example.com/nginx`, which violates the validation rule.
2. `deployment-fail-02`: It is intended to be created in namespace `staging-ns`, but its container image is `example.com/nginx`, which violates the validation rule.
3. `deployment-fail-03`: It is intended to be created in namespace `staging-ns` with a label of `exempt: "false"`, but its container image is `example.com/nginx`, which violates the validation rule.
The following deployments are created:
1. `deployment-pass-01`, It is created in namespace `production-ns` and its container image is `prod.example.com/nginx`.
2. `deployment-pass-02`, It is created in namespace `staging-ns` and its container image is `staging.example.com/nginx`.
3. `deployment-pass-03`, It is created in namespace `staging-ns` and its container image is `example.com/nginx` but it has a label of `exempt: "true"` so it passes the validation rule.

View file

@ -0,0 +1,28 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: cel-variables
spec:
steps:
- name: step-01
try:
- apply:
file: ns.yaml
- assert:
file: ns.yaml
- name: step-02
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-03
try:
- apply:
file: deployments-pass.yaml
- apply:
expect:
- check:
($error != null): true
file: deployments-fail.yaml

View file

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-fail-01
namespace: production-ns
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: staging.example.com/nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-fail-02
namespace: staging-ns
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: example.com/nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-fail-03
namespace: staging-ns
labels:
exempt: "false"
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: example.com/nginx

View file

@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-pass-01
namespace: production-ns
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: prod.example.com/nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-pass-02
namespace: staging-ns
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: staging.example.com/nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-pass-03
namespace: staging-ns
labels:
exempt: "true"
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: example.com/nginx

View file

@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
name: production-ns
---
apiVersion: v1
kind: Namespace
metadata:
name: staging-ns
labels:
environment: staging

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: image-matches-namespace-environment.policy.example.com
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,28 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: image-matches-namespace-environment.policy.example.com
spec:
validationFailureAction: Enforce
background: false
rules:
- name: image-matches-namespace-environment
match:
any:
- resources:
kinds:
- Deployment
validate:
cel:
variables:
- name: environment
expression: "'environment' in namespaceObject.metadata.labels ? namespaceObject.metadata.labels['environment'] : 'prod'"
- name: exempt
expression: "has(object.metadata.labels) && 'exempt' in object.metadata.labels && object.metadata.labels['exempt'] == 'true'"
- name: containers
expression: "object.spec.template.spec.containers"
- name: containersToCheck
expression: "variables.containers.filter(c, c.image.contains('example.com/'))"
expressions:
- expression: "variables.exempt || variables.containersToCheck.all(c, c.image.startsWith(variables.environment + '.'))"
messageExpression: "'only ' + variables.environment + ' images are allowed in namespace ' + namespaceObject.metadata.name"

View file

@ -0,0 +1,7 @@
## Description
This test creates a policy that uses CEL expressions to check if the statefulset is created in the `production` namespace or not.
## Expected Behavior
The statefulset `bad-statefulset` is blocked, and the statefulset `good-statefulset` is created.

View file

@ -0,0 +1,28 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: check-statefulset-namespace
spec:
steps:
- name: step-01
try:
- apply:
file: ns.yaml
- assert:
file: ns.yaml
- name: step-02
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-03
try:
- apply:
file: statefulset-pass.yaml
- apply:
expect:
- check:
($error != null): true
file: statefulset-fail.yaml

View file

@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
name: production
---
apiVersion: v1
kind: Namespace
metadata:
name: testing

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: check-statefulset-namespace
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,19 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: check-statefulset-namespace
spec:
validationFailureAction: Enforce
background: false
rules:
- name: statefulset-namespace
match:
any:
- resources:
kinds:
- StatefulSet
validate:
cel:
expressions:
- expression: "namespaceObject.metadata.name == 'production'"
message: "The StatefulSet must be created in the 'production' namespace."

View file

@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: bad-statefulset
namespace: testing
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: nginx

View file

@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: good-statefulset
namespace: production
spec:
replicas: 1
selector:
matchLabels:
app: app
template:
metadata:
labels:
app: app
spec:
containers:
- name: container2
image: nginx

View file

@ -0,0 +1,7 @@
## Description
This test creates a policy that uses CEL expressions to disallow host ports in pods.
## Expected Behavior
The pod `pod-fail` is blocked, and the pod `pod-pass` is created.

View file

@ -0,0 +1,22 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: disallow-host-port
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
file: pod-pass.yaml
- apply:
expect:
- check:
($error != null): true
file: pod-fail.yaml

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
name: webserver
spec:
containers:
- name: webserver
image: nginx:latest
ports:
- hostPort: 80

View file

@ -0,0 +1,10 @@
# Positive case: only containerPort is set (no hostPort), so the
# disallow-host-port policy allows this pod.
apiVersion: v1
kind: Pod
metadata:
  name: webserver
spec:
  containers:
  - name: webserver
    image: nginx:latest
    ports:
    - containerPort: 80

View file

@ -0,0 +1,9 @@
# Chainsaw assertion: waits for the disallow-host-port ClusterPolicy
# to report Ready before the pods are applied.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-host-port
status:
  conditions:
  - reason: Succeeded
    status: "True"  # quoted: bare True would parse as a boolean
    type: Ready

View file

@ -0,0 +1,21 @@
# Validate policy using a CEL expression: every container port must either
# omit hostPort or set it to 0. Rule auto-generation for pod controllers is
# disabled so the rule applies to Pods only.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-host-port
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
spec:
  validationFailureAction: Enforce  # block violating requests instead of auditing
  background: false
  rules:
    - name: host-port
      match:
        any:
        - resources:
            kinds:
              - Pod
      validate:
        cel:
          expressions:
            - expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, !has(port.hostPort) || port.hostPort == 0))"
              message: "The fields spec.containers[*].ports[*].hostPort must either be unset or set to `0`"

View file

@ -0,0 +1,12 @@
## Description
This test validates the use of parameter resources in validate.cel subrule.
This test creates the following:
1. A cluster-scoped custom resource definition `NamespaceConstraint`
2. A parameter resource `namespace-constraint-test.example.com` of kind `NamespaceConstraint`
3. A policy that checks the namespace name using the parameter resource.
4. Two namespaces.
## Expected Behavior
The namespace `testing-ns` is blocked, and the namespace `production-ns` is created.

View file

@ -0,0 +1,34 @@
# Chainsaw test: install the NamespaceConstraint CRD, create the
# parameter resource, install the policy that references it via
# paramKind/paramRef, then verify the matching namespace is admitted
# and the non-matching namespace is rejected.
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: clusterscoped
spec:
  steps:
  - name: step-01
    try:
    - apply:
        file: crd.yaml
    - assert:
        file: crd-assert.yaml
  - name: step-02
    try:
    - apply:
        file: namespaceConstraint.yaml
    - assert:
        file: namespaceConstraint.yaml
  - name: step-03
    try:
    - apply:
        file: policy.yaml
    - assert:
        file: policy-assert.yaml
  - name: step-04
    try:
    - apply:
        file: ns-pass.yaml
    - apply:
        expect:
        - check:
            # the apply must fail for the test to pass
            ($error != null): true
        file: ns-fail.yaml

View file

@ -0,0 +1,10 @@
# Chainsaw assertion: waits until the NamespaceConstraint CRD has been
# accepted and its v1 version is stored before the test proceeds.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: namespaceconstraints.rules.example.com
status:
  acceptedNames:
    kind: NamespaceConstraint
    plural: namespaceconstraints
  storedVersions:
  - v1

View file

@ -0,0 +1,26 @@
# Cluster-scoped CRD used as the policy's parameter kind. Besides the
# standard apiVersion/kind/metadata, it declares a top-level 'name'
# string property that the policy reads as params.name.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: namespaceconstraints.rules.example.com
spec:
  group: rules.example.com
  names:
    kind: NamespaceConstraint
    plural: namespaceconstraints
  scope: Cluster
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          apiVersion:
            type: string
          kind:
            type: string
          metadata:
            type: object
          name:
            type: string

View file

@ -0,0 +1,5 @@
# Parameter resource referenced by the policy's paramRef. The top-level
# 'name' field (a custom property declared in the CRD schema, distinct
# from metadata.name) is what the CEL expression reads as params.name.
apiVersion: rules.example.com/v1
kind: NamespaceConstraint
metadata:
  name: "namespace-constraint-test.example.com"
name: "production-ns-01"

View file

@ -0,0 +1,4 @@
# Negative case: name differs from the parameter resource's 'name'
# ('production-ns-01'), so the policy is expected to deny it.
apiVersion: v1
kind: Namespace
metadata:
  name: testing-ns-01

View file

@ -0,0 +1,4 @@
# Positive case: name matches the parameter resource's 'name' field,
# so the policy allows this namespace.
apiVersion: v1
kind: Namespace
metadata:
  name: production-ns-01

View file

@ -0,0 +1,9 @@
# Chainsaw assertion: waits for the check-namespace-name-01 ClusterPolicy
# to report Ready before the namespaces are applied.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: check-namespace-name-01
status:
  conditions:
  - reason: Succeeded
    status: "True"  # quoted: bare True would parse as a boolean
    type: Ready

View file

@ -0,0 +1,25 @@
# Validate policy whose CEL expression compares the namespace name against
# a parameter resource (paramKind/paramRef). parameterNotFoundAction: Deny
# rejects requests when the referenced NamespaceConstraint is missing.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: check-namespace-name-01
spec:
  validationFailureAction: Enforce  # block violating requests instead of auditing
  background: false
  rules:
    - name: namespace-name-01
      match:
        any:
        - resources:
            kinds:
              - Namespace
      validate:
        cel:
          paramKind:
            apiVersion: rules.example.com/v1
            kind: NamespaceConstraint
          paramRef:
            name: "namespace-constraint-test.example.com"
            parameterNotFoundAction: "Deny"
          expressions:
            - expression: "object.metadata.name == params.name"
              messageExpression: "'Namespace name must be ' + params.name"

View file

@ -0,0 +1,12 @@
## Description
This test validates the use of parameter resources in validate.cel subrule.
This test creates the following:
1. A namespaced custom resource definition `NameConstraint`
2. A `NameConstraint` parameter resource referenced by the policy.
3. A policy that checks the namespace name using the parameter resource.
4. A namespace `testing`.
## Expected Behavior
Since the parameter resource is namespaced-scope and the policy matches cluster-scoped resource `Namespace`, therefore the creation of a namespace is blocked

View file

@ -0,0 +1,32 @@
# Chainsaw test: install the namespaced NameConstraint CRD and a parameter
# resource, install a policy matching the cluster-scoped Namespace kind,
# then verify that creating a namespace fails (a namespaced parameter
# cannot be resolved for a cluster-scoped resource).
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: match-clusterscoped-resource
spec:
  steps:
  - name: step-01
    try:
    - apply:
        file: crd.yaml
    - assert:
        file: crd-assert.yaml
  - name: step-02
    try:
    - apply:
        file: nameConstraint.yaml
    - assert:
        file: nameConstraint.yaml
  - name: step-03
    try:
    - apply:
        file: policy.yaml
    - assert:
        file: policy-assert.yaml
  - name: step-04
    try:
    - apply:
        expect:
        - check:
            # the apply must fail for the test to pass
            ($error != null): true
        file: ns.yaml

Some files were not shown because too many files have changed in this diff Show more