
feat: add chainsaw tests for validate policies (part 3) (#10546)

Signed-off-by: Mariam Fahmy <mariam.fahmy@nirmata.com>
Mariam Fahmy 2024-06-26 22:07:03 +08:00 committed by GitHub
parent 565f4b5427
commit 418bf25659
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
134 changed files with 2826 additions and 17 deletions

View file

@ -0,0 +1,10 @@
## Description
This test ensures that PSS checks at the latest version, with no exclusions configured, are applied to resources successfully.
## Expected Behavior
The two pods should not be created, as they violate the baseline:latest `seccomp` PSS check.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/7260
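For context, the chainsaw test that follows asserts the rejection with an `apply` operation that expects an error. A minimal sketch of that step, mirroring the test file below:

```yaml
# Sketch of the chainsaw step used in this test: applying the non-compliant
# pod is expected to be denied by the policy, so the operation must return
# an error for the step to pass.
- name: step-02
  try:
  - apply:
      expect:
      - check:
          ($error != null): true
      file: bad-pod-1.yaml
```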

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-with-restricted-seccomp-profile-1
spec:
containers:
- name: busybox
image: busybox:1.35
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-with-restricted-seccomp-profile-2
spec:
containers:
- name: busybox
image: busybox:1.35
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined

View file

@ -0,0 +1,27 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: seccomp-latest-check-no-exclusion
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod-1.yaml
- apply:
expect:
- check:
($error != null): true
file: bad-pod-2.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-with-restricted-seccomp-profile-3
spec:
containers:
- name: busybox
image: busybox:1.35
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: latest-check-no-exclusion
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,18 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: latest-check-no-exclusion
spec:
background: false
validationFailureAction: Enforce
rules:
- name: restricted
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest

View file

@ -0,0 +1,10 @@
## Description
This test ensures that deleting a resource matched by a podSecurity rule does not cause a panic.
## Expected Behavior
The resource should be deleted successfully without any error.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6897
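The deletion path is exercised with a chainsaw `delete` operation that references the DaemonSet directly. A minimal sketch of that step, mirroring the test below:

```yaml
# Sketch of the chainsaw delete step: the DaemonSet created earlier in the
# test is deleted by reference, and the step passes only if the delete
# request completes without an error.
- name: step-03
  try:
  - delete:
      ref:
        apiVersion: apps/v1
        kind: DaemonSet
        name: test-deletion-request-datadog-operator
        namespace: cpol-validate-psa-test-deletion-request
```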

View file

@ -0,0 +1,25 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-deletion-request
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
file: manifests.yaml
- name: step-03
try:
- delete:
ref:
apiVersion: apps/v1
kind: DaemonSet
name: test-deletion-request-datadog-operator
namespace: cpol-validate-psa-test-deletion-request

View file

@ -0,0 +1,480 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-validate-psa-test-deletion-request
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubernetes.io/component: agent
app.kubernetes.io/instance: datadog-operator
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: datadog-operator
app.kubernetes.io/version: "7"
helm.sh/chart: datadog-3.25.1
name: test-deletion-request-datadog-operator
namespace: cpol-validate-psa-test-deletion-request
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: datadog-operator
template:
metadata:
annotations:
labels:
app: datadog-operator
app.kubernetes.io/component: agent
app.kubernetes.io/instance: datadog-operator
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: datadog-operator
name: datadog-operator
spec:
affinity: {}
automountServiceAccountToken: true
containers:
- command:
- agent
- run
env:
- name: GODEBUG
value: x509ignoreCN=0
- name: DD_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: datadog-operator
- name: DD_AUTH_TOKEN_FILE_PATH
value: /etc/datadog-agent/auth/token
- name: DD_CLUSTER_NAME
value: cluster
- name: KUBERNETES
value: "yes"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: DD_ENV
value: cluster
- name: DD_PROPAGATION_STYLE_INJECT
value: Datadog B3
- name: DD_EC2_PREFER_IMDSV2
value: "true"
- name: DD_PROXY_HTTP
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_HTTPS
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_NO_PROXY
value: localhost 127.0.0.1 10.100.0.0/16 172.31.0.0/16 172.16.0.0/12
- name: DD_LOG_LEVEL
value: INFO
- name: DD_DOGSTATSD_PORT
value: "8125"
- name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC
value: "true"
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-operator-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: token
name: datadog-operator-cluster-agent
- name: DD_APM_ENABLED
value: "false"
- name: DD_LOGS_ENABLED
value: "false"
- name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL
value: "false"
- name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE
value: "true"
- name: DD_LOGS_CONFIG_AUTO_MULTI_LINE_DETECTION
value: "false"
- name: DD_HEALTH_PORT
value: "5555"
- name: DD_DOGSTATSD_SOCKET
value: /var/run/datadog/dsd.socket
- name: DD_IGNORE_AUTOCONF
value: kubernetes_state
- name: DD_EXPVAR_PORT
value: "6000"
image: datadog/agent:7.36.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
httpGet:
path: /live
port: 5555
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
name: agent
ports:
- containerPort: 8125
hostPort: 8125
name: dogstatsdport
protocol: UDP
readinessProbe:
failureThreshold: 6
httpGet:
path: /ready
port: 5555
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /etc/datadog-agent/install_info
name: installinfo
readOnly: true
subPath: install_info
- mountPath: /var/log/datadog
name: logdatadog
readOnly: false
- mountPath: /tmp
name: tmpdir
readOnly: false
- mountPath: /host/etc/os-release
mountPropagation: None
name: os-release-file
readOnly: true
- mountPath: /etc/datadog-agent
name: config
readOnly: false
- mountPath: /etc/datadog-agent/auth
name: auth-token
readOnly: false
- mountPath: /host/var/run
mountPropagation: None
name: runtimesocketdir
readOnly: true
- mountPath: /var/run/datadog
name: dsdsocket
readOnly: false
- mountPath: /host/proc
mountPropagation: None
name: procdir
readOnly: true
- mountPath: /host/sys/fs/cgroup
mountPropagation: None
name: cgroups
readOnly: true
- command:
- trace-agent
- -config=/etc/datadog-agent/datadog.yaml
env:
- name: GODEBUG
value: x509ignoreCN=0
- name: DD_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: datadog-operator
- name: DD_AUTH_TOKEN_FILE_PATH
value: /etc/datadog-agent/auth/token
- name: DD_CLUSTER_NAME
value: cluster
- name: KUBERNETES
value: "yes"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: DD_ENV
value: cluster
- name: DD_PROPAGATION_STYLE_INJECT
value: Datadog B3
- name: DD_EC2_PREFER_IMDSV2
value: "true"
- name: DD_PROXY_HTTP
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_HTTPS
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_NO_PROXY
value: localhost 127.0.0.1 10.100.0.0/16 172.31.0.0/16 172.16.0.0/12
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-operator-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: token
name: datadog-operator-cluster-agent
- name: DD_LOG_LEVEL
value: INFO
- name: DD_APM_ENABLED
value: "true"
- name: DD_APM_NON_LOCAL_TRAFFIC
value: "true"
- name: DD_APM_RECEIVER_PORT
value: "8126"
- name: DD_APM_RECEIVER_SOCKET
value: /var/run/datadog/apm.socket
- name: DD_DOGSTATSD_SOCKET
value: /var/run/datadog/dsd.socket
image: datadog/agent:7.36.0
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 15
tcpSocket:
port: 8126
timeoutSeconds: 5
name: trace-agent
ports:
- containerPort: 8126
hostPort: 8126
name: traceport
protocol: TCP
resources: {}
volumeMounts:
- mountPath: /etc/datadog-agent
name: config
readOnly: true
- mountPath: /etc/datadog-agent/auth
name: auth-token
readOnly: true
- mountPath: /host/proc
mountPropagation: None
name: procdir
readOnly: true
- mountPath: /host/sys/fs/cgroup
mountPropagation: None
name: cgroups
readOnly: true
- mountPath: /var/log/datadog
name: logdatadog
readOnly: false
- mountPath: /tmp
name: tmpdir
readOnly: false
- mountPath: /var/run/datadog
name: dsdsocket
readOnly: false
- mountPath: /host/var/run
mountPropagation: None
name: runtimesocketdir
readOnly: true
- command:
- process-agent
- --cfgpath=/etc/datadog-agent/datadog.yaml
env:
- name: GODEBUG
value: x509ignoreCN=0
- name: DD_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: datadog-operator
- name: DD_AUTH_TOKEN_FILE_PATH
value: /etc/datadog-agent/auth/token
- name: DD_CLUSTER_NAME
value: cluster
- name: KUBERNETES
value: "yes"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: DD_ENV
value: cluster
- name: DD_PROPAGATION_STYLE_INJECT
value: Datadog B3
- name: DD_EC2_PREFER_IMDSV2
value: "true"
- name: DD_PROXY_HTTP
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_HTTPS
value: http://proxy.config.pcp.local:3128
- name: DD_PROXY_NO_PROXY
value: localhost 127.0.0.1 10.100.0.0/16 172.31.0.0/16 172.16.0.0/12
- name: DD_CLUSTER_AGENT_ENABLED
value: "true"
- name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME
value: datadog-operator-cluster-agent
- name: DD_CLUSTER_AGENT_AUTH_TOKEN
valueFrom:
secretKeyRef:
key: token
name: datadog-operator-cluster-agent
- name: DD_PROCESS_AGENT_ENABLED
value: "true"
- name: DD_PROCESS_AGENT_DISCOVERY_ENABLED
value: "true"
- name: DD_LOG_LEVEL
value: INFO
- name: DD_SYSTEM_PROBE_ENABLED
value: "false"
- name: DD_DOGSTATSD_SOCKET
value: /var/run/datadog/dsd.socket
- name: DD_ORCHESTRATOR_EXPLORER_ENABLED
value: "true"
image: datadog/agent:7.36.0
imagePullPolicy: IfNotPresent
name: process-agent
resources: {}
volumeMounts:
- mountPath: /etc/datadog-agent
name: config
readOnly: true
- mountPath: /etc/datadog-agent/auth
name: auth-token
readOnly: true
- mountPath: /var/run/datadog
name: dsdsocket
readOnly: false
- mountPath: /var/log/datadog
name: logdatadog
readOnly: false
- mountPath: /tmp
name: tmpdir
readOnly: false
- mountPath: /host/etc/os-release
mountPropagation: None
name: os-release-file
readOnly: true
- mountPath: /host/var/run
mountPropagation: None
name: runtimesocketdir
readOnly: true
- mountPath: /host/sys/fs/cgroup
mountPropagation: None
name: cgroups
readOnly: true
- mountPath: /etc/passwd
name: passwd
readOnly: true
- mountPath: /host/proc
mountPropagation: None
name: procdir
readOnly: true
hostPID: true
initContainers:
- args:
- cp -r /etc/datadog-agent /opt
command:
- bash
- -c
image: datadog/agent:7.36.0
imagePullPolicy: IfNotPresent
name: init-volume
resources: {}
volumeMounts:
- mountPath: /opt/datadog-agent
name: config
readOnly: false
- args:
- for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do
bash $script ; done
command:
- bash
- -c
env:
- name: GODEBUG
value: x509ignoreCN=0
- name: DD_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: datadog-operator
- name: DD_AUTH_TOKEN_FILE_PATH
value: /etc/datadog-agent/auth/token
- name: DD_CLUSTER_NAME
value: cluster
- name: KUBERNETES
value: "yes"
- name: DD_KUBERNETES_KUBELET_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: DD_ENV
value: cluster
- name: DD_PROPAGATION_STYLE_INJECT
value: Datadog B3
- name: DD_EC2_PREFER_IMDSV2
value: "true"
image: datadog/agent:7.36.0
imagePullPolicy: IfNotPresent
name: init-config
resources: {}
volumeMounts:
- mountPath: /var/log/datadog
name: logdatadog
readOnly: false
- mountPath: /etc/datadog-agent
name: config
readOnly: false
- mountPath: /host/proc
mountPropagation: None
name: procdir
readOnly: true
- mountPath: /host/var/run
mountPropagation: None
name: runtimesocketdir
readOnly: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: false
runAsUser: 101
seccompProfile:
type: RuntimeDefault
serviceAccountName: datadog-operator
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- emptyDir: {}
name: auth-token
- configMap:
name: datadog-operator-installinfo
name: installinfo
- emptyDir: {}
name: config
- emptyDir: {}
name: logdatadog
- emptyDir: {}
name: tmpdir
- hostPath:
path: /proc
name: procdir
- hostPath:
path: /sys/fs/cgroup
name: cgroups
- hostPath:
path: /etc/os-release
name: os-release-file
- hostPath:
path: /var/run/datadog/
type: DirectoryOrCreate
name: dsdsocket
- hostPath:
path: /var/run/datadog/
type: DirectoryOrCreate
name: apmsocket
- emptyDir: {}
name: s6-run
- hostPath:
path: /etc/passwd
name: passwd
- hostPath:
path: /var/run
name: runtimesocketdir
updateStrategy:
rollingUpdate:
maxUnavailable: 10%
type: RollingUpdate

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: kyverno-psa-policy-test-deletion
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,25 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: kyverno-psa-policy-test-deletion
spec:
background: true
validationFailureAction: Enforce
rules:
- name: kyverno-psa-policy-test-deletion
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "HostPath Volumes"
- controlName: "Host Namespaces"
- controlName: "Host Ports"
images:
- datadog/*

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `Capabilities` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.
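Each exclusion entry names a PSS control and, optionally, the images, the restricted field, and the exact values that are exempted from it. A sketch of the shape used here, with values mirroring the policy.yaml below:

```yaml
# Advanced exclusion: only the listed value of the restricted field, on the
# listed image, is exempted from the named control; anything else is still
# blocked by the baseline:latest check.
validate:
  podSecurity:
    level: baseline
    version: latest
    exclude:
    - controlName: "Capabilities"
      images:
      - nginx
      restrictedField: "spec.containers[*].securityContext.capabilities.add"
      values:
      - "foo"
```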

View file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- bar
- baz
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- baz

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-capabilities
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- foo
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- baz

View file

@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- CHOWN
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
capabilities:
add:
- FOWNER

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-capabilities
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,31 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-capabilities
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-capabilities
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Capabilities"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.capabilities.add"
values:
- "foo"
- controlName: "Capabilities"
images:
- nginx
restrictedField: "spec.initContainers[*].securityContext.capabilities.add"
values:
- "baz"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `Host Namespaces` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.
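Here the restricted field lives at the pod level, so the exclusion needs no `images` list. A sketch of the entry used by this test's policy, mirroring the policy.yaml below:

```yaml
# Pod-level exclusion: spec.hostNetwork is a pod field, so the exemption
# applies to the pod as a whole rather than to specific container images.
exclude:
- controlName: "Host Namespaces"
  restrictedField: "spec.hostNetwork"
  values:
  - "true"
```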

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
hostPID: true
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-host-namespaces
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
hostNetwork: true
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
hostNetwork: false
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-host-namespaces
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,23 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-host-namespaces
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-host-namespaces
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Host Namespaces"
restrictedField: "spec.hostNetwork"
values:
- "true"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `Host Ports` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.
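Note that excluded `values` are written as strings even when the restricted field is numeric, as in the host-port exclusions of the policy below. A sketch of one entry:

```yaml
# Numeric fields such as hostPort are matched against string-encoded values.
exclude:
- controlName: "Host Ports"
  images:
  - nginx
  restrictedField: "spec.containers[*].ports[*].hostPort"
  values:
  - "10"
```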

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 20
containerPort: 80
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 20
containerPort: 80

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-host-ports
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 10
containerPort: 80
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 20
containerPort: 80

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 0
containerPort: 80
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
ports:
- hostPort: 0
containerPort: 80

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-host-ports
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,32 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-host-ports
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-host-ports
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Host Ports"
images:
- nginx
restrictedField: "spec.containers[*].ports[*].hostPort"
values:
- "10"
- controlName: "Host Ports"
images:
- nginx
restrictedField: "spec.initContainers[*].ports[*].hostPort"
values:
- "20"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `HostPath Volumes` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
volumes:
- name: host
hostPath:
path: /var/lib2
containers:
- name: nginx
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-hostpath-volume
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
volumes:
- name: host
hostPath:
path: /var/lib1
containers:
- name: nginx
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-hostpath-volumes
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,23 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-hostpath-volumes
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-hostpath-volumes
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "HostPath Volumes"
restrictedField: "spec.volumes[*].hostPath"
values:
- "/var/lib1"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `HostProcesses` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
hostNetwork: true
securityContext:
windowsOptions:
hostProcess: true
containers:
- name: busybox
image: busybox
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: true
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: true

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-hostprocesses
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
hostNetwork: true
securityContext:
windowsOptions:
hostProcess: true
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: true
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: true

View file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
securityContext:
windowsOptions:
hostProcess: false
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: false
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: false

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-hostprocess
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,39 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-hostprocess
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-hostprocess
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Host Namespaces"
restrictedField: "spec.hostNetwork"
values:
- "true"
- controlName: "HostProcess"
restrictedField: "spec.securityContext.windowsOptions.hostProcess"
values:
- "true"
- controlName: "HostProcess"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.windowsOptions.hostProcess"
values:
- "true"
- controlName: "HostProcess"
images:
- nginx
restrictedField: "spec.initContainers[*].securityContext.windowsOptions.hostProcess"
values:
- "true"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the restricted:latest `Privilege Escalation` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,31 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
capabilities:
drop:
- ALL

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-privilege-escalation
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL

View file

@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-privilege-escalation
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,31 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-privilege-escalation
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-privilege-escalation
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: restricted
version: latest
exclude:
- controlName: "Privilege Escalation"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.allowPrivilegeEscalation"
values:
- "true"
- controlName: "Privilege Escalation"
images:
- nginx
restrictedField: "spec.initContainers[*].securityContext.allowPrivilegeEscalation"
values:
- "true"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `Privileged Containers` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: busybox
image: busybox
args:
- sleep
- 1d
securityContext:
privileged: true
initContainers:
- name: nginx
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: false

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-privileged-containers
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
privileged: true
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
privileged: true

View file

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
privileged: false
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
windowsOptions:
hostProcess: false

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-privileged-containers
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,31 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-privileged-containers
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-privileged-containers
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Privileged Containers"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.privileged"
values:
- "true"
- controlName: "Privileged Containers"
images:
- nginx
restrictedField: "spec.initContainers[*].securityContext.privileged"
values:
- "true"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the restricted:latest `Capabilities` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- bar
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- baz

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-restricted-capabilities
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- foo
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- baz

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-restricted-capabilities
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,31 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-restricted-capabilities
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-restricted-capabilities
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: restricted
version: latest
exclude:
- controlName: "Capabilities"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.capabilities.add"
values:
- "foo"
- controlName: "Capabilities"
images:
- nginx
restrictedField: "spec.initContainers[*].securityContext.capabilities.add"
values:
- "baz"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the restricted:latest `Seccomp` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
securityContext:
seccompProfile:
type: Unconfined
containers:
- name: busybox
image: busybox
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Localhost
localhostProfile: profiles/audit.json
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-restricted-seccomp
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-restricted-seccomp
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,25 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-restricted-seccomp
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-restricted-seccomp
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: restricted
version: latest
exclude:
- controlName: "Seccomp"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.seccompProfile.type"
values:
- "Unconfined"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the restricted:latest `Running as Non-root User` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 1
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 0
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-running-as-nonroot-user
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 0
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 10
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 1
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
runAsUser: 1000
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-running-as-non-root-user
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,29 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-running-as-non-root-user
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-running-as-non-root-user
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: restricted
version: latest
exclude:
- controlName: "Running as Non-root user"
restrictedField: "spec.securityContext.runAsUser"
values:
- "0"
- controlName: "Running as Non-root user"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.runAsUser"
values:
- "0"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the restricted:latest `Running as Non-root` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: busybox
image: busybox
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-running-as-nonroot
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: false
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-running-as-non-root
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,29 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-running-as-non-root
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-running-as-non-root
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: restricted
version: latest
exclude:
- controlName: "Running as Non-root"
restrictedField: "spec.securityContext.runAsNonRoot"
values:
- "false"
- controlName: "Running as Non-root"
images:
- nginx
restrictedField: "spec.containers[*].securityContext.runAsNonRoot"
values:
- "false"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `Seccomp` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
securityContext:
seccompProfile:
type: Unconfined
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: Unconfined

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-seccomp
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
securityContext:
seccompProfile:
type: Unconfined
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: good-pod
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seccompProfile:
type: RuntimeDefault
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-seccomp
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: test-exclusion-seccomp
spec:
background: true
validationFailureAction: Enforce
rules:
- name: test-exclusion-seccomp
match:
any:
- resources:
kinds:
- Pod
validate:
podSecurity:
level: baseline
version: latest
exclude:
- controlName: "Seccomp"
restrictedField: "spec.securityContext.seccompProfile.type"
values:
- "Unconfined"

View file

@ -0,0 +1,7 @@
## Description
This test ensures that PSS checks with the new advanced exclusion support are applied to resources successfully.
## Expected Behavior
Two pods (`good-pod` and `excluded-pod`) should be created, as they either satisfy the baseline:latest `SELinux` PSS check or match the policy's exclusions, and one pod (`bad-pod`) should not be created, as it violates that check.

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: bad-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seLinuxOptions:
type: bar
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seLinuxOptions:
type: foo

View file

@ -0,0 +1,24 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
creationTimestamp: null
name: test-exclusion-selinux
spec:
steps:
- name: step-01
try:
- apply:
file: policy.yaml
- assert:
file: policy-assert.yaml
- name: step-02
try:
- apply:
expect:
- check:
($error != null): true
file: bad-pod.yaml
- apply:
file: excluded-pod.yaml
- apply:
file: good-pod.yaml

View file

@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
name: excluded-pod
spec:
containers:
- name: nginx1
image: nginx
args:
- sleep
- 1d
securityContext:
seLinuxOptions:
type: foo
initContainers:
- name: nginx2
image: nginx
args:
- sleep
- 1d
securityContext:
seLinuxOptions:
type: bar

Some files were not shown because too many files have changed in this diff.