mirror of https://github.com/kyverno/kyverno.git synced 2025-03-31 03:45:17 +00:00

Merge pull request #466 from nirmata/452_make_sample_policy_rule_names_consistent

452 make sample policy rule names consistent
Jim Bugwadia 2019-11-12 23:23:25 -08:00 committed by GitHub
commit 5a4458f65b
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
89 changed files with 670 additions and 836 deletions


@ -10,68 +10,56 @@ func Test_Mutate_Validate_qos(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
}
func Test_validate_deny_runasrootuser(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_deny_runasrootuser.yaml")
func Test_disallow_root_user(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_root_user.yaml")
}
func Test_validate_disallow_priviledgedprivelegesecalation(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_priviledged_privelegesecalation.yaml")
func Test_disallow_priviledged(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_priviledged.yaml")
}
func Test_validate_healthChecks(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
}
func Test_generate_networkPolicy(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/scenario_generate_networkPolicy.yaml")
func Test_add_networkPolicy(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/add_networkPolicy.yaml")
}
// namespace is blank, not "default", as the testrunner evaluates the policy engine directly; the "default" namespace is added by the kube-apiserver
func Test_validate_require_image_tag_not_latest_deny(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_valiadate_require_image_tag_not_latest_deny.yaml")
func Test_validate_disallow_latest_tag(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag.yaml")
}
func Test_validate_require_image_tag_not_latest_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_valiadate_require_image_tag_not_latest_pass.yaml")
}
func Test_validate_disallow_automoutingapicred_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_automountingapicred.yaml")
testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag_pass.yaml")
}
func Test_validate_disallow_default_namespace(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_default_namespace.yaml")
testScenario(t, "test/scenarios/samples/best_practices/disallow_default_namespace.yaml")
}
func Test_validate_host_network_port(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_network_hostport.yaml")
testScenario(t, "test/scenarios/samples/best_practices/disallow_host_network_port.yaml")
}
func Test_validate_hostPID_hostIPC(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_hostpid_hostipc.yaml")
func Test_validate_host_PID_IPC(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_host_pid_ipc.yaml")
}
func Test_validate_not_readonly_rootfilesystem(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_readonly_rootfilesystem.yaml")
func Test_validate_ro_rootfs(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/require_ro_rootfs.yaml")
}
func Test_validate_require_namespace_quota(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_namespace_quota.yaml")
}
func Test_validate_disallow_node_port(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_node_port.yaml")
func Test_add_ns_quota(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/add_ns_quota.yaml")
}
func Test_validate_disallow_default_serviceaccount(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_disallow_default_serviceaccount.yaml")
}
func Test_validate_fsgroup(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/scenario_validate_fsgroup.yaml")
}
func Test_validate_selinux_context(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_selinux_context.yaml")
}
@ -80,61 +68,66 @@ func Test_validate_proc_mount(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_default_proc_mount.yaml")
}
func Test_validate_container_capabilities(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/scenario_validate_container_capabilities.yaml")
}
func Test_validate_disallow_sysctl(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/scenario_validate_sysctl_configs.yaml")
}
func Test_validate_volume_whitelist(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_volume_whiltelist.yaml")
}
func Test_validate_trusted_image_registries(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_trusted_image_registries.yaml")
}
func Test_require_pod_requests_limits(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_pod_requests_limits.yaml")
testScenario(t, "test/scenarios/samples/best_practices/require_pod_requests_limits.yaml")
}
func Test_require_probes(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_probes.yaml")
testScenario(t, "test/scenarios/samples/best_practices/require_probes.yaml")
}
func Test_validate_disallow_host_filesystem_fail(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_filesystem.yaml")
func Test_validate_disallow_bind_mounts_fail(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_fail.yaml")
}
func Test_validate_disallow_host_filesystem_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_filesystem_pass.yaml")
func Test_validate_disallow_bind_mounts_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_pass.yaml")
}
func Test_validate_disallow_new_capabilities(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/scenario_validate_disallow_new_capabilities.yaml")
testScenario(t, "/test/scenarios/samples/best_practices/disallow_new_capabilities.yaml")
}
func Test_validate_disallow_docker_sock_mount(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_docker_sock_mount.yaml")
func Test_disallow_sysctls(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/disallow_sysctls.yaml")
}
func Test_disallow_docker_sock_mount(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_docker_sock_mount.yaml")
}
func Test_validate_disallow_helm_tiller(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_helm_tiller.yaml")
}
func Test_add_safe_to_evict_annotation(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_mutate_safe-to-evict.yaml")
func Test_add_safe_to_evict(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/add_safe_to_evict.yaml")
}
func Test_add_safe_to_evict_annotation2(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_mutate_safe-to-evict2.yaml")
testScenario(t, "test/scenarios/samples/best_practices/add_safe_to_evict2.yaml")
}
func Test_validate_restrict_automount_sa_token_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_automount_sa_token.yaml")
}
func Test_restrict_node_port(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_node_port.yaml")
}
func Test_validate_restrict_image_registries(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_image_registries.yaml")
}
func Test_known_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_known_ingress_class.yaml")
testScenario(t, "test/scenarios/samples/more/restrict_ingress_classes.yaml")
}
func Test_unknown_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_unknown_ingress_class.yaml")
testScenario(t, "test/scenarios/samples/more/unknown_ingress_class.yaml")
}
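
Each `testScenario` call above points at a scenario file that pairs a sample policy with a test resource and the expected engine response. As a rough sketch of that format (the file paths, resource name, and expected values below are illustrative and not part of this change):
````yaml
# Hypothetical scenario file for the disallow-latest-tag policy.
# Paths, names, and the expected outcome are illustrative only.
input:
  policy: samples/best_practices/disallow_latest_tag.yaml
  resource: test/resources/pod-with-latest-tag.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-latest-tag
      resource:
        kind: Pod
        apiVersion: v1
        namespace: ''
        name: pod-with-latest-tag
      rules:
        - name: validate-image-tag
          type: Validation
          success: false
````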


@ -0,0 +1,33 @@
# Default deny all ingress traffic
By default, Kubernetes allows communications across all pods within a cluster. Network policies, and a CNI that supports network policies, must be used to restrict communications.
A default `NetworkPolicy` should be configured for each namespace to default deny all ingress traffic to the pods in the namespace. Application teams can then configure additional `NetworkPolicy` resources to allow desired traffic to application pods from select sources.
## Policy YAML
[add_network_policy.yaml](best_practices/add_network_policy.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: add-networkpolicy
spec:
rules:
- name: "default-deny-ingress"
match:
resources:
kinds:
- Namespace
name: "*"
generate:
kind: NetworkPolicy
name: default-deny-ingress
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
````
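For illustration, once this policy is installed, creating a namespace would cause Kyverno to generate a `NetworkPolicy` roughly like the one below in that namespace (the namespace name `team-a` is a made-up example):
````yaml
# NetworkPolicy generated by the default-deny-ingress rule for a new
# namespace; the namespace name is a hypothetical example.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: team-a
spec:
  # select all pods in the namespace
  podSelector: {}
  policyTypes:
  - Ingress
````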


@ -8,23 +8,23 @@ To limit the number of resources like CPU and memory, as well as objects that ma
## Policy YAML
[require_namespace_quota.yaml](best_practices/require_namespace_quota.yaml)
[add_ns_quota.yaml](best_practices/add_ns_quota.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: generate-namespace-quota
name: add-ns-quota
spec:
rules:
- name: generate-namespace-quota
- name: generate-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: "defaultresourcequota"
name: "default-resourcequota"
data:
spec:
hard:


@ -10,16 +10,16 @@ This policy matches and mutates pods with `emptyDir` and `hostPath` volumes, to
## Policy YAML
[add_safe_to_evict_annotation.yaml](best_practices/add_safe-to-evict_annotation.yaml)
[add_safe_to_evict_annotation.yaml](best_practices/add_safe_to_evict.yaml)
````yaml
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "annotate-emptydir-hostpath"
name: "add-safe-to-evict"
spec:
rules:
- name: "empty-dir-add-safe-to-evict"
- name: "annotate-empty-dir"
match:
resources:
kinds:
@ -32,7 +32,7 @@ spec:
spec:
volumes:
- (emptyDir): {}
- name: "host-path-add-safe-to-evict"
- name: "annotate-host-path"
match:
resources:
kinds:

View file

@ -1,34 +0,0 @@
# Assign Linux capabilities
Linux divides the privileges traditionally associated with superuser into distinct units, known as capabilities, which can be independently enabled or disabled by listing them in `securityContext.capabilites`.
## Policy YAML
[policy_validate_container_capabilities.yaml](more/policy_validate_container_capabilities.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-container-capablities
spec:
rules:
- name: validate-container-capablities
match:
resources:
kinds:
- Pod
validate:
message: "Allow certain linux capability"
pattern:
spec:
containers:
- securityContext:
capabilities:
add: ["NET_ADMIN"]
````
## Additional Information
* [List of linux capabilities](https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h)


@ -1,34 +0,0 @@
# Configure kernel parameters
The Sysctl interface allows to modify kernel parameters at runtime and in the pod can be specified under `securityContext.sysctls`. If kernel parameters in the pod are to be modified, should be handled cautiously, and policy with rules restricting these options will be helpful. We can control minimum and maximum port that a network connection can use as its source(local) port by checking net.ipv4.ip_local_port_range
## Policy YAML
[policy_validate_sysctl_configs.yaml](more/policy_validate_sysctl_configs.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-allow-portrange-with-sysctl
spec:
rules:
- name: allow-portrange-with-sysctl
match:
resources:
kinds:
- Pod
validate:
message: "Allowed port range is from 1024 to 65535"
pattern:
spec:
securityContext:
sysctls:
- name: net.ipv4.ip_local_port_range
value: "1024 65535"
````
## Additional Information
* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)


@ -1,33 +0,0 @@
# Default deny all ingress traffic
By default, Kubernetes allows all ingress and egress traffic to and from pods within a cluster.
A "default" `NetworkPolicy` should be configured for each namespace to default deny all ingress traffic to the pods in that namespace. Later, the application team can configure additional `NetworkPolicy` resources to allow desired traffic to application pods from select sources.
## Policy YAML
[require_default_network_policy.yaml](best_practices/require_default_network_policy.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: default-deny-ingress-networkpolicy
spec:
rules:
- name: "default-deny-ingress"
match:
resources:
kinds:
- Namespace
name: "*"
generate:
kind: NetworkPolicy
name: default-deny-ingress
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
````


@ -1,30 +0,0 @@
# Disallow automount of Service Account credentials
Kubernetes automounts default service account credentials in each pod. To restrict access, opt out of automounting credentials by setting `automountServiceAccountToken` to `false`.
## Policy YAML
[disallow_automountingapicred.yaml](best_practices/disallow_automountingapicred.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-disallow-automoutingapicred
spec:
rules:
- name: disallow-automoutingapicred
match:
resources:
kinds:
- Pod
validate:
message: "Deny automounting API credentials"
pattern:
spec:
=(serviceAccountName): "*"
automountServiceAccountToken: false
````


@ -1,25 +1,25 @@
# Disallow use of bind mounts (`hostPath` volumes)
The volume of type `hostPath` allows pods to use host bind mounts (i.e. directories and volumes mounted to a host path) in containers. Using host resources can be used to access shared data or escalate priviliges. Also, this couples pods to a specific host and data persisted in the `hostPath` volume is coupled to the life of the node leading to potential pod scheduling failures. It is highly recommeded that applications are designed to be decoupled from the underlying infrstructure (in this case, nodes).
The volume of type `hostPath` allows pods to use host bind mounts (i.e. directories and volumes mounted to a host path) in containers. Using host resources can be used to access shared data or escalate privileges. Also, this couples pods to a specific host and data persisted in the `hostPath` volume is coupled to the life of the node leading to potential pod scheduling failures. It is highly recommended that applications are designed to be decoupled from the underlying infrastructure (in this case, nodes).
## Policy YAML
[disallow_host_filesystem.yaml](best_practices/disallow_host_filesystem.yaml)
[disallow_bind_mounts.yaml](best_practices/disallow_bind_mounts.yaml)
````yaml
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "deny-use-of-host-fs"
name: "disallow-bind-mounts"
spec:
rules:
- name: "deny-use-of-host-fs"
- name: "validate-hostPath"
match:
resources:
kinds:
- "Pod"
validate:
message: "Host path is not allowed"
message: "Host path volumes are not allowed"
pattern:
spec:
volumes:


@ -1,6 +1,6 @@
# Disallow use of default namespace
Kubernetes namespaces provide a way to segment and isolate cluster resources across multiple applictaions and users. It is recommended that each workload be isolated in its own namespace and that use of the default namespace be not allowed.
Kubernetes namespaces are an optional feature that provide a way to segment and isolate cluster resources across multiple applications and users. As a best practice, workloads should be isolated with namespaces. Namespaces should be required and the default (empty) namespace should not be used.
## Policy YAML
@ -10,20 +10,20 @@ Kubernetes namespaces provide a way to segment and isolate cluster resources acr
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-namespace
name: disallow-default-namespace
spec:
rules:
- name: check-default-namespace
- name: validate-namespace
match:
resources:
kinds:
- Pod
validate:
message: "Using 'default' namespace is restricted"
message: "Using 'default' namespace is not allowed"
pattern:
metadata:
namespace: "!default"
- name: check-namespace-exist
- name: require-namespace
match:
resources:
kinds:


@ -13,11 +13,6 @@ apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-docker-sock-mount
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: The Docker socket bind mount allows access to the
Docker daemon on the node. This access can be used for privilege escalation and
to manage containers outside of Kubernetes, and hence should not be allowed.
spec:
rules:
- name: validate-docker-sock-mount


@ -4,14 +4,13 @@ Tiller has known security challenges. It requires administrative privileges and a
## Policy YAML
[disallow_helm_tiller.yaml](best_practices/disallow_helm_tiller.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-helm-tiller
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description:
spec:
rules:
- name: validate-helm-tiller


@ -5,23 +5,23 @@ Using `hostPort` and `hostNetwork` allows pods to share the host networking stac
## Policy YAML
[disallow_host_network_hostport.yaml](best_practices/disallow_host_network_hostport.yaml)
[disallow_host_network_port.yaml](best_practices/disallow_host_network_port.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-host-network-hostport
name: disallow-host-network-port
spec:
rules:
- name: validate-host-network-hostport
- name: validate-host-network-port
match:
resources:
kinds:
- Pod
validate:
message: "Defining hostNetwork and hostPort are not allowed"
message: "Using host networking is not allowed"
pattern:
spec:
(hostNetwork): false


@ -6,23 +6,17 @@ To avoid pod container from having visibility to host process space, validate th
## Policy YAML
[disallow_hostpid_hostipc.yaml](best_practices/disallow_hostpid_hostipc.yaml)
[disallow_host_pid_ipc.yaml](best_practices/disallow_host_pid_ipc.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-host-pid-ipc
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Sharing the host's PID namespace allows visibility of process
on the host, potentially exposing process information. Sharing the host's IPC namespace allows
the container process to communicate with processes on the host. To avoid pod container from
having visibility to host process space, validate that 'hostPID' and 'hostIPC' are set to 'false'.
name: disallow-host-pid-ipc
spec:
validationFailureAction: enforce
validationFailureAction: audit
rules:
- name: validate-host-pid-ipc
- name: validate-hostPID-hostIPC
match:
resources:
kinds:


@ -4,36 +4,37 @@ The `:latest` tag is mutable and can lead to unexpected errors if the upstream i
## Policy YAML
[require_image_tag_not_latest.yaml](best_practices/require_image_tag_not_latest.yaml)
[disallow_latest_tag.yaml](best_practices/disallow_latest_tag.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image-tag
name: disallow-latest-tag
spec:
rules:
- name: image-tag-notspecified
- name: require-tag
match:
resources:
kinds:
- Pod
validate:
message: "Image tag not specified"
message: "An image tag is required"
pattern:
spec:
containers:
- image: "*:*"
- name: image-tag-not-latest
- name: validate-tag
match:
resources:
kinds:
- Pod
validate:
message: "Using 'latest' image tag is restricted. Set image tag to a specific version"
message: "Using a mutable image tag e.g. 'latest' is not allowed"
pattern:
spec:
containers:
- image: "!*:latest"
````
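For example, a pod like the one below would be rejected by the `validate-tag` rule, while pinning the image to a fixed tag (e.g. `nginx:1.17`) would satisfy both rules (the pod and image names are illustrative):
````yaml
# Example pod the validate-tag rule would reject: the image uses the
# mutable ':latest' tag. Names are illustrative.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-latest
spec:
  containers:
  - name: nginx
    image: nginx:latest
````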


@ -14,23 +14,16 @@ default capabilities.
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-new-capabilities
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/description: Linux allows defining fine-grained permissions using
capabilities. With Kubernetes, it is possible to add capabilities that escalate the
level of kernel access and allow other potentially dangerous behaviors. This policy
enforces that pods cannot add new capabilities. Other policies can be used to set
default capabilities.
name: disallow-new-capabilities
spec:
rules:
- name: deny-new-capabilities
- name: validate-add-capabilities
match:
resources:
kinds:
- Pod
validate:
message: "Capabilities cannot be added"
message: "New capabilities cannot be added"
anyPattern:
- spec:
=(securityContext):
@ -42,4 +35,5 @@ spec:
=(securityContext):
=(capabilities):
X(add): null
````
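As an example, a pod that adds a Linux capability in a container security context, as below, would be reported by this rule; omitting `capabilities.add` entirely would pass (the pod spec is illustrative):
````yaml
# Example pod the disallow-new-capabilities policy would report:
# it adds NET_ADMIN via securityContext.capabilities.add.
apiVersion: v1
kind: Pod
metadata:
  name: cap-add-example
spec:
  containers:
  - name: app
    image: nginx:1.17
    securityContext:
      capabilities:
        add: ["NET_ADMIN"]
````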


@ -1,36 +1,50 @@
# Disable privileged containers
# Disallow privileged containers
Privileged containers are defined as any container where the container uid 0 is mapped to the host's uid 0. A process within a privileged container can get unrestricted host access. With `securityContext.allowPrivilegeEscalation` enabled, a process can gain privileges from its parent.
To disallow privileged containers and the privilege escalation it is recommended to run pod containers with `securityContext.priveleged` set to `false` and `allowPrivilegeEscalation` set to `false`.
To disallow privileged containers and privilege escalation, run pod containers with `securityContext.privileged` set to `false` and `securityContext.allowPrivilegeEscalation` set to `false`.
## Policy YAML
[disallow_priviledged_priviligedescalation.yaml](best_practices/disallow_priviledged_priviligedescalation.yaml)
[disallow_privileged.yaml](best_practices/disallow_privileged.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-deny-privileged-priviligedescalation
name: disallow-privileged
spec:
rules:
- name: deny-privileged-priviligedescalation
- name: validate-privileged
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false"
message: "Privileged mode is not allowed. Set privileged to false"
anyPattern:
- spec:
securityContext:
allowPrivilegeEscalation: false
privileged: false
- spec:
containers:
- name: "*"
securityContext:
privileged: false
- name: validate-allowPrivilegeEscalation
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation to false"
anyPattern:
- spec:
securityContext:
allowPrivilegeEscalation: false
- spec:
containers:
- name: "*"
securityContext:
allowPrivilegeEscalation: false
privileged: false
````
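A compliant pod would set both flags to `false` in each container's security context, for example (illustrative resource):
````yaml
# Example pod that satisfies both rules of the disallow-privileged
# policy: privileged and allowPrivilegeEscalation are explicitly false.
apiVersion: v1
kind: Pod
metadata:
  name: unprivileged-example
spec:
  containers:
  - name: app
    image: nginx:1.17
    securityContext:
      privileged: false
      allowPrivilegeEscalation: false
````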


@ -8,16 +8,17 @@ By default, all processes in a container run as the root user (uid 0). To preven
## Policy YAML
[deny_runasrootuser.yaml](best_practices/deny_runasrootuser.yaml)
[disallow_root_user.yaml](best_practices/disallow_root_user.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-deny-runasrootuser
name: disallow-root-user
annotations:
spec:
rules:
- name: deny-runasrootuser
- name: validate-runAsNonRoot
match:
resources:
kinds:


@ -0,0 +1,32 @@
# Disallow changes to kernel parameters
The Sysctl interface allows modifications to kernel parameters at runtime. In a Kubernetes pod these parameters can be specified under `securityContext.sysctls`. Kernel parameter modifications can be used for exploits and should be restricted.
## Additional Information
* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
## Policy YAML
[disallow_sysctls.yaml](best_practices/disallow_sysctls.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-sysctls
spec:
rules:
- name: validate-sysctls
match:
resources:
kinds:
- Pod
validate:
message: "Changes to kernel paramaters are not allowed"
pattern:
spec:
securityContext:
X(sysctls): null
````
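For instance, a pod that sets any kernel parameter in its pod security context, as below, would be flagged (the pod name and sysctl values are illustrative):
````yaml
# Example pod the disallow-sysctls policy would flag: it sets a kernel
# parameter via securityContext.sysctls. Values are illustrative.
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-example
spec:
  securityContext:
    sysctls:
    - name: net.ipv4.ip_local_port_range
      value: "1024 65535"
  containers:
  - name: app
    image: nginx:1.17
````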


@ -1,32 +0,0 @@
# Require a known ingress class
It can be useful to restrict Ingress resources to use a known ingress class that are allowed in the cluster.
You can customize this policy to allow ingress classes that are configured in the cluster.
## Policy YAML
[known_ingress.yaml](best_practices/known_ingress.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: known-ingress
annotations:
policies.kyverno.io/category: Ingress
policies.kyverno.io/description:
spec:
rules:
- name: known-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"
````


@ -1,6 +1,43 @@
# Sample Policies
Sample policies are designed to be applied to your Kubernetes clusters with minimal changes. To apply these policies to your cluster, install Kyverno and import the policies as follows:
Sample policies are designed to be applied to your Kubernetes clusters with minimal changes.
The policies are mostly validation rules in `audit` mode, i.e. your existing workloads will not be impacted, but will be audited for policy compliance.
## Best Practice Policies
These policies are highly recommended.
1. [Disallow root user](DisallowRootUser.md)
2. [Disallow privileged containers](DisallowPrivilegedContainers.md)
3. [Disallow new capabilities](DisallowNewCapabilities.md)
4. [Disallow kernel parameter changes](DisallowSysctls.md)
5. [Disallow use of bind mounts (`hostPath` volumes)](DisallowBindMounts.md)
6. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
7. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
8. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
9. [Disallow use of default namespace](DisallowDefaultNamespace.md)
10. [Disallow latest image tag](DisallowLatestTag.md)
11. [Disallow Helm Tiller](DisallowHelmTiller.md)
12. [Require read-only root filesystem](RequireReadOnlyRootFS.md)
13. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
14. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
15. [Add default network policy](AddDefaultNetworkPolicy.md)
16. [Add namespace resource quotas](AddNamespaceResourceQuota.md)
17. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](AddSafeToEvict.md)
## Additional Policies
These policies provide additional best practices and are worthy of close consideration. These policies may require specific changes for your workloads and environments.
18. [Restrict image registries](RestrictImageRegistries.md)
19. [Restrict `NodePort` services](RestrictNodePort.md)
20. [Restrict auto-mount of service account credentials](RestrictAutomountSAToken.md)
21. [Restrict ingress classes](RestrictIngressClasses.md)
## Applying the sample policies
To apply these policies to your cluster, install Kyverno and import the policies as follows:
**Install Kyverno**
@ -30,36 +67,3 @@ Import addition policies from [here](more):
kubectl create -f samples/more/
````
The policies are mostly validation rules in `audit` mode i.e. your existing workloads will not be impacted, but will be audited for policy complaince.
## Best Practice Policies
These policies are highly recommended.
1. [Run as non-root user](RunAsNonRootUser.md)
2. [Disable privileged containers and disallow privilege escalation](DisablePrivilegedContainers.md)
3. [Disallow new capabilities](DisallowNewCapabilities.md)
4. [Require read-only root filesystem](RequireReadOnlyFS.md)
5. [Disallow use of bind mounts (`hostPath` volumes)](DisallowHostFS.md)
6. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
7. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
8. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
9. [Disallow unknown image registries](DisallowUnknownRegistries.md)
10. [Disallow latest image tag](DisallowLatestTag.md)
11. [Disallow use of default namespace](DisallowDefaultNamespace.md)
12. [Require namespace limits and quotas](RequireNSLimitsQuotas.md)
13. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
14. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
15. [Default deny all ingress traffic](DefaultDenyAllIngress.md)
16. [Disallow Helm Tiller](DisallowHelmTiller.md)
17. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](MutateSafeToEvict.md)
## Additional Policies
The policies provide additional best practices and are worthy of close consideration. These policies may require workload specific changes.
18. [Limit use of `NodePort` services](LimitNodePort.md)
19. [Limit automount of Service Account credentials](DisallowAutomountSACredentials.md)
20. [Configure Linux Capabilities](AssignLinuxCapabilities.md)
21. [Limit Kernel parameter access](ConfigureKernelParmeters.md)
22. [Restrict ingress class](KnownIngressClass.md)


@ -1,8 +1,8 @@
# Require `livenessProbe` and `readinessProbe`
For each pod, a `livenessProbe` is carried out by the kubelet to determine if containers are running and when to restart the pod. A `readinessProbe` is used by services and deployments to determine if the pod is ready to recieve network traffic.
Liveness and readiness probes need to be configured to correctly manage a pods lifecycle during deployments, restarts, and upgrades.
Both liveness and readiness probes need to be configured to manage the pod lifecycle during restarts and upgrades.
For each pod, a periodic `livenessProbe` is performed by the kubelet to determine if the pod's containers are running or need to be restarted. A `readinessProbe` is used by services and deployments to determine if the pod is ready to receive network traffic.
## Policy YAML
@ -12,10 +12,10 @@ Both liveness and readiness probes need to be configured to manage the pod lifec
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-probes
name: require-pod-probes
spec:
rules:
- name: check-probes
- name: validate-livenessProbe-readinessProbe
match:
resources:
kinds:


@ -12,11 +12,11 @@ If a namespace level request or limit is specified, defaults will automatically
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: check-resource
name: require-pod-requests-limits
spec:
validationFailureAction: "audit"
rules:
- name: check-resource-request-limit
- name: validate-resources
match:
resources:
kinds:


@ -4,23 +4,23 @@ A read-only root file system helps to enforce an immutable infrastructure strate
## Policy YAML
[require_readonly_rootfilesystem.yaml](best_practices/require_readonly_rootfilesystem.yaml)
[require_ro_rootfs.yaml](best_practices/require_ro_rootfs.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-readonly-rootfilesystem
name: require-ro-rootfs
spec:
rules:
- name: validate-readonly-rootfilesystem
- name: validate-readOnlyRootFilesystem
match:
resources:
kinds:
- Pod
validate:
message: "Container require read-only rootfilesystem"
message: "Root filesystem must be read-only"
pattern:
spec:
containers:


@ -0,0 +1,29 @@
# Restrict auto-mount of Service Account tokens
Kubernetes automatically mounts service account credentials in each pod. The service account may be assigned roles allowing pods to access API resources. To restrict access, opt out of auto-mounting tokens by setting `automountServiceAccountToken` to `false`.
## Policy YAML
[restrict_automount_sa_token.yaml](more/restrict_automount_sa_token.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: restrict-automount-sa-token
spec:
rules:
- name: validate-automountServiceAccountToken
match:
resources:
kinds:
- Pod
validate:
message: "Deny automounting API credentials"
pattern:
spec:
automountServiceAccountToken: false
````
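A pod that complies with this rule explicitly opts out of token mounting, for example (illustrative resource):
````yaml
# Example pod that passes the restrict-automount-sa-token policy:
# automountServiceAccountToken is explicitly set to false.
apiVersion: v1
kind: Pod
metadata:
  name: no-sa-token-example
spec:
  automountServiceAccountToken: false
  containers:
  - name: app
    image: nginx:1.17
````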


@ -6,16 +6,16 @@ You can customize this policy to allow image registries that you trust.
## Policy YAML
[trusted_image_registries.yaml](best_practices/trusted_image_registries.yaml)
[restrict_image_registries.yaml](more/restrict_image_registries.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: trusted-registries
name: restrict-image-registries
spec:
rules:
- name: trusted-registries
- name: validate-registries
match:
resources:
kinds:


@ -0,0 +1,27 @@
# Restrict ingress classes
It can be useful to restrict Ingress resources to a set of known ingress classes that are allowed in the cluster. You can customize this policy to allow ingress classes that are configured in the cluster.
## Policy YAML
[restrict_ingress_classes.yaml](more/restrict_ingress_classes.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: restrict-ingress-classes
spec:
rules:
- name: validate-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"
````
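As an example, an Ingress annotated with one of the allowed classes would pass validation, while any other class value would be reported (the Ingress and backend names are illustrative):
````yaml
# Example Ingress that satisfies the restrict-ingress-classes policy:
# the ingress.class annotation matches one of the allowed values.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: app-ingress
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: app-svc
    servicePort: 80
````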


@ -1,4 +1,4 @@
# Limit `NodePort` services
# Restrict use of `NodePort` services
A Kubernetes service of type `NodePort` uses a host port (on every node in the cluster) to receive traffic from any source.
@ -8,23 +8,23 @@ Although NodePort services can be useful, their use should be limited to service
## Policy YAML
[disallow_node_port.yaml](best_practices/disallow_node_port.yaml)
[restrict_node_port.yaml](more/restrict_node_port.yaml)
````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-node-port
name: restrict-node-port
spec:
rules:
- name: disallow-node-port
- name: validate-node-port
match:
resources:
kinds:
- Service
validate:
message: "Disallow service of type NodePort"
message: "Service of type NodePort is not allowed"
pattern:
spec:
type: "!NodePort"


@ -0,0 +1,29 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: add-networkpolicy
annotations:
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: By default, Kubernetes allows communications across
all pods within a cluster. Network policies, and a CNI that supports network policies,
must be used to restrict communications. A default NetworkPolicy should be configured
for each namespace to default deny all ingress traffic to the pods in the namespace.
Application teams can then configure additional NetworkPolicy resources to allow
desired traffic to application pods from select sources.
spec:
rules:
- name: "default-deny-ingress"
match:
resources:
kinds:
- Namespace
name: "*"
generate:
kind: NetworkPolicy
name: default-deny-ingress
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress


@ -0,0 +1,26 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: add-ns-quota
annotations:
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: To limit the number of objects, as well as the
total amount of compute that may be consumed by a single namespace, create
a default resource quota for each namespace.
spec:
rules:
- name: generate-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: "default-resourcequota"
data:
spec:
hard:
requests.cpu: '4'
requests.memory: '16Gi'
limits.cpu: '4'
limits.memory: '16Gi'


@ -1,15 +1,15 @@
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "annotate-emptydir-hostpath"
name: "add-safe-to-evict"
annotations:
policies.kyverno.io/category: AutoScaling
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: The Kubernetes cluster autoscaler does not evict pods that
use hostPath or emptyDir volumes. To allow eviction of these pods, the annotation
cluster-autoscaler.kubernetes.io/safe-to-evict=true must be added to the pods.
spec:
rules:
- name: "empty-dir-add-safe-to-evict"
- name: "annotate-empty-dir"
match:
resources:
kinds:
@ -22,7 +22,7 @@ spec:
spec:
volumes:
- (emptyDir): {}
- name: "host-path-add-safe-to-evict"
- name: "annotate-host-path"
match:
resources:
kinds:


@ -1,21 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-disallow-automoutingapicred
annotations:
policies.kyverno.io/category: API Server Access Control
policies.kyverno.io/description: Kubernetes automounts default service account credentials in each pod.
To restrict access, opt out of automounting credentials by setting 'automountServiceAccountToken' to 'false'.
spec:
rules:
- name: disallow-automoutingapicred
match:
resources:
kinds:
- Pod
validate:
message: "Deny automounting API credentials"
pattern:
spec:
=(serviceAccountName): "*"
automountServiceAccountToken: false


@ -1,26 +1,26 @@
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "deny-use-of-host-fs"
name: "disallow-bind-mounts"
annotations:
policies.kyverno.io/category: Data Protection
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: The volume of type `hostPath` allows pods to use host bind
mounts (i.e. directories and volumes mounted to a host path) in containers. Using host
resources can be used to access shared data or escalate priviliges. Also, this couples pods
resources can be used to access shared data or escalate privileges. Also, this couples pods
to a specific host and data persisted in the `hostPath` volume is coupled to the life of the
node leading to potential pod scheduling failures. It is highly recommeded that applications
are designed to be decoupled from the underlying infrstructure (in this case, nodes).
node leading to potential pod scheduling failures. It is highly recommended that applications
are designed to be decoupled from the underlying infrastructure (in this case, nodes).
spec:
rules:
- name: "deny-use-of-host-fs"
- name: "validate-hostPath"
match:
resources:
kinds:
- "Pod"
validate:
message: "Host path is not allowed"
message: "Host path volumes are not allowed"
pattern:
spec:
volumes:
- X(hostPath): null
- X(hostPath): null


@ -1,24 +1,26 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-namespace
annotations:
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: With many users spread across multiple teams, restricting
use of the default namespace and subdividing the cluster by namesoace isolates workloads.
name: disallow-default-namespace
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Kubernetes namespaces are an optional feature
that provide a way to segment and isolate cluster resources across multiple
applications and users. As a best practice, workloads should be isolated with
namespaces. Namespaces should be required and the default (empty) namespace
should not be used.
spec:
rules:
- name: check-default-namespace
- name: validate-namespace
match:
resources:
kinds:
- Pod
validate:
message: "Using 'default' namespace is restricted"
message: "Using 'default' namespace is not allowed"
pattern:
metadata:
namespace: "!default"
- name: check-namespace-exist
- name: require-namespace
match:
resources:
kinds:
@ -28,3 +30,4 @@ spec:
pattern:
metadata:
namespace: "?*"


@ -3,7 +3,7 @@ kind: ClusterPolicy
metadata:
name: host-network-port
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Using 'hostPort' and 'hostNetwork' allows pods to share
the host network stack, allowing potential snooping of network traffic from an application pod.
spec:


@ -1,9 +1,9 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-host-pid-ipc
name: disallow-host-pid-ipc
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Sharing the host's PID namespace allows visibility of process
on the host, potentially exposing process information. Sharing the host's IPC namespace allows
the container process to communicate with processes on the host. To avoid pod container from
@ -11,7 +11,7 @@ metadata:
spec:
validationFailureAction: audit
rules:
- name: validate-host-pid-ipc
- name: validate-hostPID-hostIPC
match:
resources:
kinds:


@ -1,32 +1,32 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image-tag
name: disallow-latest-tag
annotations:
policies.kyverno.io/category: Image
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: The ':latest' tag is mutable and can lead to
unexpected errors if the image changes. A best practice is to use an immutable
tag that maps to a specific version of an application pod.
spec:
rules:
- name: image-tag-notspecified
- name: require-image-tag
match:
resources:
kinds:
- Pod
validate:
message: "Image tag not specified"
message: "An image tag is required"
pattern:
spec:
containers:
- image: "*:*"
- name: image-tag-not-latest
- name: validate-image-tag
match:
resources:
kinds:
- Pod
validate:
message: "Using 'latest' image tag is restricted. Set image tag to a specific version"
message: "Using a mutable image tag e.g. 'latest' is not allowed"
pattern:
spec:
containers:


@ -1,9 +1,9 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-new-capabilities
name: disallow-new-capabilities
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/category: Security
policies.kyverno.io/description: Linux allows defining fine-grained permissions using
capabilities. With Kubernetes, it is possible to add capabilities that escalate the
level of kernel access and allow other potentially dangerous behaviors. This policy
@ -11,13 +11,13 @@ metadata:
default capabilities.
spec:
rules:
- name: deny-new-capabilities
- name: validate-add-capabilities
match:
resources:
kinds:
- Pod
validate:
message: "Capabilities cannot be added"
message: "New capabilities cannot be added"
anyPattern:
- spec:
=(securityContext):
@ -28,4 +28,4 @@ spec:
- name: "*"
=(securityContext):
=(capabilities):
X(add): null
X(add): null


@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-deny-privileged-priviligedescalation
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/description: Privileged containers are defined as any container
where the container uid 0 is mapped to the hosts uid 0. A process within privileged
containers can get unrestricted host access. With 'securityContext.allowPrivilegeEscalation'
enabled a process can gain privileges from its parent. To disallow privileged containers
and the escalation of privileges it is recommended to run pod containers with
'securityContext.priveleged' as 'false' and 'allowPrivilegeEscalation' as 'false'.
spec:
rules:
- name: deny-privileged-priviligedescalation
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false"
anyPattern:
- spec:
securityContext:
allowPrivilegeEscalation: false
privileged: false
- spec:
containers:
- name: "*"
securityContext:
allowPrivilegeEscalation: false
privileged: false


@ -0,0 +1,46 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-privileged
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Privileged containers are defined as any
container where the container uid 0 is mapped to the host's uid 0.
A process within a privileged container can get unrestricted host access.
With `securityContext.allowPrivilegeEscalation` enabled, a process can
gain privileges from its parent.
spec:
rules:
- name: validate-privileged
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set privileged to false"
anyPattern:
- spec:
securityContext:
privileged: false
- spec:
containers:
- name: "*"
securityContext:
privileged: false
- name: validate-allowPrivilegeEscalation
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation to false"
anyPattern:
- spec:
securityContext:
allowPrivilegeEscalation: false
- spec:
containers:
- name: "*"
securityContext:
allowPrivilegeEscalation: false


@ -1,22 +1,22 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-deny-runasrootuser
name: disallow-root-user
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/category: Security
policies.kyverno.io/description: By default, processes in a container run as a
root user (uid 0). To prevent potential compromise of container hosts, specify a
least privileged user ID when building the container image and require that
application containers run as non root users.
spec:
rules:
- name: deny-runasrootuser
- name: validate-runAsNonRoot
match:
resources:
kinds:
- Pod
validate:
message: "Root user is not allowed. Set runAsNonRoot to true"
message: "Running as root user is not allowed. Set runAsNonRoot to true"
anyPattern:
- spec:
securityContext:


@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-sysctls
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: The Sysctl interface allows modifications to kernel parameters
at runtime. In a Kubernetes pod these parameters can be specified under `securityContext.sysctls`.
Kernel parameter modifications can be used for exploits and should be restricted.
spec:
rules:
- name: validate-sysctls
match:
resources:
kinds:
- Pod
validate:
message: "Changes to kernel paramaters are not allowed"
pattern:
spec:
securityContext:
X(sysctls): null


@ -1,20 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: known-ingress
annotations:
policies.kyverno.io/category: Ingress
policies.kyverno.io/description:
spec:
rules:
- name: known-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"


@ -1,27 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: default-deny-ingress-networkpolicy
annotations:
policies.kyverno.io/category: NetworkPolicy
policies.kyverno.io/description: By default, Kubernetes allows all ingress and egress traffic
to and from pods within a cluster. A "default" NetworkPolicy resource for a namespace should
be used to deny all ingress traffic to the pods in that namespace. Additional NetworkPolicy
resources can then be configured to allow desired traffic to application pods.
spec:
rules:
- name: "default-deny-ingress"
match:
resources:
kinds:
- Namespace
name: "*"
generate:
kind: NetworkPolicy
name: default-deny-ingress
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress


@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: generate-namespace-quota
annotations:
policies.kyverno.io/category: Resource Quota
policies.kyverno.io/description: To limit the number of objects, as well as the
total amount of compute that may be consumed by an application, it is important
to create resource limits and quotas for each namespace.
spec:
rules:
- name: generate-namespace-quota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: "defaultresourcequota"
data:
spec:
hard:
requests.cpu: 4
requests.memory: 16Gi
limits.cpu: 4
limits.memory: 16Gi


@ -1,9 +1,9 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: check-resource
name: require-pod-requests-limits
annotations:
policies.kyverno.io/category: Resource Quota
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: As application workloads share cluster resources, it is important
to limit resources requested and consumed by each pod. It is recommended to require
'resources.requests' and 'resources.limits' per pod. If a namespace level request or limit is
@ -11,7 +11,7 @@ metadata:
spec:
validationFailureAction: "audit"
rules:
- name: check-resource-request-limit
- name: validate-resources
match:
resources:
kinds:


@ -1,16 +1,17 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-probes
name: require-pod-probes
annotations:
policies.kyverno.io/category: Health Check
policies.kyverno.io/description: For each pod, a 'livenessProbe' is carried out by the kubelet to
determine when to restart a container. A 'readinessProbe' is used by services and deployments to
determine if the pod is ready to recieve network traffic. Both liveness and readiness probes
need to be configured to manage the pod lifecycle during restarts and upgrades.
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: Liveness and readiness probes need to be configured to
correctly manage a pods lifecycle during deployments, restarts, and upgrades. For each
pod, a periodic `livenessProbe` is performed by the kubelet to determine if the pod's
containers are running or need to be restarted. A `readinessProbe` is used by services
and deployments to determine if the pod is ready to receive network traffic.
spec:
rules:
- name: check-probes
- name: validate-livenessProbe-readinessProbe
match:
resources:
kinds:


@ -1,22 +1,22 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-readonly-rootfilesystem
name: require-ro-rootfs
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/category: Security
policies.kyverno.io/description: A read-only root file system helps to enforce an immutable
infrastructure strategy; the container only needs to write on the mounted volume that
persists the state. An immutable root filesystem can also prevent malicious binaries from
writing to the host system.
spec:
rules:
- name: validate-readonly-rootfilesystem
- name: validate-readOnlyRootFilesystem
match:
resources:
kinds:
- Pod
validate:
message: "Container require read-only rootfilesystem"
message: "Root filesystem must be read-only"
pattern:
spec:
containers:


@ -1,24 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-container-capablities
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/description: Linux divides the privileges traditionally associated with
superuser into distinct units, known as capabilities, which can be independently enabled
or disabled by listing them in 'securityContext.capabilites'.
spec:
rules:
- name: validate-container-capablities
match:
resources:
kinds:
- Pod
validate:
message: "Allow certain linux capability"
pattern:
spec:
containers:
- securityContext:
capabilities:
add: ["NET_ADMIN"]


@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-allow-portrange-with-sysctl
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/description: The Sysctl interface allows to modify kernel parameters at
runtime and in the pod can be specified under 'securityContext.sysctls'. If kernel parameters
in the pod are to be modified, should be handled cautiously, and policy with rules restricting
these options will be helpful. We can control minimum and maximum port that a network connection
can use as its source(local) port by checking 'net.ipv4.ip_local_port_range'.
spec:
rules:
- name: allow-portrange-with-sysctl
match:
resources:
kinds:
- Pod
validate:
message: "Allowed port range is from 1024 to 65535"
pattern:
spec:
securityContext:
sysctls:
- name: net.ipv4.ip_local_port_range
value: "1024 65535"


@ -1,48 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-userid-groupid-fsgroup
annotations:
policies.kyverno.io/category: Security Context
policies.kyverno.io/description: All processes inside the pod can be made to run with specific user
and groupID by setting 'runAsUser' and 'runAsGroup' respectively. 'fsGroup' can be specified
to make sure any file created in the volume with have the specified groupID. These options can be
used to validate the IDs used for user and group.
spec:
rules:
- name: validate-userid
match:
resources:
kinds:
- Pod
validate:
message: "User ID should be 1000"
pattern:
spec:
securityContext:
runAsUser: 1000
- name: validate-groupid
match:
resources:
kinds:
- Pod
validate:
message: "Group ID should be 3000"
pattern:
spec:
securityContext:
runAsGroup: 3000
- name: validate-fsgroup
match:
resources:
kinds:
- Pod
validate:
message: "fsgroup should be 2000"
pattern:
spec:
securityContext:
fsGroup: 2000
# Alls processes inside the pod can be made to run with specific user and groupID by setting runAsUser and runAsGroup respectively.
# fsGroup can be specified to make sure any file created in the volume with have the specified groupID.
# The above parameters can also be used in a validate policy to restrict user & group IDs.


@ -0,0 +1,22 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: restrict-automount-sa-token
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Kubernetes automatically mounts service account
credentials in each pod. The service account may be assigned roles allowing pods
to access API resources. To restrict access, opt out of auto-mounting tokens by
setting automountServiceAccountToken to false.
spec:
rules:
- name: validate-automountServiceAccountToken
match:
resources:
kinds:
- Pod
validate:
message: "Auto-mounting of Service Account tokens is not allowed"
pattern:
spec:
automountServiceAccountToken: false


@ -1,21 +1,20 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: trusted-registries
name: restrict-image-registries
annotations:
policies.kyverno.io/category: Image
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: Images from unknown registries may not be scanned and secured.
Requiring use of known registries helps reduce threat exposure. You can customize this policy
to allow image registries that you trust.
Requiring use of known registries helps reduce threat exposure.
spec:
rules:
- name: trusted-registries
- name: validate-registries
match:
resources:
kinds:
- Pod
validate:
message: "Deny untrusted registries"
message: "Unknown image registry"
pattern:
spec:
containers:


@ -0,0 +1,22 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: restrict-ingress-classes
annotations:
policies.kyverno.io/category: Workload Management
policies.kyverno.io/description: It can be useful to restrict Ingress resources to a set of
known ingress classes that are allowed in the cluster. You can customize this policy to
allow ingress classes that are configured in the cluster.
spec:
rules:
- name: validate-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"


@ -1,22 +1,22 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-node-port
name: restrict-nodeport
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: A Kubernetes service of type NodePort uses a
host port to receive traffic from any source. A 'NetworkPolicy' resource cannot be used
to control traffic to host ports. Although 'NodePort' services can be useful, their use
must be limited to services with additional upstream security checks.
spec:
rules:
- name: disallow-node-port
- name: validate-nodeport
match:
resources:
kinds:
- Service
validate:
message: "Disallow service of type NodePort"
message: "Services of type NodePort are not allowed"
pattern:
spec:
type: "!NodePort"


@ -8,4 +8,4 @@ spec:
image: nginxinc/nginx-unprivileged
securityContext:
allowPrivilegeEscalation: true
privileged: false
privileged: true


@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: samples/best_practices/require_default_network_policy.yaml
policy: samples/best_practices/add_network_policy.yaml
resource: test/resources/require_default_network_policy.yaml
expected:
generation:
@ -9,7 +9,7 @@ expected:
kind: NetworkPolicy
namespace: devtest
policyresponse:
policy: default-deny-ingress-networkpolicy
policy: add-networkpolicy
resource:
kind: Namespace
apiVersion: v1

View file

@ -1,22 +1,21 @@
# file path relative to project root
input:
policy: samples/best_practices/require_namespace_quota.yaml
policy: samples/best_practices/add_ns_quota.yaml
resource: test/resources/require_namespace_quota.yaml
expected:
generation:
generatedResources:
- name: defaultresourcequota
- name: default-resourcequota
kind: ResourceQuota
namespace: test-namespace-quota
policyresponse:
policy: generate-namespace-quota
policy: add-ns-quota
resource:
kind: Namespace
apiVersion: v1
namespace: ''
name: test-namespace-quota
rules:
- name: generate-namespace-quota
- name: generate-resourcequota
type: Generation
success: true
message: created resource ResourceQuota/test-namespace-quota/defaultresourcequota

View file

@ -1,19 +1,19 @@
# file path is relative to project root
input:
policy: samples/best_practices/add_safe-to-evict_annotation.yaml
policy: samples/best_practices/add_safe_to_evict.yaml
resource: test/resources/pod-with-emptydir.yaml
expected:
mutation:
patchedresource: test/output/pod-with-emptydir.yaml
policyresponse:
policy: annotate-emptydir-hostpath
policy: add-safe-to-evict
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: pod-with-emptydir
rules:
- name: empty-dir-add-safe-to-evict
- name: annotate-empty-dir
type: Mutation
success: true
message: "successfully processed overlay"

View file

@ -1,19 +1,19 @@
# file path is relative to project root
input:
policy: samples/best_practices/add_safe-to-evict_annotation.yaml
policy: samples/best_practices/add_safe_to_evict.yaml
resource: test/resources/pod-with-hostpath.yaml
expected:
mutation:
patchedresource: test/output/pod-with-hostpath.yaml
policyresponse:
policy: annotate-emptydir-hostpath
policy: add-safe-to-evict
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: pod-with-hostpath
rules:
- name: host-path-add-safe-to-evict
- name: annotate-host-path
type: Mutation
success: true
message: "successfully processed overlay"

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_host_filesystem.yaml
policy: samples/best_practices/disallow_bind_mounts.yaml
resource: test/resources/disallow_host_filesystem.yaml
expected:
validation:
policyresponse:
policy: deny-use-of-host-fs
policy: disallow-bind-mounts
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: image-with-hostpath
rules:
- name: deny-use-of-host-fs
- name: validate-hostPath
type: Validation
message: "Validation error: Host path is not allowed\nValidation rule 'deny-use-of-host-fs' failed at path '/spec/volumes/0/hostPath/'."
success: false

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_host_filesystem.yaml
policy: samples/best_practices/disallow_bind_mounts.yaml
resource: test/resources/disallow_host_filesystem_pass.yaml
expected:
validation:
policyresponse:
policy: deny-use-of-host-fs
policy: disallow-bind-mounts
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: image-with-hostpath
rules:
- name: deny-use-of-host-fs
- name: validate-hostPath
type: Validation
message: Validation rule 'deny-use-of-host-fs' succeeded.
success: true

View file

@ -5,7 +5,7 @@ input:
expected:
validation:
policyresponse:
policy: validate-namespace
policy: disallow-default-namespace
resource:
kind: Pod
apiVersion: v1
@ -14,12 +14,10 @@ expected:
namespace: 'default'
name: myapp-pod
rules:
- name: check-default-namespace
- name: validate-namespace
type: Validation
message: "Validation error: Using 'default' namespace is restricted\nValidation rule 'check-default-namespace' failed at path '/metadata/namespace/'."
success: false
- name: check-namespace-exist
- name: require-namespace
type: Validation
message: "Validation rule 'check-namespace-exist' succeeded."
success: true

View file

@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_host_network_hostport.yaml
policy: samples/best_practices/disallow_host_network_port.yaml
resource: test/resources/disallow_host_network_hostport.yaml
expected:
validation:
@ -17,4 +17,4 @@ expected:
success: true
- name: validate-host-port
type: Validation
success: false
success: false

View file

@ -1,17 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_hostpid_hostipc.yaml
policy: samples/best_practices/disallow_host_pid_ipc.yaml
resource: test/resources/disallow_hostpid_hostipc.yaml
expected:
validation:
policyresponse:
policy: validate-host-pid-ipc
policy: disallow-host-pid-ipc
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "nginx-with-hostpid"
rules:
- name: validate-host-pid-ipc
- name: validate-hostPID-hostIPC
type: Validation
success: false
success: false

View file

@ -0,0 +1,20 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_latest_tag.yaml
resource: test/resources/pod_with_latest_tag.yaml
expected:
validation:
policyresponse:
policy: disallow-latest-tag
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: require-image-tag
type: Validation
success: true
- name: validate-image-tag
type: Validation
success: false

View file

@ -0,0 +1,20 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_latest_tag.yaml
resource: test/resources/pod_with_version_tag.yaml
expected:
validation:
policyresponse:
policy: disallow-latest-tag
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: require-image-tag
type: Validation
success: true
- name: validate-image-tag
type: Validation
success: true

View file

@ -5,14 +5,13 @@ input:
expected:
validation:
policyresponse:
policy: validate-new-capabilities
policy: disallow-new-capabilities
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "add-new-capabilities"
rules:
- name: deny-new-capabilities
- name: validate-add-capabilities
type: Validation
message: "Validation error: Capabilities cannot be added\nValidation rule deny-new-capabilities anyPattern[0] failed at path /spec/.\nValidation rule deny-new-capabilities anyPattern[1] failed at path /spec/containers/0/securityContext/capabilities/add/."
success: false

View file

@ -0,0 +1,20 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_privileged.yaml
resource: test/resources/disallow_privileged.yaml
expected:
validation:
policyresponse:
policy: disallow-privileged
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: check-privileged-cfg
rules:
- name: validate-privileged
type: Validation
success: false
- name: validate-allowPrivilegeEscalation
type: Validation
success: false

View file

@ -1,19 +1,18 @@
# file path relative to project root
input:
policy: samples/best_practices/deny_runasrootuser.yaml
policy: samples/best_practices/disallow_root_user.yaml
resource: test/resources/deny_runasrootuser.yaml
expected:
validation:
policyresponse:
policy: validate-deny-runasrootuser
policy: disallow-root-user
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: check-root-user
rules:
- name: deny-runasrootuser
- name: validate-runAsNonRoot
type: Validation
message: "Validation rule 'deny-runasrootuser' anyPattern[1] succeeded."
success: true

View file

@ -0,0 +1,18 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_sysctls.yaml
resource: test/resources/resource_validate_sysctl_configs.yaml
expected:
validation:
policyresponse:
policy: disallow-sysctls
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: nginx
rules:
- name: validate-sysctls
type: Validation
success: false

View file

@ -5,14 +5,13 @@ input:
expected:
validation:
policyresponse:
policy: check-resource
policy: require-pod-requests-limits
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: check-resource-request-limit
- name: validate-resources
type: Validation
message: "Validation error: CPU and memory resource requests and limits are required\nValidation rule 'check-resource-request-limit' failed at path '/spec/containers/0/resources/limits/cpu/'."
success: false

View file

@ -5,14 +5,13 @@ input:
expected:
validation:
policyresponse:
policy: validate-probes
policy: require-pod-probes
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: check-probes
- name: validate-livenessProbe-readinessProbe
type: Validation
message: "Validation error: Liveness and readiness probes are required\nValidation rule 'check-probes' failed at path '/spec/containers/0/livenessProbe/'."
success: false

View file

@ -0,0 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/require_ro_rootfs.yaml
resource: test/resources/require_readonly_rootfilesystem.yaml
expected:
validation:
policyresponse:
policy: require-ro-rootfs
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "ghost-with-readonly-rootfilesystem"
rules:
- name: validate-readOnlyRootFilesystem
type: Validation
success: false

View file

@ -1,22 +0,0 @@
# file path relative to project root
input:
policy: samples/best_practices/require_image_tag_not_latest.yaml
resource: test/resources/require_image_tag_not_latest_deny.yaml
expected:
validation:
policyresponse:
policy: validate-image-tag
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: image-tag-notspecified
type: Validation
message: "Validation rule 'image-tag-notspecified' succeeded."
success: true
- name: image-tag-not-latest
type: Validation
message: "Validation error: Using 'latest' image tag is restricted. Set image tag to a specific version\nValidation rule 'image-tag-not-latest' failed at path '/spec/containers/0/image/'."
success: false

View file

@ -1,22 +0,0 @@
# file path relative to project root
input:
policy: samples/best_practices/require_image_tag_not_latest.yaml
resource: test/resources/resource_validate_image_tag_latest_pass.yaml
expected:
validation:
policyresponse:
policy: validate-image-tag
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: image-tag-notspecified
type: Validation
message: "Validation rule 'image-tag-notspecified' succeeded."
success: true
- name: image-tag-not-latest
type: Validation
message: "Validation rule 'image-tag-not-latest' succeeded."
success: true

View file

@ -1,19 +0,0 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_priviledged_priviligedescalation.yaml
resource: test/resources/disallow_priviledged_priviligedescalation.yaml
expected:
validation:
policyresponse:
policy: validate-deny-privileged-priviligedescalation
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: check-privileged-cfg
rules:
- name: deny-privileged-priviligedescalation
type: Validation
message: "Validation error: Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false\nValidation rule deny-privileged-priviligedescalation anyPattern[0] failed at path /spec/securityContext/.\nValidation rule deny-privileged-priviligedescalation anyPattern[1] failed at path /spec/containers/0/securityContext/allowPrivilegeEscalation/."
success: false

View file

@ -1,18 +0,0 @@
# file path relative to project root
input:
policy: samples/best_practices/require_readonly_rootfilesystem.yaml
resource: test/resources/require_readonly_rootfilesystem.yaml
expected:
validation:
policyresponse:
policy: validate-readonly-rootfilesystem
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "ghost-with-readonly-rootfilesystem"
rules:
- name: validate-readonly-rootfilesystem
type: Validation
message: "Validation error: Container require read-only rootfilesystem\nValidation rule 'validate-readonly-rootfilesystem' failed at path '/spec/containers/0/securityContext/readOnlyRootFilesystem/'."
success: false

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/disallow_automountingapicred.yaml
policy: samples/more/restrict_automount_sa_token.yaml
resource: test/resources/disallow_automountingapicred.yaml
expected:
validation:
policyresponse:
policy: validate-disallow-automoutingapicred
policy: restrict-automount-sa-token
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: disallow-automoutingapicred
- name: validate-automountServiceAccountToken
type: Validation
message: Validation rule 'disallow-automoutingapicred' succeeded.
success: true

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/trusted_image_registries.yaml
policy: samples/more/restrict_image_registries.yaml
resource: test/resources/trusted_image_registries.yaml
expected:
validation:
policyresponse:
policy: trusted-registries
policy: restrict-image-registries
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: k8s-nginx
rules:
- name: trusted-registries
- name: validate-registries
type: Validation
message: Validation rule 'trusted-registries' succeeded.
success: true

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/known_ingress.yaml
policy: samples/more/restrict_ingress_classes.yaml
resource: test/resources/ingress-nginx.yaml
expected:
validation:
policyresponse:
policy: known-ingress
policy: restrict-ingress-classes
resource:
kind: Ingress
apiVersion: v1
namespace: ''
name: test-ingress
rules:
- name: known-ingress
- name: validate-ingress
type: Validation
message: Validation rule 'known-ingress' succeeded.
success: true

View file

@ -1,17 +1,16 @@
input:
policy: samples/best_practices/disallow_node_port.yaml
policy: samples/more/restrict_node_port.yaml
resource: test/resources/disallow_node_port.yaml
expected:
validation:
policyresponse:
policy: disallow-node-port
policy: restrict-nodeport
resource:
kind: Service
apiVersion: v1
namespace: ''
name: "my-service"
rules:
- name: disallow-node-port
- name: validate-nodeport
type: Validation
message: "Validation error: Disallow service of type NodePort\nValidation rule 'disallow-node-port' failed at path '/spec/type/'."
success: false

View file

@ -1,19 +0,0 @@
# file path relative to project root
input:
policy: samples/more/policy_validate_container_capabilities.yaml
resource: test/resources/resource_validate_container_capabilities.yaml
expected:
validation:
policyresponse:
policy: validate-container-capablities
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: add-capabilities
rules:
- name: validate-container-capablities
type: Validation
message: "Validation error: Allow certain linux capability\nValidation rule 'validate-container-capablities' failed at path '/spec/containers/0/securityContext/capabilities/add/0/'."
success: false

View file

@ -1,27 +0,0 @@
# file path relative to project root
input:
policy: samples/more/policy_validate_user_group_fsgroup_id.yaml
resource: test/resources/resource_validate_fsgroup.yaml
expected:
validation:
policyresponse:
policy: validate-userid-groupid-fsgroup
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: fsgroup-demo
rules:
- name: validate-userid
type: Validation
message: Validation rule 'validate-userid' succeeded.
success: true
- name: validate-groupid
type: Validation
message: Validation rule 'validate-groupid' succeeded.
success: true
- name: validate-fsgroup
type: Validation
message: Validation rule 'validate-fsgroup' succeeded.
success: true

View file

@ -1,19 +0,0 @@
# file path relative to project root
input:
policy: samples/more/policy_validate_sysctl_configs.yaml
resource: test/resources/resource_validate_sysctl_configs.yaml
expected:
validation:
policyresponse:
policy: validate-allow-portrange-with-sysctl
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: nginx
rules:
- name: allow-portrange-with-sysctl
type: Validation
message: "Validation error: Allowed port range is from 1024 to 65535\nValidation rule 'allow-portrange-with-sysctl' failed at path '/spec/securityContext/sysctls/0/value/'."
success: false

View file

@ -1,18 +1,17 @@
# file path relative to project root
input:
policy: samples/best_practices/known_ingress.yaml
policy: samples/more/restrict_ingress_classes.yaml
resource: test/resources/ingress-haproxy.yaml
expected:
validation:
policyresponse:
policy: known-ingress
policy: restrict-ingress-classes
resource:
kind: Ingress
apiVersion: v1
namespace: ''
name: test-ingress
rules:
- name: known-ingress
- name: validate-ingress
type: Validation
message: "Validation error: Unknown ingress class\nValidation rule 'known-ingress' failed at path '/metadata/annotations/kubernetes.io/ingress.class/'."
success: false